input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
network and returns behavior traces
:param stimulus: The stimulus to present to the network
:return:
[0]: Behavior instantiations (-1: none, 0: stay, 1: straight, 2: left, 3: right)
[1]: Speed trace
[2]: Angle trace
"""
history = GlobalDefs.hist_seconds*GlobalDefs.frame_rate
step = history
model_in = np.zeros((1, 3, history, 1))
behav_types = np.full(stimulus.size, -1, np.int8)
speed_trace = np.zeros_like(stimulus)
angle_trace = np.zeros_like(stimulus)
last_p_move_evaluation = -100 # tracks the frame when we last updated our movement evaluation
p_eval = self.p_move
while step < stimulus.size:
# update our movement probability if necessary
if step - last_p_move_evaluation >= self.eval_every:
model_in[0, 0, :, 0] = stimulus[step - history:step]
p_eval = self._get_bout_probability(model_in)
last_p_move_evaluation = step
# first invoke the bout clock and pass if we shouldn't select a behavior
if self._uni_cash.next_rand() > p_eval:
step += 1
continue
model_in[0, 0, :, 0] = stimulus[step - history:step]
model_in[0, 1, :, 0] = (speed_trace[step - history:step] - self.disp_mean) / self.disp_std
model_in[0, 2, :, 0] = (angle_trace[step - history:step] - self.ang_mean) / self.ang_std
model_out = self.model.predict(model_in, 1.0, self.remove).ravel()
if self.t_preferred is None:
behav_ranks = np.argsort(model_out)
else:
diffs = np.abs(model_out - self.t_preferred) # Note that t_preferred is already normalized
behav_ranks = np.argsort(diffs)
bt = self._select_behavior(behav_ranks)
if bt == "N":
behav_types[step] = 0
step += 1
continue
elif bt == "S":
behav_types[step] = 1
elif bt == "L":
behav_types[step] = 2
else:
behav_types[step] = 3
disp, dh = self._get_bout(bt)
if step + self.blen <= stimulus.size:
speed_trace[step:step + self.blen] = disp
angle_trace[step:step + self.blen] = dh
else:
speed_trace[step:] = disp[:speed_trace[step:].size]
angle_trace[step:] = dh[:angle_trace[step:].size]
step += self.blen
return behav_types, speed_trace, angle_trace
def compute_behavior_kernels(self, n_samples=1e6):
    """
    Presents open-loop white-noise stimulation to the network and computes the
    behavior-triggered stimulus filter (kernel) for each bout type
    :param n_samples: The number of white-noise samples to use
    :return:
        [0]: Stay kernel (all kernels go HIST_SECONDS*FRAME_RATE into the past and FRAME_RATE into the future)
        [1]: Straight kernel
        [2]: Left kernel
        [3]: Right kernel
    """
    noise_stim = self._generate_white_noise(n_samples)
    bout_trace = self.compute_openloop_behavior(noise_stim)[0]
    # grand mean is the same for every kernel, so compute it once
    stim_mean = np.mean(noise_stim)

    def triggered_average(bout_type):
        # frames on which a bout of the requested type was initiated
        trigger_frames = np.arange(n_samples)[bout_trace == bout_type]
        index_mat = indexing_matrix(trigger_frames, GlobalDefs.hist_seconds * GlobalDefs.frame_rate,
                                    GlobalDefs.frame_rate, int(n_samples))[0]
        return np.mean(noise_stim[index_mat] - stim_mean, 0)

    # bout types: 0=stay, 1=straight, 2=left, 3=right
    return tuple(triggered_average(bt) for bt in range(4))
def compute_behav_trig_activity(self, n_samples=1e6, stim=None):
    """
    Generates white-noise samples, presenting them as an open-loop stimulus and extracting behavior triggered
    unit activity averages for network units
    :param n_samples: The number of white-noise samples to use (ignored and overwritten if stim is given)
    :param stim: Optional stimulus, otherwise white noise samples will be generated
    :return:
        Nested dictionary with keys for each bout type (0-3) and a network branch index dictionary for each key
        containing a list of layer unit kernels.
    """
    def kernel(t, u_response):
        # behavior-triggered average of one unit's response around bouts of type t,
        # with the unit's overall mean response subtracted
        if u_response.size != n_samples:
            raise ValueError("Got wrong response length")
        indices = np.arange(n_samples)[btype == t]
        ixm = indexing_matrix(indices, GlobalDefs.hist_seconds*GlobalDefs.frame_rate, GlobalDefs.frame_rate,
                              int(n_samples))[0]
        return np.mean(u_response[ixm]-np.mean(u_response), 0)
    if stim is None:
        stim = self._generate_white_noise(n_samples)
    else:
        n_samples = stim.size
    btype, speed, angle = self.compute_openloop_behavior(stim)
    # NOTE: To align activity and behavior traces, need to provide lead-in!!!
    lead_in = np.zeros(self.model.input_dims[2] - 1)
    # NOTE: Computing unit_stimulus_responses re-standardizes temperature below - so re-convert to celsius first!
    stim = stim.copy()*self.std.temp_std + self.std.temp_mean
    # dictionary, indexed by branches, with list for each branch with n-layers entries of stim x n_units activations
    unit_dict = self.model.unit_stimulus_responses(np.r_[lead_in, stim], np.r_[lead_in, speed],
                                                   np.r_[lead_in, angle], self.std)
    # for each unit, compute its kernels - HIST_SECONDS*FRAME_RATE into the past and FRAME_RATE into the future
    # one kernel for each behavior type
    k_size = -1  # kernel length, determined lazily on first use (identical for all units)
    kernel_dict = {bt: {} for bt in range(4)}
    for bt in range(4):
        for b_name in unit_dict:
            if b_name not in kernel_dict[bt]:
                kernel_dict[bt][b_name] = []
            for layer_act in unit_dict[b_name]:
                if k_size == -1:
                    # probe once with the stimulus itself to measure the kernel size
                    k_size = kernel(0, stim).size
                layer_kernels = np.zeros((k_size, layer_act.shape[1]))
                for uix in range(layer_act.shape[1]):
                    layer_kernels[:, uix] = kernel(bt, layer_act[:, uix])
                kernel_dict[bt][b_name].append(layer_kernels)
    return kernel_dict
class BoutFrequencyEvolver(CircleGradSimulation):
"""
Class to obtain parameters that turn the output of the temperature branch of a ZfGpNetworkModel into bout frequency
such that gradient navigation efficiency will be maximised by running an evolutionary algorithm
"""
def __init__(self, stds: GradientStandards, model: ZfGpNetworkModel, n_sel_best=10, n_sel_random=6, n_progeny=2):
    """
    Creates a new BoutFrequencyEvolver
    :param stds: Data standardizations
    :param model: The network model to use
    :param n_sel_best: The number of top performing networks to select for each next generation
    :param n_sel_random: The number of not top performing networks to select for each next generation
    :param n_progeny: For each network pairing the number of child networks to produce
    """
    # NOTE(review): positional constants (100, 22, 37, 26) are passed straight through to
    # CircleGradSimulation - presumably arena radius and temperature parameters; confirm order there.
    super().__init__(model, stds, 100, 22, 37, 26)
    self.model_branch = 't'  # evolution acts on the temperature branch of the network
    self.n_sel_best = n_sel_best
    self.n_sel_random = n_sel_random
    self.n_progeny = n_progeny
    # population of weight vectors, one row per network
    # (n_networks / n_weights are presumably derived attributes of the class - TODO confirm)
    self.weight_mat = np.random.randn(self.n_networks, self.n_weights)
    self.generation = 0  # counter of evolutionary generations run so far
def run_ideal(self, nsteps, pfail=0.0):
    """
    Not supported by this class - bout frequency evolution only uses run_simulation
    :raises NotImplementedError: always
    """
    raise NotImplementedError("Function removed in this class")
def run_simulation(self, nsteps, debug=False):
    """
    Runs simulation across all networks in a quasi-parallel manner
    :param nsteps: The number of simulation steps to perform
    :param debug: IGNORED
    :return: Returns a list of position arrays for each network (burn-in period removed)
    """
    history = GlobalDefs.frame_rate * GlobalDefs.hist_seconds
    burn_period = history * 2  # leading frames that are cut from the returned traces
    start = history + 1
    net_pos = []  # per-network (nsteps + burn_period) x 3 trace; columns 0-1 are position, column 2 the angle
    for i in range(self.n_networks):
        p = np.full((nsteps + burn_period, 3), np.nan)
        p[:start + 1, :] = self.get_start_pos()[None, :]
        net_pos.append(p)
    steps = np.full(self.n_networks, start)  # for each simulation its current step
    # to avoid network queries we only evaluate bout frequencies of networks every ~ 20 steps
    last_bf_eval = np.full_like(steps, -100)  # for each network the last step in which bf was evaluated
    bf_eval_model_in = np.zeros((self.n_networks, 3, history, 1))  # model input to evaluate bfreq across networks
    bfreqs = np.full(self.n_networks, self.p_move)  # initialize all bout frequencies to base value
    while np.all(steps < nsteps + burn_period):  # run until first network is finished
        # re-evaluate once every network advanced >= 20 steps, or any fell >= 50 behind its last evaluation
        if np.all(steps-last_bf_eval >= 20) or np.any(steps-last_bf_eval >= 50):
            # newly evaluate bout frequencies
            for i in range(self.n_networks):
                t = self.temperature(net_pos[i][steps[i] - history:steps[i], 0],
                                     net_pos[i][steps[i] - history:steps[i], 1])
                bf_eval_model_in[i, 0, :, 0] = t
            bf_eval_model_in -= self.temp_mean  # these operations are ok since we only care about the
            bf_eval_model_in /= self.temp_std  # temperature input part - rest can be arbitrary values
            branch_out = self.model.branch_output(self.model_branch, bf_eval_model_in, self.remove)
            # each network's bout frequency is its own weighted sum over the branch outputs
            bfreqs = np.sum(branch_out * self.weight_mat, 1)
            # apply non-linearity
            bfreqs = 1 / (1 + np.exp(-bfreqs))  # [0, 1]
            bfreqs = ((2 - 0.5) * bfreqs + 0.5) * self.bf_mult  # [0.5, 2]
            bfreqs /= GlobalDefs.frame_rate  # turn into probability
            last_bf_eval = steps.copy()  # update indicator
        # determine which networks should move in this step - one decider draw for all
        d = self._uni_cash.next_rand()
        non_movers = np.nonzero(d > bfreqs)[0]
        movers = np.nonzero(d <= bfreqs)[0]
        for ix in non_movers:
            # non-moving networks simply repeat their previous position for one frame
            s = steps[ix]
            net_pos[ix][s, :] = net_pos[ix][s - 1, :]
            steps[ix] = s+1
        if movers.size == 0:
            continue
        # for each mover compute model prediction
        # (bf_eval_model_in is reused as scratch space here; only the first movers.size
        #  rows are filled and later passed to predict - the rest hold stale data)
        for n, ix in enumerate(movers):
            s = steps[ix]
            t = self.temperature(net_pos[ix][s - history:s, 0], net_pos[ix][s - history:s, 1])
            bf_eval_model_in[n, 0, :, 0] = (t - self.temp_mean) / self.temp_std
            spd = np.sqrt(np.sum(np.diff(net_pos[ix][s - history - 1:s, 0:2], axis=0) ** 2, 1))
            bf_eval_model_in[n, 1, :, 0] = (spd - self.disp_mean) / self.disp_std
            dang = np.diff(net_pos[ix][s - history - 1:s, 2], axis=0)
            bf_eval_model_in[n, 2, :, 0] = (dang - self.ang_mean) / self.ang_std
        model_out = self.model.predict(bf_eval_model_in[:movers.size, :, :, :], 1.0, self.remove)
        # for each mover turn model prediction into behavior and execute
        for n, ix in enumerate(movers):
            s = steps[ix]
            # rank behaviors by closeness of predicted outcome to the (standardized) preferred temperature
            proj_diff = np.abs(model_out[n, :] - (self.t_preferred - self.temp_mean) / self.temp_std)
            behav_ranks = np.argsort(proj_diff)
            bt = self.select_behavior(behav_ranks)
            if bt == "N":
                # no bout selected - stay in place for one frame
                net_pos[ix][s, :] = net_pos[ix][s - 1, :]
                steps[ix] = s+1
                continue
            traj = self.get_bout_trajectory(net_pos[ix][s - 1, :], bt)
            if s + self.blen <= nsteps + burn_period:
                net_pos[ix][s:s+self.blen, :] = traj
            else:
                # bout would run past the end of the trace - truncate it
                net_pos[ix][s:, :] = traj[:net_pos[ix][s:, :].shape[0], :]
            steps[ix] = s + self.blen
    return [pos[burn_period:steps[i], :] for i, pos in enumerate(net_pos)]
def score_networks(self, nsteps):
"""
For each network runs simulation and rates the error as the average temperature distance
from the desired temperature weighted by the inverse of the radius
:param nsteps: The number of simulation steps to (approximately) perform for each network
:return:
[0]: Average deviation from desired temperature for each network
[1]: List of position traces of each network
"""
net_pos | |
assert Task.select().where(Task.task_type == TaskType.FUNDING).count() == 4
task = Task.select().where(Task.task_type == TaskType.FUNDING).order_by(Task.id.desc()).first()
assert task.records.count() == 2
# Activate a single record:
resp = client.post(
"/admin/fundingrecord/action/",
follow_redirects=True,
data={
"url": f"/admin/fundingrecord/?task_id={task.id}",
"action": "activate",
"rowid": task.records.first().id,
})
assert FundingRecord.select().where(FundingRecord.task_id == task.id,
FundingRecord.is_active).count() == 1
# Activate all:
resp = client.post("/activate_all", follow_redirects=True, data=dict(task_id=task.id))
assert FundingRecord.select().where(FundingRecord.task_id == task.id,
FundingRecord.is_active).count() == 2
# Reset a single record:
FundingRecord.update(processed_at=datetime.datetime(2018, 1, 1)).execute()
resp = client.post(
"/admin/fundingrecord/action/",
follow_redirects=True,
data={
"url": f"/admin/fundingrecord/?task_id={task.id}",
"action": "reset",
"rowid": task.records.first().id,
})
assert FundingRecord.select().where(FundingRecord.task_id == task.id,
FundingRecord.processed_at.is_null()).count() == 1
assert UserInvitation.select().count() == 1
resp = client.post(
"/admin/task/delete/",
data={
"id": task.id,
"url": "/admin/task/"
},
follow_redirects=True)
assert resp.status_code == 200
assert not Task.select().where(Task.id == task.id).exists()
resp = client.post(
"/load/researcher/funding",
data={
"file_": (
BytesIO(
"""title,translated title,language,type,org type,short description,amount,aurrency,start,end,org name,city,region,country,disambiguated organisation identifier,disambiguation source,orcid id,name,role,email,external identifier type,external identifier value,external identifier url,external identifier relationship
THIS IS A TITLE, नमस्ते,hi,,MY TYPE,Minerals unde.,300000,NZD.,,2025,Royal Society Te Apārangi,Wellington,,New Zealand,210126,RINGGOLD,1914-2914-3914-00X3, GivenName Surname, LEAD, <EMAIL>,grant_number,GNS1706900961,https://www.grant-url2.com,PART_OF
""".encode() # noqa: E501
), # noqa: E501
"error.csv",
),
},
follow_redirects=True)
assert resp.status_code == 200
assert b"Failed to load funding record file" in resp.data
assert b"type is mandatory" in resp.data
resp = client.post(
"/load/researcher/funding",
data={"file_": (BytesIO(b"title\nVAL"), "error.csv")},
follow_redirects=True)
assert resp.status_code == 200
assert b"Failed to load funding record file" in resp.data
assert b"Expected CSV or TSV format file." in resp.data
resp = client.post(
"/load/researcher/funding",
data={"file_": (BytesIO(b"header1,header2,header2\n1,2,3"), "error.csv")},
follow_redirects=True)
assert resp.status_code == 200
assert b"Failed to load funding record file" in resp.data
assert b"Failed to map fields based on the header of the file" in resp.data
resp = client.post(
"/load/researcher/funding",
data={
"file_": (
BytesIO(
"""title,translated title,language,type,org type,short description,amount,aurrency,start,end,org name,city,region,country,disambiguated organisation identifier,disambiguation source,orcid id,name,role,email,external identifier type,external identifier value,external identifier url,external identifier relationship
THIS IS A TITLE #2, नमस्ते #2,hi, CONTRACT,MY TYPE,Minerals unde.,900000,USD.,,**ERROR**,,,,,210126,RINGGOLD,1914-2914-3914-00X3, GivenName Surname, LEAD, <EMAIL>,grant_number,GNS1706900961,https://www.grant-url2.com,PART_OF""".encode() # noqa: E501
), # noqa: E501
"fundings.csv",
),
},
follow_redirects=True)
assert resp.status_code == 200
assert b"Failed to load funding record file" in resp.data
assert b"Wrong partial date value '**ERROR**'" in resp.data
resp = client.post(
"/load/researcher/funding",
data={
"file_": (
BytesIO(
"""title,translated title,language,type,org type,short description,amount,aurrency,start,end,org name,city,region,country,disambiguated organisation identifier,disambiguation source,orcid id,name,role,email,external identifier type,external identifier value,external identifier url,external identifier relationship
THIS IS A TITLE, नमस्ते,hi, CONTRACT,MY TYPE,Minerals unde.,300000,NZD.,,2025,Royal Society Te Apārangi,Wellington,,New Zealand,210126,RINGGOLD,1914-2914-3914-00X3, GivenName Surname, LEAD,**ERROR**,grant_number,GNS1706900961,https://www.grant-url2.com,PART_OF""".encode() # noqa: E501
), # noqa: E501
"fundings.csv",
),
},
follow_redirects=True)
assert resp.status_code == 200
assert b"Failed to load funding record file" in resp.data
assert b"Invalid email address '**error**'" in resp.data
resp = client.post(
"/load/researcher/funding",
data={
"file_": (
BytesIO(
"""title,translated title,language,type,org type,short description,amount,aurrency,start,end,org name,city,region,country,disambiguated organisation identifier,disambiguation source,orcid id,name,role,email,external identifier type,external identifier value,external identifier url,external identifier relationship
THIS IS A TITLE, नमस्ते,hi, CONTRACT,MY TYPE,Minerals unde.,300000,NZD.,,2025,Royal Society Te Apārangi,Wellington,,New Zealand,210126,RINGGOLD,ERRO-R914-3914-00X3, GivenName Surname, LEAD,<EMAIL>,grant_number,GNS1706900961,https://www.grant-url2.com,PART_OF """.encode() # noqa: E501
), # noqa: E501
"fundings.csv",
),
},
follow_redirects=True)
assert resp.status_code == 200
assert b"Failed to load funding record file" in resp.data
assert b"Invalid ORCID iD ERRO-R" in resp.data
# without "excluded"
resp = client.post(
"/load/researcher/funding",
data={
"file_": (
BytesIO(
"""Put Code,Title,Translated Title,Translated Title Language Code,Type,Organization Defined Type,Short Description,Amount,Currency,Start Date,End Date,Org Name,City,Region,Country,Disambiguated Org Identifier,Disambiguation Source,Visibility,ORCID iD,Email,First Name,Last Name,Name,Role,External Id Type,External Id Value,External Id Url,External Id Relationship,Identifier
,This is the project title,,,CONTRACT,Fast-Start,This is the project abstract,300000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,0000-0002-9207-4933,,,,Associate Professor A Contributor 1,lead,grant_number,XXX1701,,SELF,
,This is the project title,,,CONTRACT,Fast-Start,This is the project abstract,300000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,,,,,Dr B Contributor 2,co_lead,grant_number,XXX1701,,SELF,
,This is the project title,,,CONTRACT,Fast-Start,This is the project abstract,300000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,,,,,Dr E Contributor 3,,grant_number,XXX1701,,SELF,
,This is another project title,,,CONTRACT,Standard,This is another project abstract,800000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,,<EMAIL>,,,Associate Professor F Contributor 4,lead,grant_number,XXX1702,,SELF,9999
,This is another project title,,,CONTRACT,Standard,This is another project abstract,800000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,,<EMAIL>,John,Doe,,co_lead,grant_number,XXX1702,,SELF,8888 """.encode() # noqa: E501
), # noqa: E501
"fundings042.csv",
),
},
follow_redirects=True)
assert resp.status_code == 200
assert b"This is the project title" in resp.data
assert b"This is another project title" in resp.data
assert b"fundings042.csv" in resp.data
assert Task.select().where(Task.task_type == TaskType.FUNDING).count() == 4
task = Task.select().where(Task.filename == "fundings042.csv").first()
assert task.records.count() == 2
fr = task.records.where(FundingRecord.title == "This is another project title").first()
assert fr.contributors.count() == 0
assert fr.external_ids.count() == 1
assert fr.invitees.count() == 2
resp = client.post("/activate_all", follow_redirects=True, data=dict(task_id=task.id))
UserInvitation.select().where(UserInvitation.task_id == task.id).count() == 2
FundingRecord.update(processed_at="1/1/2019").where(FundingRecord.task_id == task.id).execute()
resp = client.post("/rest_all", follow_redirects=True, data=dict(task_id=task.id))
UserInvitation.select().where(UserInvitation.task_id == task.id).count() == 2
FundingRecord.select().where(FundingRecord.task_id == task.id,
FundingRecord.processed_at.is_null()).execute()
resp = client.get(f"/admin/fundingrecord/export/tsv/?task_id={task.id}")
assert resp.headers["Content-Type"] == "text/tsv; charset=utf-8"
assert len(resp.data.splitlines()) == 4
resp = client.post(
"/load/researcher/funding",
data={
"file_": (
BytesIO(b"""Funding Id,Identifier,Put Code,Title,Translated Title,Translated Title Language Code,Type,Organization Defined Type,Short Description,Amount,Currency,Start Date,End Date,Org Name,City,Region,Country,Disambiguated Org Identifier,Disambiguation Source,Visibility,ORCID iD,Email,First Name,Last Name,Name,Role,Excluded,External Id Type,External Id Url,External Id Relationship
XXX1701,00002,,This is the project title,,,CONTRACT,Fast-Start,This is the project abstract,300000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,,<EMAIL>,Bob,Contributor 2,,,Y,grant_number,,SELF
XXX1701,00003,,This is the project title,,,CONTRACT,Fast-Start,This is the project abstract,300000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,,<EMAIL>,Eve,Contributor 3,,,Y,grant_number,,SELF
XXX1702,00004,,This is another project title,,,CONTRACT,Standard,This is another project abstract,800000,NZD,2018,2021,Marsden Fund,Wellington,,NZ,http://dx.doi.org/10.13039/501100009193,FUNDREF,,,<EMAIL>,Felix,Contributor 4,,,Y,grant_number,,SELF""" # noqa: E501
), # noqa: E501
"fundings_ex.csv",
),
},
follow_redirects=True)
assert resp.status_code == 200
assert b"the project title" in resp.data
assert b"fundings_ex.csv" in resp.data
assert Task.select().where(Task.task_type == TaskType.FUNDING).count() == 5
task = Task.select().where(Task.task_type == TaskType.FUNDING).order_by(Task.id.desc()).first()
assert task.records.count() == 2
# Change invitees:
record = task.records.first()
invitee_count = record.invitees.count()
url = quote(f"/url/?record_id={record.id}", safe='')
resp = client.post(
f"/admin/fundinginvitee/new/?url={url}",
data={
"email": "<EMAIL>",
"first_name": "<NAME>",
"last_name": "<NAME>",
"visibility": "PUBLIC",
})
assert record.invitees.count() > invitee_count
invitee = record.invitees.first()
resp = client.post(
f"/admin/fundinginvitee/edit/?id={invitee.id}&url={url}",
data={
"email": "<EMAIL>",
"first_name": invitee.first_name + "NEW",
"visibility": "PUBLIC",
})
assert record.invitees.count() > invitee_count
assert record.invitees.first().first_name == invitee.first_name + "NEW"
assert record.invitees.first().email == "<EMAIL>"
# Change contributors:
record = task.records.first()
contributor_count = record.contributors.count()
url = quote(f"/admin/fundingrecord/?record_id={record.id}&task_id={task.id}", safe='')
resp = client.post(
f"/admin/fundingcontributor/new/?url={url}",
data={
"email": "<EMAIL>",
"name": "<NAME>",
"role": "ROLE",
})
assert record.contributors.count() > contributor_count
contributor = record.contributors.first()
resp = client.post(
f"/admin/fundingcontributor/edit/?id={contributor.id}&url={url}",
data={
"email": "<EMAIL>",
"orcid": "AAAA-2738-3738-00X3",
})
c = FundingContributor.get(contributor.id)
assert c.email != "<EMAIL>"
assert c.orcid != "AAAA-2738-3738-00X3"
assert b"Invalid ORCID" in resp.data
resp = client.post(
f"/admin/fundingcontributor/edit/?id={contributor.id}&url={url}",
data={
"email": "<EMAIL>",
"orcid": "1631-2631-3631-00X3",
})
c = FundingContributor.get(contributor.id)
assert c.email == "<EMAIL>"
assert c.orcid == "1631-2631-3631-00X3"
# Add a new funding record:
url = quote(f"/admin/fundingrecord/?task_id={task.id}", safe='')
record_count = Task.get(task.id).records.count()
resp = client.post(
f"/admin/fundingrecord/new/?url={url}",
follow_redirects=True,
data={
"title": "FUNDING TITLE",
"type": "AWARD",
"_continue_editing": "Save and Continue Editing",
})
assert Task.get(task.id).records.count() == record_count + 1
capture_event.assert_called()
def test_researcher_work(client, mocker):
"""Test preload work data."""
exception = mocker.patch.object(client.application.logger, "exception")
user = client.data["admin"]
client.login(user, follow_redirects=True)
resp = client.post(
"/load/researcher/work",
data={
"file_": (
BytesIO(
b'[{"invitees": [{"identifier":"00001", "email": "<EMAIL>",'
b'"first-name": "Alice", "last-name": "Contributor 1", "ORCID-iD": null, "put-code":null}],'
b'"title": { "title": { "value": "WORK TITLE #1"}}, "citation": {"citation-type": '
b'"FORMATTED_UNSPECIFIED", "citation-value": "This is citation value"}, "type": "BOOK_CHR",'
b'"contributors": {"contributor": [{"contributor-attributes": {"contributor-role": '
b'"AUTHOR", "contributor-sequence" : "1"},"credit-name": {"value": "firentini"}}]}'
b', "external-ids": {"external-id": [{"external-id-value": '
b'"GNS170661","external-id-type": "grant_number", "external-id-relationship": "SELF"}]}}]'),
"work001.json",
),
"email":
user.email
},
follow_redirects=True)
assert resp.status_code == 200
# Work file successfully loaded.
assert b"WORK TITLE #1" in resp.data
assert b"BOOK_CHR" in resp.data
task = Task.get(filename="work001.json")
assert task.records.count() == 1
rec = task.records.first()
assert rec.external_ids.count() == 1
assert rec.contributors.count() == 1
assert rec.invitees.count() == 1
# Activate a single record:
resp = client.post(
"/admin/workrecord/action/",
follow_redirects=True,
data={
"url": f"/admin/workrecord/?task_id={task.id}",
"action": "activate",
"rowid": task.records.first().id,
})
assert WorkRecord.select().where(WorkRecord.task_id == task.id,
WorkRecord.is_active).count() == 1
# Activate all:
resp = client.post("/activate_all", follow_redirects=True, data=dict(task_id=task.id))
assert WorkRecord.select().where(WorkRecord.task_id == task.id,
WorkRecord.is_active).count() == 1
# Reset a single record:
WorkRecord.update(processed_at=datetime.datetime(2018, 1, 1)).execute()
resp = client.post(
"/admin/workrecord/action/",
follow_redirects=True,
data={
"url": f"/admin/fundingrecord/?task_id={task.id}",
"action": "reset",
"rowid": task.records.first().id,
})
assert WorkRecord.select().where(WorkRecord.task_id == task.id,
WorkRecord.processed_at.is_null()).count() == 1
resp = client.get(f"/admin/workrecord/export/csv/?task_id={task.id}")
assert resp.headers["Content-Type"] == "text/csv; charset=utf-8"
assert len(resp.data.splitlines()) == 2
resp = client.get(f"/admin/workrecord/export/csv/?task_id={task.id}")
assert resp.headers["Content-Type"] == "text/csv; charset=utf-8"
assert len(resp.data.splitlines()) == 2
resp = client.post(
"/load/researcher/work",
data={
"file_": (
BytesIO("""[{
"invitees": [
{
"identifier": "00001", "email": "<EMAIL>",
"first-name": "Alice", "last-name": "<NAME>",
"ORCID-iD": "0000-0002-9207-4933", "put-code": null, "visibility": null
},
{
"identifier": "00002", "email": "<EMAIL>",
"first-name": "Bob", "last-name": "<NAME>", "ORCID-iD": null,
"put-code": null, "visibility": null
}
],
"path": null,
"title": {
"title": {"value": "This is a title"},
"subtitle": null,
"translated-title": {"value": "हिंदी","language-code": "hi"}
},
"journal-title": {"value": "This is a journal title"},
"short-description": "xyz this is short description",
"citation": {"citation-type": "FORMATTED_UNSPECIFIED", "citation-value": "This is citation value"},
"type": "BOOK_CHAPTER",
"publication-date": {
"year": {"value": "2001"},
"month": {"value": "1"},
"day": {"value": "12"},
"media-type": null
},
"external-ids": {
"external-id": [{
"external-id-type": "bibcode",
"external-id-value": "sdsds",
"external-id-url": {"value": "http://url.edu/abs/ghjghghj"},
"external-id-relationship": "SELF"
}
]
},
"url": null,
"contributors": {
"contributor": [
{"contributor-attributes": {"contributor-sequence": "FIRST", "contributor-role": "AUTHOR"},
"credit-name": {"value": "<NAME>"},
"contributor-orcid": | |
<gh_stars>0
#! usr/bin/env python
import numpy as np
import datetime
import random
import numpy as np
from collections import deque
import json
from collections import defaultdict
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD , Adam
import tensorflow as tf
from keras.layers.normalization import BatchNormalization
import time
import sys
import pickle
import auxiliary_functions, make_dataset
from py_geohash_any import geohash as gh
from keras import backend as K
from auxiliary_functions import convert_miles_to_minutes_nyc, \
list_of_output_predictions_to_direction
__author__ = '<NAME>'
# DQN training hyper-parameters
ACTIONS = 9  # number of valid actions (presumably 8 neighboring geohashes + 'stay' - see NaiveApproach)
GAMMA = 0.99  # decay rate of past observations
OBSERVATION = 10000. # timesteps to observe before training
EXPLORE = 3000000 # frames over which to anneal epsilon
FINAL_EPSILON = 0.001 # final value of epsilon
INITIAL_EPSILON = 0.1 # starting value of epsilon
TRAINING_EPSILON = .0001  # epsilon used when running a pre-trained model ('Run' mode)
REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH = 32 # size of minibatch
FRAME_PER_ACTION = 1
LEARNING_RATE = 1e-1  # NOTE(review): 0.1 is unusually high for Adam (typical default 1e-3) - confirm intentional
class RLNYCTaxiCab(object):
"""Creates an mlp model with DQN to train on NYC taxi data from January 2016.
Uses a MLP model with DQN."""
def __init__(self, list_of_unique_geohashes, list_of_time_index, list_of_geohash_index,
             list_of_inverse_heohash_index, final_data_structure, return_metrics=False):
    """Store the data attributes needed to train our model and build the MLP.
    :param list_of_unique_geohashes: all geohash strings in the data set
    :param list_of_time_index: valid time indices
    :param list_of_geohash_index: mapping geohash string -> integer index
    :param list_of_inverse_heohash_index: mapping integer index -> geohash string
    :param final_data_structure: time -> geohash -> list of (fare, time, ratio) tuples
    :param return_metrics: whether training should report extra metrics
    """
    (self.list_of_unique_geohashes,
     self.list_of_time_index,
     self.list_of_geohash_index,
     self.list_of_inverse_heohash_index,
     self.final_data_structure) = (list_of_unique_geohashes,
                                   list_of_time_index,
                                   list_of_geohash_index,
                                   list_of_inverse_heohash_index,
                                   final_data_structure)
    self.return_metrics = return_metrics
    self.build_mlp_model()
def build_mlp_model(self):
    """Build a simple MLP Q-network.
    Input is (time, geohash index); output is one linear Q-value per action
    (i.e. which geohash to move to next). Stored on self.model_mlp.
    """
    net = Sequential()
    # first hidden block needs the explicit input shape
    net.add(Dense(100, input_shape=(2,)))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(Dropout(.3))
    # two further Dense -> BatchNorm -> ReLU -> Dropout blocks of growing width
    for width in (500, 1000):
        net.add(Dense(width))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
        net.add(Dropout(.3))
    net.add(Dense(9, activation='linear'))  # predict which geohash to move to next
    net.compile(loss='mse', optimizer=Adam(lr=LEARNING_RATE))
    self.model_mlp = net
def NaiveApproach(self, s_time_, s_geohash_, starting_geo,
                  input_fare_list=None, historic_current_fare=None):
    """Random baseline: choose a uniformly random action every step and track
    the total fare received, the fare over time and the geohashes visited.
    Terminates once the simulated day is over (time index passes 2350).
    :param s_time_: starting time index
    :param s_geohash_: starting geohash index
    :param starting_geo: starting geohash string
    :param input_fare_list: optional previous fare-over-time list to keep appending to
    :param historic_current_fare: running fare total matching input_fare_list
    :return:
        [0]: total fare received
        [1]: fare-over-time list
        [2]: list of geohashes visited
    """
    starting_geohash = starting_geo
    s_time = s_time_
    s_geohash = s_geohash_
    list_of_geohashes_visited = []
    # check and see if we have old fare data to continue adding to
    # (fix: identity test instead of '== None')
    if input_fare_list is None:
        total_fare = 0
        total_fare_over_time = []
    else:
        total_fare = historic_current_fare
        total_fare_over_time = input_fare_list
    while True:
        # pick a uniformly random action (fix: removed unused one-hot a_t vector)
        action_index = random.randrange(ACTIONS)
        # get the neighbors of the current geohash - convert back to string first
        current_geohash_string = self.list_of_inverse_heohash_index[s_geohash]
        neighbors = gh.neighbors(current_geohash_string)
        # map the action to a movement direction
        direction_to_move_to = list_of_output_predictions_to_direction[action_index]
        if direction_to_move_to == 'stay':
            # stay in the current geohash; rewards are looked up by geohash string
            possible_rewards = np.array(self.final_data_structure[s_time][starting_geohash])
            # NOTE(review): new_geohash is set to the *index* here while every other branch
            # uses a geohash string; it then flows into list_of_geohash_index[...] and into
            # starting_geohash below - preserved as-is from the original, but looks suspect.
            new_geohash = self.list_of_geohash_index[starting_geohash]
        else:
            new_geohash = neighbors[direction_to_move_to]  # the geohash to move to next
            # reward of the geohash we move to: time -> geohash -> list of (fare, time, ratio)
            possible_rewards = np.array(self.final_data_structure[s_time][new_geohash])
        if len(possible_rewards) == 0:
            # no information for this time and geohash - don't go here, assume ten wasted minutes
            fare_t = 0
            s_time1 = s_time + 10
        else:
            # sample one recorded trip from this time/geohash bucket
            reward_option = np.random.randint(0, len(possible_rewards))
            fare_t = possible_rewards[reward_option][0]
            s_time1 = s_time + possible_rewards[reward_option][1]  # advance by the trip length
        s_geohash1 = self.list_of_geohash_index[new_geohash]
        if s_time1 > 2350:  # 2350 is the last possible time for a trip
            break  # the day is over
        total_fare += fare_t
        total_fare_over_time.append(total_fare)
        list_of_geohashes_visited.append(starting_geohash)
        # increment the state and time information
        s_time = s_time1
        s_geohash = s_geohash1
        starting_geohash = new_geohash  # update the starting geohash in case we stay here
    return total_fare, total_fare_over_time, list_of_geohashes_visited
def trainNetworkNeuralNetworkTaxicab(self, args, training_length=1000,
return_training_data = False, save_model = False):
# Code adapted from https://github.com/yanpanlau/Keras-FlappyBird/blob/master/qlearn.py
"""Train a DQN algorithm to learn how the best geohashes to go to throughout the day.
Each geohash is about
3803 x 3803 meters (~15 minutes of driving time to traverse in NYC).
This algoirthm incorporates experience replay to stablize the training procedure
for the DQN algorithm. Due to the large size of the input features,
you need to train for a long time (1-2million iterations) .
This implementation also uses a Naive approach which has both the DQN and
Naive implementation start at the same geohash and same time. Then,
each algorithm will run until the day is finished keeping track
of the geohashes visited and fare received.
This information is finally returned."""
self.return_training_data = return_training_data
# store the previous observations in replay memory
D = deque()
# get the first state by randomlly choosing a geohash to start at and random time to start at
# Assume that the first state has no reward associated with it
# Over multiple steps, starting geohash becomes the previous geohash we visited
starting_geohash = np.random.choice(self.list_of_unique_geohashes)
s_time = np.random.choice(self.list_of_time_index)
s_geohash = self.list_of_geohash_index[starting_geohash]
s_t = np.array([[s_time,
s_geohash]])
if args['mode'] == 'Run':
OBSERVE = 1000 #We keep observe, never train
epsilon = TRAINING_EPSILON
print ("Now we load weight")
self.model_mlp.load_weights(args['model_weights_load'])
adam = Adam(lr=LEARNING_RATE)
self.model_mlp.compile(loss='mse',optimizer=adam)
print ("Weight load successfully")
else: #We go to training mode
OBSERVE = OBSERVATION
epsilon = INITIAL_EPSILON
#start your observations
t = 0
total_days_driven = 0
loss_list = []
total_fare_received = 0
total_fare_received_over_time = []
list_of_geohashes_visited = []
total_naive_fare = 0
total_naive_fare_over_time =[]
list_of_naive_geohashes_visited = []
if return_training_data == True:
self.training_data_X = np.zeros((training_length+1,2))
self.training_data_y = np.zeros((training_length+1,ACTIONS))
if self.return_metrics == True: ## Compare to a naive approach, only train / observe
if t > OBSERVE:
total_naive_fare, total_naive_fare_over_time, list_of_naive_geohashes_visited = \
self.NaiveApproach(s_time, s_geohash,starting_geohash)
start_time = time.time()
while (True):
loss = 0
Q_sa = 0
action_index = 0
r_t = 0
a_t = np.zeros([ACTIONS])
#choose a random action action epsilon greedy
if t % FRAME_PER_ACTION == 0: ## will always choose this if frame per action is 1
if random.random() <= epsilon:
print("----------Random Action----------")
action_index = random.randrange(ACTIONS) # Randomlly choose another geohash to go to
a_t[action_index] = 1
else:
#print("------------Predicted Action___________")
q = self.model_mlp.predict(s_t) #input the time followed by the geohash index
max_Q = np.argmax(q) # find the position of the highest probability (which direction to go in)
action_index = max_Q
#print('Action {}'.format(action_index))
a_t[max_Q] = 1
# We reduced the epsilon gradually to take more random actions
if epsilon > FINAL_EPSILON and t > OBSERVE:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
# run the selected action and observed next state and reward
# We need to find the neighbors to the geohash that we started at
#G et the neighbors from the current geohash - convert back to string
current_geohash_string = self.list_of_inverse_heohash_index[s_geohash]
neighbors = gh.neighbors(current_geohash_string)
# Get the direction we should go
direction_to_move_to = list_of_output_predictions_to_direction[action_index]
# Get the geohash of the direction we moved to
if direction_to_move_to =='stay':
new_geohash = starting_geohash # stay in current geohash, get the index of said geohash
possible_rewards = np.array(self.final_data_structure[s_time][new_geohash])
# hash with the letters of the geohash above
new_geohash = self.list_of_geohash_index[starting_geohash]
else:
new_geohash = neighbors[direction_to_move_to]## give us the geohash to move to next
# get the reward of the geohash we just moved to (this is the ratio of fare /time of trip)
# time, geohash, list of tuple ( fare, time ,ratio)
possible_rewards = np.array(self.final_data_structure[s_time][new_geohash])
if len (possible_rewards) ==0:
r_t = -.1 # we do not have information for this time and geohash, don't go here. waste gass
fare_t = 0 # no information so the fare = 0
s_time1 = s_time+10 # assume this took ten minutes
else:
possible_rewards = np.random.randint(0,len(possible_rewards))
r_t = possible_rewards[possible_rewards][2] # get the ratio of fare / trip time
fare_t = possible_rewards[possible_rewards][0]
# get the trip length
s_time1 = s_time + possible_rewards[possible_rewards][1]
#r_t = np.random.choice(possible_rewards)
s_geohash1 = self.list_of_geohash_index[new_geohash]
# store the transition in D
if s_time1 <= 2350: # The last possible time | |
from djitellopy import Tello
import cv2
import socket
import numpy as np
import time
import datetime
import imutils
def rescale_frame(frame, percent=75):
    """Return *frame* resized to *percent* of its original width and height.

    :param frame: BGR image (numpy array, shape (h, w, 3))
    :param percent: target size as a percentage of the original
    :return: the resized image (INTER_AREA, good for shrinking)
    """
    new_w = int(frame.shape[1] * percent / 100)
    new_h = int(frame.shape[0] * percent / 100)
    return cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
def initializeTello():
    """Create and connect a Tello drone, zero all velocities, restart video.

    :return: (connection, drone) — `connection` is whatever Tello.connect()
        returned; the drone is only configured when it is truthy.
    """
    drone = Tello()
    connection = drone.connect()
    if connection:
        # Start from a known-idle state before any control loop runs.
        for attr in ("for_back_velocity", "left_right_velocity",
                     "up_down_velocity", "yaw_velocity", "speed"):
            setattr(drone, attr, 0)
        # Restart the stream so we don't inherit a stale decoder state.
        drone.streamoff()
        drone.streamon()
        print(f"\n\n\n\n\nBATTERY: {drone.get_battery()}")
    return connection, drone
def telloGetFrame(drone):
    """Return the most recent frame from the drone's video stream."""
    return drone.get_frame_read().frame
def findObjectHaar(img):
    """Detect faces with a Haar cascade and return [cx, cy, h] of the largest.

    Draws a red rectangle around every detected face on *img* (in place).
    Falls back to the frame centre with a nominal height of 200 px when no
    face is found, so the tracking PID always has a target.

    :param img: BGR frame from the drone
    :return: [center_x, center_y, box_height] of the biggest face, or the
        frame-centre fallback
    """
    # PERF FIX: load the cascade once and reuse it — the original constructed
    # a CascadeClassifier (disk read + parse) on every single video frame.
    if not hasattr(findObjectHaar, "_cascade"):
        findObjectHaar._cascade = cv2.CascadeClassifier('../Haar/haarcascade_frontalface_default.xml')
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = findObjectHaar._cascade.detectMultiScale(imgGray, 1.2, 4)
    best = None  # (area, cx, cy, h) of the biggest face so far
    for (x, y, w, h) in faces:
        # drawing face boundary
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cx = x + w // 2
        cy = y + h // 2
        if best is None or w * h > best[0]:
            best = (w * h, cx, cy, h)
    if best is not None:
        return [best[1], best[2], best[3]]
    return [img.shape[1] // 2, img.shape[0] // 2, 200]
def initYOLO():
    """Load the YOLOv3 network and its class-name list, CUDA-accelerated.

    :return: (classNames, net, whT) — class labels, the dnn network, and the
        square blob input size used for inference.
    """
    whT = 320  # input width/height used when converting frames to blobs
    # Read the class labels, one per line, from the coco-style names file.
    with open("../YOLOv3/a.names", 'rt') as f:
        classNames = f.read().rstrip('\n').split('\n')
    # Build the network from the Darknet config + weights and run it on CUDA.
    net = cv2.dnn.readNetFromDarknet("../YOLOv3/a.cfg", "../YOLOv3/a.weights")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    return classNames, net, whT
def progYOLO(img, net, whT):
    """Run one YOLO forward pass on *img* and return the raw output arrays."""
    blob = cv2.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False) #Convert Image to a format the network can take (blobs)
    net.setInput(blob) #Input the image to the network
    layerNames = net.getLayerNames() #Names of all layers in network (relu, conv etc...)
    # NOTE(review): `i[0]` assumes getUnconnectedOutLayers() returns Nx1 arrays
    # (OpenCV < 4.5.4); newer OpenCV returns a flat array — confirm the
    # installed version matches.
    outputNames = [(layerNames[i[0] - 1]) for i in net.getUnconnectedOutLayers()] #Find names of output layers (3 layers)
    outputs = net.forward(outputNames)# Returns outputs as numpy.2darray from the layernames forwarded.
    # (rows , columns) = (boxnumber , ( 0-4 = cx,cy,w,h, score of how
    # likely the box contain an object and how accurate the boundary
    # box is, rest is probability per classes) )
    return outputs
def findObjectYOLO(outputs, img, classNames, classNumber):
    """Parse YOLO outputs, keep detections of classNames[classNumber], and
    return [cx, cy, h] of the largest one.

    Draws the chosen bounding box and its label/confidence on *img* in place.
    Returns the frame centre with a nominal height of 200 when nothing of the
    tracked class is detected, so the PID loop always has a target.

    :param outputs: raw arrays from progYOLO (rows: cx, cy, w, h, objectness,
        then one score per class, all normalized to frame size)
    :param classNumber: index into classNames of the class to track
    """
    toTrack = classNames[classNumber]
    confThreshold = 0.3  # lower value, more boxes (but worse confidence per box)
    nmsThreshold = 0.5   # lower value, less overlaps
    hT, wT, _ = img.shape
    bbox = []
    classIndices = []
    confs = []
    for output in outputs:  # three YOLO output layers
        for det in output:  # one detection row per anchor position
            scores = det[5:]
            classIndex = np.argmax(scores)
            confidence = scores[classIndex]
            if confidence > confThreshold:
                # Convert normalized centre/size to pixel top-left + size.
                w, h = int(det[2] * wT), int(det[3] * hT)
                x, y = int((det[0] * wT) - w / 2), int((det[1] * hT) - h / 2)
                bbox.append([x, y, w, h])
                classIndices.append(classIndex)
                confs.append(float(confidence))
    # Non-max suppression: keep the highest-confidence box per object.
    indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    tracked = []
    for i in indices:
        i = i[0]  # NOTE(review): flattening assumes OpenCV < 4.5.4 output shape
        if classNames[classIndices[i]] == toTrack:
            tracked.append(i)
    if not tracked:
        return [img.shape[1] // 2, img.shape[0] // 2, 200]
    # BUG FIX: the old code used `maxVal` (a position in the *filtered* list)
    # to index `bbox` directly, and drew/returned the last-iterated box rather
    # than the largest one. Select the largest tracked box explicitly.
    best = max(tracked, key=lambda idx: bbox[idx][2] * bbox[idx][3])
    x, y, w, h = bbox[best]
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
    cv2.putText(img, f'{classNames[classIndices[best]].upper()} {int(confs[best] * 100)}%',
                (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)
    # Return the box centre and height, matching findObjectHaar's contract.
    return [x + w / 2, y + h / 2, h]
def findObjectHSV(img, minHSV=(28, 69, 100), maxHSV=(39, 237, 255)):
    """Track the largest blob inside an HSV threshold band.

    Draws the enclosing circle on *img* in place and returns
    [center_x, center_y, scaled_radius]; falls back to the frame centre with
    a nominal size of 200 when no sufficiently large blob is found.

    NOTE(review): the default thresholds are always overridden by the six
    comma-separated ints in ../HSV/threshold.txt (min H,S,V then max H,S,V) —
    confirm that shadowing the parameters is intended.
    """
    # BUG FIX: the threshold file was opened but never closed.
    with open("../HSV/threshold.txt", 'r') as f:
        threshold = list(map(int, f.read().split(',')))
    minHSV = (threshold[0], threshold[1], threshold[2])
    maxHSV = (threshold[3], threshold[4], threshold[5])
    # Blur first so the mask isn't speckled, then threshold in HSV space and
    # clean the mask up with a couple of erode/dilate passes.
    blurred = cv2.GaussianBlur(img, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, minHSV, maxHSV)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    if len(contours) > 0:
        c = max(contours, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # BUG FIX: guard against a degenerate contour with zero area, which
        # made the centroid computation divide by zero.
        if M["m00"] != 0 and radius > 20:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            # Scale the radius so it is comparable to a bounding-box height.
            return [center[0], center[1], int(radius * 4.5)]
    return [img.shape[1] // 2, img.shape[0] // 2, 200]
def trackObject(drone, info, pInfo, w, h, pidY, pidX, pidZ, pidYaw, pError, sliderVal, frame, mode):
    """PID-steer the drone so the detected object stays centred at the target size.

    :param info: current detection [cx, cy, box_height] in pixels
    :param pInfo: previous detection (returned, updated to *info*)
    :param w, h: frame size (Tello stream: 960x720, 4:3)
    :param pidY/pidX/pidZ/pidYaw: [P, I, D] gain triples per axis
    :param pError: previous [lr, fb, ud] errors in percent
    :param sliderVal: UI slider (around 50) shifting the target box height
    :param mode: True = rotate (yaw) toward target, False = translate sideways
    :return: (pInfo, error, plotPID) — updated previous info, the new percent
        errors, and the speeds actually applied (for plotting)
    """
    plotPID = [0, 0, 0]   # speeds applied this tick, for the UI plot
    error = [0, 0, 0]     # left/right, for/back, up/down (percent of half-frame)
    speed = [0, 0, 0, 0]  # left/right, for/back, up/down, rotate
    # current detection
    cx = info[0]
    cy = info[1]
    bh = info[2]
    # Target box height in px, shifted by the slider; 180 approximates
    # 1/6 * h + h/10 (head ~25 cm; desired box h/6 = 120 px => 1 cm ~ 4.8 px).
    percentH = 180 - (sliderVal - 50) * 4
    # Errors normalized so +-100 corresponds to the frame edge / full-size miss.
    error[0] = (cx - w // 2) / (w / 2) * 100      # LEFT/RIGHT
    error[1] = (bh - percentH) / percentH * 100   # FOR/BACK
    error[2] = (cy - h // 2) / (h / 2) * 100      # UP/DOWN
    # PID form used throughout: P*e + I*(e + pe) + D*(e - pe).
    if mode:
        # rotation - Yaw
        speed[3] = pidYaw[0] * error[0] + pidYaw[1] * (error[0] + pError[0]) + pidYaw[2] * (error[0] - pError[0])
        speed[3] = int(np.clip(speed[3], -100, 100))
    else:
        # Y - left/right
        speed[0] = pidY[0] * error[0] + pidY[1] * (error[0] + pError[0]) + pidY[2] * (error[0] - pError[0])
        speed[0] = int(np.clip(speed[0], -100, 100))
    # X - forward/back (negated: positive error means the box is too big / too close)
    speed[1] = ((pidX[0] * error[1]) + (pidX[1] * (error[1] + pError[1])) + (pidX[2] * (error[1] - pError[1]))) * (-1)
    if speed[1] >= 0:
        speed[1] = speed[1] * 2  # push forward harder than backward
    speed[1] = int(np.clip(speed[1], -100, 100))
    # Z - up/down
    # BUG FIX: the middle (integral-style) term used (error - pError) like the
    # derivative term; every other axis uses (error + pError).
    speed[2] = ((pidZ[0] * error[2]) + (pidZ[1] * (error[2] + pError[2])) + (pidZ[2] * (error[2] - pError[2]))) * (-1)
    speed[2] = int(np.clip(speed[2], -100, 100))
    # Apply speeds: exactly one of yaw / left-right is active depending on mode.
    if mode:
        drone.yaw_velocity = speed[3]
        drone.left_right_velocity = 0
    else:
        drone.left_right_velocity = speed[0]
        drone.yaw_velocity = 0
    drone.for_back_velocity = speed[1]
    drone.up_down_velocity = speed[2]
    updateMovement(drone)
    # Carry the current detection into the next tick.
    pInfo = info
    if mode:
        plotPID = [speed[3], speed[1], speed[2]]
    else:
        plotPID = [speed[0], speed[1], speed[2]]
    return pInfo, error, plotPID
def droneData(droneStates):
    """Continuously read Tello state packets (UDP :8890) into *droneStates*.

    Keeps only the ~10 most recent decoded state strings. Runs until the
    socket errors out (e.g. it is closed elsewhere), then closes it and
    returns. Intended to run on a background thread.

    :param droneStates: shared list the OSD reads its latest state from
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', 8890))
    count = 0
    while True:
        try:
            data, _server = sock.recvfrom(1518)
            droneStates.append(data.decode(encoding="utf-8"))
            # BUG FIX: the counter was never incremented, so the trim below
            # never ran and the list grew without bound.
            count += 1
            if count > 10:
                droneStates.pop(0)
        except Exception as err:
            print(err)
            sock.close()  # BUG FIX: was `sock.close` (attribute access, never called)
            break
def drawOSD(droneStates, frame, pulse, mode, trackOn, classNames, classNumber, trackMethod):
# pitch:0;roll:0;yaw:0;vgx:0;vgy:0;vgz:0;templ:82;temph:85;tof:48;h:0;bat:20;baro:163.98;time:0;agx:6.00;agy:-12.00;agz:-1003.00;
states = droneStates[len(droneStates)-1].split(";")
pitch = states[0][6:]
roll = states[1][5:]
yaw = states[2][4:]
vgx = states[3][4:]
vgy = states[4][4:]
vgz = states[5][4:]
templ = states[6][6:] #
temph = states[7][6:] #
tof = float(states[8][4:])/100
height = float(states[9][2:])/100
bat = states[10][4:]
baro = int(float(states[11][5:]))
time = states[12][5:]
agx = float(states[13][4:])//10
agy = float(states[14][4:])//10
agz = float(states[15][4:])//10
avgTemp = (int(templ) + int(temph))//2
def battFill(percent, img):
percent = int(percent)
width = 66 / 100 * percent
start = 960
end = int(start + width)
if percent > 50:
color = (0,255,0)
elif percent > 20:
color = (0, 195, 255)
else:
color = (0,0,255)
if percent == 100:
batStart = 958
elif percent >= 10:
batStart = 968
else:
batStart = 980
cv2.rectangle(img, (start, 70), (end, 96), color, -1)
cv2.putText(img, str(percent)+"%" , (batStart, 91), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255,255,255), 1)
def placeIcon(img, iconPath, location, scale):
icon = cv2.imread(iconPath)
h, w, _ = icon.shape
icon = cv2.resize(icon, (int(h*scale),int(w*scale)))
h, w, _ = icon.shape
xStart = location[0] - int(w/2)
yStart = location[1] - int(h/2)
xEnd = xStart | |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CUTOFFBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_cutoffbert import CutoffBertConfig
from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings
from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder
from ..bert.modeling_bert import BertPooler as CutoffBertPooler
logger = logging.get_logger(__name__)
# Names used by the auto-generated code-sample docstrings on the model classes.
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "CutoffBertConfig"
_TOKENIZER_FOR_DOC = "CutoffBertTokenizer"
# NOTE(review): this list reuses plain BERT checkpoint names — presumably
# CutoffBERT is weight-compatible with vanilla BERT; confirm before relying on it.
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "bert-base-uncased",
    "bert-large-uncased",
    "bert-base-cased",
    "bert-large-cased",
    "bert-base-multilingual-uncased",
    "bert-base-multilingual-cased",
    # See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its slash-separated scope
    path onto the PyTorch module tree via getattr, and copies the array into
    the matching parameter. Returns *model* with weights loaded in place.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # Walk the module tree one scope component at a time; components like
        # "layer_0" are split into attribute name + numeric index.
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # TF naming -> PyTorch naming: kernel/gamma are weights, bias/beta biases.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    # NOTE(review): this `continue` only skips the current scope
                    # component (inner loop), not the whole variable — confirm
                    # that is the intended behavior.
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
            if m_name[-11:] == "_embeddings":
                pointer = getattr(pointer, "weight")
            elif m_name == "kernel":
                # TF stores dense kernels transposed relative to nn.Linear.
                array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class CutoffBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CutoffBertConfig
    load_tf_weights = load_tf_weights_in_cutoffbert
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize one submodule's parameters (invoked by init_weights)."""
        std = self.config.initializer_range
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Embedding):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
CUTOFFBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
CUTOFFBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertModel(CutoffBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME> and <NAME>.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
    def __init__(self, config, add_pooling_layer=True):
        """Build embeddings, encoder and (optionally) pooler, then init weights.

        :param config: CutoffBertConfig with all model hyper-parameters
        :param add_pooling_layer: pass False to skip the pooler when only
            token-level outputs are needed
        """
        super().__init__(config)
        self.config = config
        self.embeddings = CutoffBertEmbeddings(config)
        self.encoder = CutoffBertEncoder(config)
        self.pooler = CutoffBertPooler(config) if add_pooling_layer else None
        self.init_weights()
    def get_input_embeddings(self):
        """Return the word-embedding module (PreTrainedModel API hook)."""
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        """Replace the word-embedding module (PreTrainedModel API hook)."""
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            # Delegate to the layer's attention module, which rebuilds its projections.
            self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states | |
in self.meal_plans['Present']:
for subitem in self.meal_plans['Present'][item]:
for subsubitem in self.meal_plans['Present'][item][subitem]:
self.totals[i] += int(self.get_nutrient(subsubitem, temp_nutrients[i][0]))
self.totals[i] /= 28
self.totals[i] = int(self.totals[i])
if self.nutrition_labels[i] != None:
self.nutrition_labels[i].setText(temp_nutrients[i][0] + ": " + str(self.totals[i]) + temp_nutrients[i][2])
def open_meal_manager(self):
global manager
if self.selected_week != "Past":
manager = MealPlanPanel(self, self.selected_week, self.selected_day)
manager.show()
def index_past_week(self, week_string):
week = "Past"
index = 0
items = [self.past_combobox.itemText(i) for i in range(self.past_combobox.count())]
for item in items:
if (item == week_string):
break
index += 1
self.change_week(week, index)
  def update_calorie_goal(self):
    """Refresh the calorie progress bar and labels from the database.

    Reloads the meal plans and stored goal, sums the selected day's calories,
    then updates the progress bar plus the goal / calories-left labels.
    """
    self.meal_plans = json.loads(self.db_wrapper.fetch_local_column(self.table_name, "meal_plans"))
    # calorie_goal comes back as a string (it is joined into label text below).
    self.calorie_goal = self.db_wrapper.fetch_local_column(self.table_name, "calorie_goal")
    self.progress_bar.setMaximum(int(self.calorie_goal))
    self.calorie_goal_label.setText(" ".join(["Daily Goal: ", self.calorie_goal, "kcal"]))
    self.total_day_calorie = 0
    # "Past" plans carry one extra nesting level (the selected past week).
    if self.selected_week != "Past":
      for i in self.meal_plans[self.selected_week][self.selected_day]:
        for item in self.meal_plans[self.selected_week][self.selected_day][i]:
          # NOTE(review): nutrients[1] is assumed to be the calories entry — confirm.
          self.total_day_calorie += item["nutrition"]["nutrients"][1]["amount"]
    else:
      for i in self.meal_plans[self.selected_week][self.selected_past_week][self.selected_day]:
        for item in self.meal_plans[self.selected_week][self.selected_past_week][self.selected_day][i]:
          self.total_day_calorie += item["nutrition"]["nutrients"][1]["amount"]
    self.progress_bar.setValue(self.total_day_calorie)
    # Cap the bar at the goal so exceeding it doesn't overflow the widget.
    if int(self.total_day_calorie) > int(self.calorie_goal):
      self.progress_bar.setValue(int(self.calorie_goal))
    calories_left = self.progress_bar.maximum() - self.total_day_calorie
    self.calorie_label.setText((str(calories_left) + " calories left from goal"))
    # When over the goal, calories_left is negative — show its magnitude instead.
    if int(self.total_day_calorie) > int(self.calorie_goal):
      self.calorie_label.setText(str(abs(calories_left)) + " calories over goal")
def change_week(self, week, past_week_index):
self.selected_week = week
self.selected_past_week = past_week_index
if self.daily_summary == True:
self.recalculate_daily_totals()
self.repopulate_table()
self.update_calorie_goal()
def change_day(self, day):
self.selected_day = day
if self.daily_summary == True:
self.recalculate_daily_totals()
self.repopulate_table()
self.update_calorie_goal()
  def repopulate_table(self):
    """Rebuild the meal table for the currently selected week/day.

    Clears every cell, resizes the column count to the number of meals, then
    fills one column per meal: a removable label per food, plus (except for
    past weeks, which are read-only) a trailing "+" button to add food.
    """
    self.meal_plans = json.loads(self.db_wrapper.fetch_local_column(self.table_name, "meal_plans"))
    # Drop all existing cell widgets before rebuilding.
    for i in range (self.table.rowCount()):
      for j in range(self.table.columnCount()):
        self.table.removeCellWidget(i, j)
    # "Past" plans carry one extra nesting level (the selected past week).
    if self.selected_week == "Past":
      meals = self.meal_plans[self.selected_week][self.selected_past_week][self.selected_day]
    else:
      meals = self.meal_plans[self.selected_week][self.selected_day]
    number_of_meals = len(meals)
    meal_entries = [None] * number_of_meals
    plus_button = [None] * number_of_meals
    for i in range(number_of_meals):
      plus_button[i] = QPushButton("+")
    # Grow/shrink the table to exactly one column per meal.
    while self.table.columnCount() < number_of_meals:
      self.table.insertColumn(1)
    while self.table.columnCount() > number_of_meals:
      self.table.removeColumn(1)
    buttons = plus_button
    # j = column (meal), i = meal index, k = row within the current column.
    j = i = k = 0
    for item in meals:
      buttons[i].setFlat(True)
      buttons[i].clicked.connect(partial(self.add_button_func, self.selected_week, self.selected_day, item))
      self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))
      self.table.horizontalHeader().setSectionResizeMode(i, QHeaderView.Stretch)
      for key in meals[item]:
        # One row per food: "<amount>g <name>" plus an "x" button to remove it.
        widget = QLabel(str(int(key['amount'])) + "g " + key['name'])
        remove_widget = QPushButton("x")
        remove_widget.setFixedSize(24, 24)
        remove_widget.clicked.connect(partial(self.remove_food, key, item))
        helper_layout = QHBoxLayout()
        helper_layout.addWidget(widget)
        helper_layout.addWidget(remove_widget)
        wrapper_widget = QWidget()
        wrapper_widget.setLayout(helper_layout)
        self.table.setCellWidget(k, j, wrapper_widget)
        k += 1
      # Past weeks are read-only: no "+" button.
      if self.selected_week != 'Past':
        self.table.setCellWidget(k, j, buttons[i])
      j += 1
      i += 1
      k = 0
    self.update_calorie_goal()
  def remove_food(self, food, meal):
    """Delete a food entry from the given meal on the selected day, then
    refresh the totals and the table.

    :param food: food dict (has at least a 'name' key)
    :param meal: name of the meal the food belongs to
    """
    # NOTE(review): "Present" passes food['name'] while "Future" passes the
    # whole dict, and "Past" weeks are not handled at all — confirm
    # delete_food_from_meal really expects this asymmetry (the last two flags
    # look like present/future selectors).
    if self.selected_week == "Present":
      self.db_wrapper.delete_food_from_meal(food['name'], meal, self.selected_day, True, False)
    if self.selected_week == "Future":
      self.db_wrapper.delete_food_from_meal(food, meal, self.selected_day, False, True)
    self.meal_plans = json.loads(self.db_wrapper.fetch_local_column(self.table_name, "meal_plans"))
    if self.daily_summary == True:
      self.recalculate_daily_totals()
    else:
      self.calculate_monthly_totals()
    self.repopulate_table()
def get_nutrient(self, info, nutrient):
index = 0
for i in info["nutrition"]["nutrients"]:
if info["nutrition"]["nutrients"][index]["title"] == nutrient:
return info["nutrition"]["nutrients"][index]["amount"]
index += 1
return 0
def add_button_func(self, week, day, meal):
    """Open the food-database search panel for the given week/day/meal.

    The panel is kept in a module-level global so it is not garbage
    collected while it is shown.
    """
    global panel
    panel = FoodDBSearchPanel(self, week, day, meal)
    panel.show()
def calculate_calorie_intake(self, weight, height, age, gender, activity, weight_goal):
    """Derive the daily calorie goal from BMR, activity level and weekly
    weight-loss goal, persist it, and refresh the calorie labels.

    :param weight: body weight in kg
    :param height: height in cm
    :param age: age in years
    :param gender: passed through to calculate_bmr
    :param activity: one of the activity level strings below
    :param weight_goal: weekly loss goal as a string ("0.25", "0.5", "1")
    """
    bmr = self.calculate_bmr(weight, height, age, gender)
    # activity multipliers applied on top of the base BMR
    activity_factors = {
        "Sedentary": 1.2,
        "Lightly active": 1.375,
        "Moderately active": 1.465,
        "Active": 1.55,
        "Very active": 1.725,
        "Extra active": 1.9,
    }
    # weekly weight-loss goal scales intake down:
    # maintain 100%, 0.25 kg/w 90%, 0.5 kg/w 79%, 1 kg/w 59%
    goal_factors = {"0.25": 0.9, "0.5": 0.79, "1": 0.59}
    bmr *= activity_factors.get(activity, 1.0)
    bmr *= goal_factors.get(weight_goal, 1.0)
    self.db_wrapper.update_table_column(self.table_name, "calorie_goal", int(bmr))
    self.calorie_goal = str(int(bmr))
    self.calorie_goal_label.setText(" ".join(["Daily Goal: ", self.calorie_goal, "kcal"]))
    self.update_calorie_goal()
def calculate_bmr(self, weight, height, age, gender):
    """Base BMR via the Mifflin-St Jeor equation (no activity factor applied).

    :param weight: body weight in kg
    :param height: height in cm
    :param age: age in years
    :param gender: "female" applies the female constant, anything else male
    :return: truncated BMR in kcal/day
    """
    base = weight * 10 + height * 6.25 - age * 5
    offset = -161 if gender == "female" else 5
    return int(base + offset)
def show_intake_entry(self):
    """Pop up the manual daily-intake editor.

    Stored in a module-level global so the window is not garbage collected
    while it is open.
    """
    global entry
    entry = EditDailyIntake(self)
    entry.show()
class EditDailyIntake(QWidget):
    """Frameless pop-up dialog for manually setting the daily calorie goal."""
    def __init__(self, parent):
        """Create the dialog.

        :param parent: panel whose calorie goal is refreshed on confirm
        """
        super().__init__()
        self.this_parent = parent
        self.db_wrapper = DatabaseWrapper()
        self.table_name = "Nutrition"
        # dark theme shared with the rest of the nutrition UI
        self.setStyleSheet(
        """QWidget{
        background-color: #232120;
        color:#c7c7c7;
        font-weight: bold;
        font-family: Montserrat;
        font-size: 16px;
        }
        QPushButton{
        background-color: rgba(0, 0, 0, 0);
        border: 1px solid;
        font-size: 18px;
        font-weight: bold;
        border-color: #808080;
        min-height: 28px;
        white-space:nowrap;
        text-align: left;
        padding-left: 5%;
        font-family: Montserrat;
        }
        QPushButton:hover:!pressed{
        border: 2px solid;
        border-color: #747474;
        }
        QPushButton:pressed{
        border: 2px solid;
        background-color: #323232;
        border-color: #6C6C6C;
        }
        QLineEdit{
        padding: 6px;
        background-color: rgb(33,33,33);
        border: 1px solid;
        border-color: #cdcdcd;
        }""")
        dialog_layout = QVBoxLayout()
        self.setWindowFlags(Qt.FramelessWindowHint)
        dialog_layout.addLayout(self.create_input_window())
        self.setLayout(dialog_layout)

    def create_input_window(self):
        """Build the label + line edit + cancel/confirm button row."""
        layout = QVBoxLayout()
        entry_label = QLabel("Edit Intake")
        self.calorie_line_edit = QLineEdit()
        # integers only: the goal is a whole kcal count
        self.calorie_line_edit.setValidator(QIntValidator())
        helper_layout = QHBoxLayout()
        cancel_button = QPushButton("Cancel")
        cancel_button.clicked.connect(lambda: self.close_button())
        confirm_button = QPushButton("Confirm")
        confirm_button.clicked.connect(lambda: self.confirm_button())
        helper_layout.addWidget(cancel_button)
        helper_layout.addWidget(confirm_button)
        layout.addWidget(entry_label)
        layout.addWidget(self.calorie_line_edit)
        layout.addLayout(helper_layout)
        return layout

    def close_button(self):
        """Dismiss the dialog without saving."""
        self.close()

    def confirm_button(self):
        """Persist the entered goal (if non-empty) and refresh the parent."""
        if self.calorie_line_edit.text() != "":
            # NOTE(review): stores the raw text, while the auto-calculated path
            # stores int(bmr) — confirm the DB column tolerates both types.
            self.db_wrapper.update_table_column(self.table_name, "calorie_goal", self.calorie_line_edit.text())
            self.this_parent.update_calorie_goal()
        self.close()
class FoodDBSearchPanel(QWidget):
    """Modal search window for the online food database.

    Lets the user search a food for a given amount, shows the matching
    results with their calorie counts, and adds the clicked result to a
    meal of the chosen week/day.
    """
    def __init__(self, parentobj, week, day, meal):
        """
        :param parentobj: nutrition panel to refresh after an item is added
        :param week: "Present" or "Future" — selects which plan is updated
        :param day: day name the meal belongs to
        :param meal: meal name the selected food is added to
        """
        super().__init__()
        self.db_wrapper = DatabaseWrapper()
        self.parentobj = parentobj
        self.week = week
        self.day = day
        self.meal = meal
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.Tool)
        self.setWindowModality(Qt.ApplicationModal)
        self.setMinimumWidth(430)
        # dark theme shared with the rest of the nutrition UI
        self.setStyleSheet(
        """QWidget{
        background-color: #232120;
        color:#c7c7c7;
        font-weight: bold;
        font-family: Montserrat;
        font-size: 16px;
        }
        QPushButton{
        background-color: rgba(0, 0, 0, 0);
        border: 1px solid;
        font-size: 18px;
        font-weight: bold;
        border-color: #808080;
        min-height: 28px;
        white-space:nowrap;
        text-align: left;
        padding-left: 5%;
        font-family: Montserrat;
        }
        QPushButton:hover:!pressed{
        border: 2px solid;
        border-color: #747474;
        }
        QPushButton:pressed{
        border: 2px solid;
        background-color: #323232;
        border-color: #6C6C6C;
        }
        QLineEdit{
        padding: 6px;
        background-color: rgb(33,33,33);
        border: 1px solid;
        border-color: #cdcdcd;
        }
        QScrollArea{
        background-color: #1A1A1A;
        }""")
        layout = QVBoxLayout()
        layout.addLayout(self.create_search_bar())
        layout.addWidget(self.create_search_results())
        layout.addLayout(self.create_confirm_cancel())
        self.setLayout(layout)

    def create_search_bar(self):
        """Build the query line edit, amount field and search button."""
        search_bar_layout = QHBoxLayout()
        search_bar_line_edit = QLineEdit()
        search_bar_line_edit.setPlaceholderText("Search")
        self.search_bar_amount = QLineEdit()
        self.search_bar_amount.setPlaceholderText("Amount")
        self.search_bar_amount.setFixedWidth(120)
        self.search_bar_amount.setValidator(QDoubleValidator())
        search_icon = QIcon(icons["search"])
        search_bar_button = QPushButton()
        search_bar_button.setIcon(search_icon)
        # only wire the search when a DB connection exists (offline-safe)
        if self.db_wrapper.connection_exists:
            search_bar_button.clicked.connect(lambda:self.update_search_results(search_bar_line_edit.text()))
            # pressing Enter in either field triggers the search
            search_bar_line_edit.returnPressed.connect(search_bar_button.click)
            self.search_bar_amount.returnPressed.connect(search_bar_button.click)
        search_bar_layout.addWidget(search_bar_line_edit)
        search_bar_layout.addWidget(self.search_bar_amount)
        search_bar_layout.addWidget(search_bar_button)
        return search_bar_layout

    def create_search_results(self):
        """Build the (initially empty) scrollable results area."""
        self.result_layout = QVBoxLayout()
        self.result_layout.setAlignment(Qt.AlignTop)
        #with open('temp.json', 'r') as datafile:
        # food_info_temp = json.load(datafile)
        #response_button = [None] * len(food_info_temp)
        #for i in range(len(food_info_temp)):
        # response_button[i] = QPushButton(str(food_info_temp[i]["name"]) + " " + str(self.get_nutrient(food_info_temp[i], "Calories")))
        # response_button[i].clicked.connect(partial(self.result_to_data, food_info_temp[i]))
        # self.result_layout.addWidget(response_button[i])
        self.scroll_area = QScrollArea()
        self.scroll_area.setWidgetResizable(True)
        widg = QWidget()
        widg.setLayout(self.result_layout)
        self.scroll_area.setWidget(widg)
        self.scroll_area.setFixedSize(415, 550)
        return self.scroll_area

    def get_nutrient(self, info, nutrient):
        """Return the amount of *nutrient* from a food-info dict, or 0.

        NOTE(review): duplicate of the identically-named helper on the
        nutrition panel — could be shared.
        """
        index = 0
        for i in info["nutrition"]["nutrients"]:
            if info["nutrition"]["nutrients"][index]["title"] == nutrient:
                return info["nutrition"]["nutrients"][index]["amount"]
            index += 1
        return 0

    def update_search_results(self, query):
        """Query the remote food DB and rebuild the result buttons.

        Does nothing unless an amount has been entered (the amount is needed
        to scale the nutrition info).
        """
        if self.search_bar_amount.text() != "":
            # clear previous results
            for i in reversed(range(self.result_layout.count())):
                self.result_layout.itemAt(i).widget().setParent(None)
            api = FoodDatabase()
            # fetch up to 512 candidate foods, then per-food nutrition info
            response = api.food_search(query, 512)
            response_button = [None] * len(response)
            food_info = [None] * len(response)
            for i in range(len(response)):
                food_info[i] = api.food_info(response[i]["id"], "g", float(self.search_bar_amount.text()))
                response_button[i] = QPushButton(str(food_info[i]["name"]) + " " + str(self.get_nutrient(food_info[i], "Calories")))
                response_button[i].clicked.connect(partial(self.result_to_data, food_info[i]))
                self.result_layout.addWidget(response_button[i])

    def create_confirm_cancel(self):
        """Build the bottom button row.

        NOTE(review): confirm is wired to closefunc exactly like cancel and is
        never added to the layout (addWidget is commented out); results are
        committed immediately from result_to_data, so confirm looks vestigial.
        """
        layout = QHBoxLayout()
        cancel = QPushButton("Cancel")
        cancel.clicked.connect(lambda:self.closefunc())
        confirm = QPushButton("Confirm")
        confirm.clicked.connect(lambda:self.closefunc())
        layout.addWidget(cancel)
        #layout.addWidget(confirm)
        return layout

    def result_to_data(self, select_response):
        """Persist the clicked result into the meal plan and refresh parent."""
        if self.week == "Present":
            self.db_wrapper.update_meal(self.meal, select_response, self.day, True, False)
        elif self.week == "Future":
            self.db_wrapper.update_meal(self.meal, select_response, self.day, False, True)
        #test stage
        self.parentobj.meal_plans = json.loads(self.db_wrapper.fetch_local_column(self.parentobj.table_name, "meal_plans"))
        self.parentobj.repopulate_table()
        if self.parentobj.daily_summary == True:
            self.parentobj.recalculate_daily_totals()
        else:
            self.parentobj.calculate_monthly_totals()
        self.close()
        return

    def closefunc(self):
        """Close the search panel."""
        self.close()
class MealPlanPanel(QWidget):
def __init__(self, parent, week, day):
    """Modal panel for adding/renaming/removing meals of one day.

    :param parent: panel to notify of changes
    :param week: plan week key ("Present"/"Future" style)
    :param day: day name within the week
    """
    super().__init__()
    self.this_parent = parent
    self.week = week
    self.day = day
    self.setWindowFlags(Qt.FramelessWindowHint | Qt.Tool)
    self.setWindowModality(Qt.ApplicationModal)
    self.db_wrapper = DatabaseWrapper()
    # dark theme shared with the rest of the nutrition UI
    self.setStyleSheet(
    """QWidget{
    background-color: #232120;
    color:#c7c7c7;
    font-weight: bold;
    font-family: Montserrat;
    font-size: 16px;
    }
    QPushButton{
    background-color: rgba(0, 0, 0, 0);
    border: 1px solid;
    font-size: 18px;
    font-weight: bold;
    border-color: #808080;
    min-height: 28px;
    white-space:nowrap;
    text-align: left;
    padding-left: 5%;
    font-family: Montserrat;
    }
    QPushButton:hover:!pressed{
    border: 2px solid;
    border-color: #747474;
    }
    QPushButton:pressed{
    border: 2px solid;
    background-color: #323232;
    border-color: #6C6C6C;
    }
    QLineEdit{
    padding: 6px;
    background-color: rgb(33,33,33);
    border: 1px solid;
    border-color: #cdcdcd;
    }
    QScrollArea{
    background-color: #1A1A1A;
    }""")
    self.table_name = "Nutrition"
    # meal plans are kept as JSON in the local DB
    self.meal_plans = json.loads(self.db_wrapper.fetch_local_column(self.table_name, "meal_plans"))
    self.this_layout = self.create_main_panel()
    self.setFixedSize(250, 500)
    #self.layout.addLayout(self.create_main_panel())
    self.setLayout(self.this_layout)
def create_main_panel(self):
    """Build the vertical list of meals, each with rename/remove controls,
    followed by Add and Close buttons."""
    layout = QVBoxLayout()
    day_meals = self.meal_plans[self.week][self.day]
    self.meal_number = len(day_meals)
    self.horizontal_layouts = [None] * self.meal_number
    self.meal_labels = [None] * self.meal_number
    self.meal_rename = [None] * self.meal_number
    self.meal_remove = [None] * self.meal_number
    add_button = QPushButton("Add (" + str(self.meal_number) + "/7)")
    add_button.clicked.connect(lambda:self.add_meal())
    close_button = QPushButton("Close")
    close_button.clicked.connect(lambda:self.close())
    for index, meal_name in enumerate(day_meals):
        row = QHBoxLayout()
        label = QLabel(meal_name)
        remove = QPushButton("-")
        remove.setFixedSize(32, 32)
        rename = QPushButton("E")
        rename.setFixedSize(32, 32)
        remove.clicked.connect(partial(self.remove_meal, meal_name))
        rename.clicked.connect(partial(self.open_rename_query, meal_name))
        row.addWidget(label)
        row.addWidget(rename)
        row.addWidget(remove)
        layout.addLayout(row)
        self.horizontal_layouts[index] = row
        self.meal_labels[index] = label
        self.meal_rename[index] = rename
        self.meal_remove[index] = remove
    layout.addWidget(add_button)
    layout.addWidget(close_button)
    return layout
def update(self):
self.meal_plans = json.loads(self.db_wrapper.fetch_local_column(self.table_name, "meal_plans"))
self.meal_number = len(self.meal_plans[self.week][self.day])
for i in range(len(self.horizontal_layouts)):
for j in reversed(range(self.horizontal_layouts[i].count())):
self.horizontal_layouts[i].itemAt(j).widget().setParent(None)
for i in reversed(range(self.this_layout.count())):
item = self.this_layout.itemAt(i)
if item.widget() != None:
item.widget().setParent(None)
self.horizontal_layouts = [None] * len(self.meal_plans[self.week][self.day])
self.meal_labels = [None] * len(self.meal_plans[self.week][self.day])
self.meal_rename = [None] * len(self.meal_plans[self.week][self.day])
self.meal_remove = [None] * len(self.meal_plans[self.week][self.day])
add_button = QPushButton("Add (" + str(self.meal_number) + "/7)")
add_button.clicked.connect(lambda:self.add_meal())
close_button = QPushButton("Close")
| |
import numpy as np
import numba
import scipy
import scipy.interpolate
from near_finder.utilities import fourier_derivative_1d
from near_finder.utilities import interp_fourier as _interp
from near_finder.utilities import have_better_fourier
if have_better_fourier:
from near_finder.utilities import interp_fourier2 as _interp2
def compute_local_coordinates(cx, cy, x, y, newton_tol=1e-12,
            interpolation_scheme='nufft', guess_ind=None, verbose=False, max_iterations=30):
    """
    Find (s, r) given (x, y) using the coordinates:
        x = X(s) + r n_x(s)
        y = Y(s) + r n_y(s)
    Where X, Y is given by cx, cy
    Uses a NUFFT based scheme if interpolation_scheme = 'nufft'
    And a polynomial based scheme if interpolation_scheme = 'polyi'
    The NUFFT scheme is more accurate and robust;
    the polynomial scheme is presumably faster (the original text claimed
    "more accurate" for both schemes — TODO confirm the intended trade-off)
    """
    if interpolation_scheme == 'nufft':
        return compute_local_coordinates_nufft(cx, cy, x, y, newton_tol, guess_ind, verbose, max_iterations)
    elif interpolation_scheme == 'polyi':
        return compute_local_coordinates_polyi(cx, cy, x, y, newton_tol, guess_ind, verbose, max_iterations)
    else:
        raise Exception('interpolation_scheme not recognized')
def compute_local_coordinates_nufft(cx, cy, x, y, newton_tol=1e-12,
            guess_ind=None, verbose=False, max_iterations=30):
    """
    Find (t, r) in the curve-local coordinates:
        x = X(t) + r n_x(t)
        y = Y(t) + r n_y(t)
    where the closed curve (X, Y) is given by the periodic nodes cx, cy.

    :param cx, cy: curve node coordinates, equispaced in the parameter
    :param x, y: query coordinates (any shape; flattened internally)
    :param newton_tol: convergence tolerance on the projection residual
    :param guess_ind: optional per-point starting node index; found by brute
        force (slow!) when None
    :param verbose: print the Newton residual each iteration
    :param max_iterations: safety cap on the Newton iteration count
    :return: (t, r) — parameter values and signed normal distances
    """
    x = x.flatten()
    y = y.flatten()
    n = cx.shape[0]
    dt = 2*np.pi/n
    ts = np.arange(n)*dt
    ik = 1j*np.fft.fftfreq(n, dt/(2*np.pi))
    # first and second parameter derivatives of the curve
    xp = fourier_derivative_1d(f=cx, d=1, ik=ik, out='f')
    yp = fourier_derivative_1d(f=cy, d=1, ik=ik, out='f')
    xpp = fourier_derivative_1d(f=cx, d=2, ik=ik, out='f')
    ypp = fourier_derivative_1d(f=cy, d=2, ik=ik, out='f')
    # speed
    sp = np.sqrt(xp*xp + yp*yp)
    isp = 1.0/sp
    # unit tangent vectors
    tx = xp*isp
    ty = yp*isp
    # unit normal vectors
    nx = ty
    ny = -tx
    # interpolation routines for the necessary objects
    if have_better_fourier:
        def interp(f):
            return _interp2(f)
    else:
        def interp(f):
            return _interp(f, x.size)
    nc_i = interp(nx + 1j*ny)
    c_i = interp(cx + 1j*cy)
    cp_i = interp(xp + 1j*yp)
    cpp_i = interp(xpp + 1j*ypp)
    # function for computing (d^2)_s and its derivative
    def f(t, x, y):
        C = c_i(t)
        X = C.real
        Y = C.imag
        Cp = cp_i(t)
        Xp = Cp.real
        Yp = Cp.imag
        return Xp*(X-x) + Yp*(Y-y), X, Y, Xp, Yp
    def fp(t, x, y, X, Y, Xp, Yp):
        Cpp = cpp_i(t)
        Xpp = Cpp.real
        Ypp = Cpp.imag
        return Xpp*(X-x) + Ypp*(Y-y) + Xp*Xp + Yp*Yp
    # brute force find of guess_inds if not provided (slow!)
    if guess_ind is None:
        guess_ind = np.empty(x.size, dtype=int)
        multi_guess_ind_finder(cx, cy, x, y, guess_ind)
    # get starting guess
    t = ts[guess_ind]
    # begin Newton iteration with a backtracking line search
    rem, X, Y, Xp, Yp = f(t, x, y)
    mrem = np.abs(rem).max()
    if verbose:
        print('Newton tol: {:0.2e}'.format(mrem))
    iteration = 0
    while mrem > newton_tol:
        J = fp(t, x, y, X, Y, Xp, Yp)
        delt = -rem/J
        line_factor = 1.0
        while True:
            t_new = t + line_factor*delt
            # Evaluate the residual inside the try only. The original code
            # also evaluated f(t_new, ...) once *before* the try, which both
            # doubled the work per line-search step and let a failing
            # evaluation escape the exception guard entirely.
            try:
                rem_new, X, Y, Xp, Yp = f(t_new, x, y)
                mrem_new = np.abs(rem_new).max()
                testit = True
            except Exception:
                testit = False
            # accept the step if it sufficiently decreases the residual,
            # or if the line search has shrunk far enough
            if testit and ((mrem_new < (1-0.5*line_factor)*mrem) or line_factor < 1e-4):
                t = t_new
                # put theta back in [0, 2 pi]
                t[t < 0] += 2*np.pi
                t[t > 2*np.pi] -= 2*np.pi
                rem = rem_new
                mrem = mrem_new
                break
            line_factor *= 0.5
        if verbose:
            print('Newton tol: {:0.2e}'.format(mrem))
        iteration += 1
        if iteration > max_iterations:
            raise Exception('Exceeded maximum number of iterations solving for coordinates .')
    # need to determine the sign now
    C = c_i(t)
    X = C.real
    Y = C.imag
    if True: # use nx, ny from guess_inds to determine sign
        NX = nx[guess_ind]
        NY = ny[guess_ind]
    else: # interpolate to get these
        NC = nc_i(t)
        NX = NC.real
        NY = NC.imag
    # pick the sign of r whose reconstruction error is smaller
    r = np.hypot(X-x, Y-y)
    xe1 = X + r*NX
    ye1 = Y + r*NY
    err1 = np.hypot(xe1-x, ye1-y)
    xe2 = X - r*NX
    ye2 = Y - r*NY
    err2 = np.hypot(xe2-x, ye2-y)
    sign = (err1 < err2).astype(int)*2 - 1
    return t, r*sign
# 6-point polynomial interpolation stencil on the reference interval [-1, 1]:
# MAT maps polynomial coefficients to values at the nodes; IMAT inverts it,
# mapping sampled values to polynomial coefficients.
snodes = np.linspace(-1, 1, 6)
MAT = np.vander(snodes, 6, increasing=True)
IMAT = np.linalg.inv(MAT)
@numba.njit(fastmath=True)
def _polyi(f, xx):
    # NOTE(review): this definition is dead code — it is immediately shadowed
    # by the second `_polyi` defined right below, which computes the same
    # 6-point local polynomial interpolant via IMAT.dot(F). One of the two
    # should be deleted.
    n = f.size
    h = 2*np.pi/n
    ix = int(xx//h)
    lb = (ix-2)*h
    ub = (ix+3)*h
    # map xx into the stencil's reference interval [-1, 1]
    scalex = 2*((xx-lb)/(ub-lb) - 0.5)
    p2 = scalex*scalex
    p3 = p2*scalex
    p4 = p3*scalex
    p5 = p4*scalex
    # accumulate each polynomial coefficient from the (periodic) stencil
    fout = 0.0
    for j in range(6):
        fout += IMAT[0, j]*f[(ix+j-2) % n]
    coef = 0.0
    for j in range(6):
        coef += IMAT[1, j]*f[(ix+j-2) % n]
    fout += coef*scalex
    coef = 0.0
    for j in range(6):
        coef += IMAT[2, j]*f[(ix+j-2) % n]
    fout += coef*p2
    coef = 0.0
    for j in range(6):
        coef += IMAT[3, j]*f[(ix+j-2) % n]
    fout += coef*p3
    coef = 0.0
    for j in range(6):
        coef += IMAT[4, j]*f[(ix+j-2) % n]
    fout += coef*p4
    coef = 0.0
    for j in range(6):
        coef += IMAT[5, j]*f[(ix+j-2) % n]
    fout += coef*p5
    return fout
@numba.njit(fastmath=True)
def _polyi(f, xx):
    # Evaluate the 6-point local polynomial interpolant of periodic data f
    # (period 2*pi, equispaced samples) at the parameter value xx.
    n = f.size
    h = 2*np.pi/n
    cell = int(xx//h)
    lo = (cell-2)*h
    hi = (cell+3)*h
    # map xx into the stencil's reference interval [-1, 1]
    s = 2*((xx-lo)/(hi-lo) - 0.5)
    # gather the periodic 6-point stencil around the containing cell
    stencil = np.empty(6, np.float64)
    for k in range(6):
        stencil[k] = f[(cell+k-2) % n]
    coeffs = IMAT.dot(stencil)
    return coeffs[0] + coeffs[1]*s + coeffs[2]*s**2 + coeffs[3]*s**3 + coeffs[4]*s**4 + coeffs[5]*s**5
@numba.njit(fastmath=True)
def _polyi_p(f, xx):
    # Interpolate periodic data f (period 2*pi) at xx with a 6-point local
    # polynomial; returns (value, first derivative with respect to xx).
    n = f.size
    h = 2*np.pi/n
    ix = int(xx//h)
    lb = (ix-2)*h
    ub = (ix+3)*h
    # map xx into the stencil's reference interval [-1, 1]
    scalex = 2*((xx-lb)/(ub-lb) - 0.5)
    # chain-rule factor d(scalex)/d(xx)
    dscale = 2.0/(ub-lb)
    # gather the periodic 6-point stencil
    F = np.empty(6, np.float64)
    for j in range(6):
        F[j] = f[(ix+j-2) % n]
    A = IMAT.dot(F)
    # calculate value
    O0 = A[0] + A[1]*scalex + A[2]*scalex**2 + A[3]*scalex**3 + A[4]*scalex**4 + A[5]*scalex**5
    # calculate derivative
    O1 = A[1] + 2*A[2]*scalex + 3*A[3]*scalex**2 + 4*A[4]*scalex**3 + 5*A[5]*scalex**4
    return O0, O1*dscale
@numba.njit(fastmath=True)
def _polyi_pp(f, xx):
    # Interpolate periodic data f (period 2*pi) at xx with a 6-point local
    # polynomial; returns (value, first derivative, second derivative).
    n = f.size
    h = 2*np.pi/n
    ix = int(xx//h)
    lb = (ix-2)*h
    ub = (ix+3)*h
    # map xx into the stencil's reference interval [-1, 1]
    scalex = 2*((xx-lb)/(ub-lb) - 0.5)
    # chain-rule factor d(scalex)/d(xx)
    dscale = 2.0/(ub-lb)
    # gather the periodic 6-point stencil
    F = np.empty(6, np.float64)
    for j in range(6):
        F[j] = f[(ix+j-2) % n]
    A = IMAT.dot(F)
    # calculate value
    O0 = A[0] + A[1]*scalex + A[2]*scalex**2 + A[3]*scalex**3 + A[4]*scalex**4 + A[5]*scalex**5
    # calculate derivative
    O1 = A[1] + 2*A[2]*scalex + 3*A[3]*scalex**2 + 4*A[4]*scalex**3 + 5*A[5]*scalex**4
    # calculate second derivative
    O2 = 2*A[2] + 6*A[3]*scalex + 12*A[4]*scalex**2 + 20*A[5]*scalex**3
    return O0, O1*dscale, O2*dscale*dscale
# @numba.njit(fastmath=True)
# def _polyi_(f, xx):
# n = f.size
# h = 2*np.pi/n
# ix = int(xx//h)
# ratx = xx/h - (ix+0.5)
# fout = f[(ix - 2) % n] * (3/256 + ratx*( -9/1920 + ratx*( -5/48/2 + ratx*( 1/8/6 + ratx*( 1/2/24 - 1/8/120*ratx)))))
# fout += f[(ix - 1) % n] * (-25/256 + ratx*( 125/1920 + ratx*( 39/48/2 + ratx*(-13/8/6 + ratx*(-3/2/24 + 5/8/120*ratx)))))
# fout += f[(ix ) % n] * (150/256 + ratx*(-2250/1920 + ratx*(-34/48/2 + ratx*( 34/8/6 + ratx*( 2/2/24 - 10/8/120*ratx)))))
# fout += f[(ix + 1) % n] * (150/256 + ratx*( 2250/1920 + ratx*(-34/48/2 + ratx*(-34/8/6 + ratx*( 2/2/24 + 10/8/120*ratx)))))
# fout += f[(ix + 2) % n] * (-25/256 + ratx*( -125/1920 + ratx*( 39/48/2 + ratx*( 13/8/6 + ratx*(-3/2/24 - 5/8/120*ratx)))))
# fout += f[(ix + 3) % n] * (3/256 + ratx*( 9/1920 + ratx*( -5/48/2 + ratx*( -1/8/6 + ratx*( 1/2/24 + 1/8/120*ratx)))))
# return fout
@numba.njit(parallel=True, fastmath=True)
def multi_polyi(f, xx, out):
    # Parallel wrapper: interpolate f at every xx[i], writing into out.
    for i in numba.prange(xx.size):
        out[i] = _polyi(f, xx[i])
@numba.njit(parallel=True, fastmath=True)
def multi_polyi_p(f, xx, out, outp):
    # Parallel wrapper: value and first derivative at every xx[i],
    # written into out and outp respectively.
    for i in numba.prange(xx.size):
        out[i], outp[i] = _polyi_p(f, xx[i])
@numba.njit(fastmath=True)
def _f(t, _cx, _cy, x, y):
    # Residual of the normal-projection condition at parameter t:
    # the derivative of half the squared distance from (x, y) to the curve.
    X, Xp = _polyi_p(_cx, t)
    Y, Yp = _polyi_p(_cy, t)
    return Xp*(X-x) + Yp*(Y-y)
@numba.njit(fastmath=True)
def _f_fp(t, _cx, _cy, x, y):
    # Residual of the normal-projection condition and its t-derivative,
    # as needed for a Newton step.
    X, Xp, Xpp = _polyi_pp(_cx, t)
    Y, Yp, Ypp = _polyi_pp(_cy, t)
    resid = Xp*(X-x) + Yp*(Y-y)
    dresid = Xpp*(X-x) + Ypp*(Y-y) + Xp*Xp + Yp*Yp
    return resid, dresid
@numba.njit(fastmath=True)
def _newton(t, xi, yi, newton_tol, _cx, _cy, verbose, maxi):
    # Newton iteration for the curve parameter t whose point is closest
    # to (xi, yi), starting from the supplied guess t.
    resid, dresid = _f_fp(t, _cx, _cy, xi, yi)
    steps = 0
    while np.abs(resid) > newton_tol:
        # take a Newton step and re-evaluate
        t = t - resid/dresid
        resid, dresid = _f_fp(t, _cx, _cy, xi, yi)
        steps += 1
        if steps > maxi:
            raise Exception('Exceeded maximum number of iterations solving for coordinates.')
    # wrap the parameter back into [0, 2*pi)
    if t < 0: t += 2*np.pi
    if t >= 2*np.pi: t -= 2*np.pi
    return t
@numba.njit(parallel=False, fastmath=True)
def _multi_newton(its, x, y, newton_tol, _cx, _cy, verbose, maxi):
N = x.size
| |
<= _text_meter_sofar_size:
_text_meter_total_size = 0
_text_meter_sofar_size = 0
text_progress_meter = TextMeter
class MultiFileHelper(BaseMeter):
    """Per-file meter that forwards every event to a master MultiFileMeter."""
    def __init__(self, master):
        # master: the MultiFileMeter coordinating all child meters
        BaseMeter.__init__(self)
        self.master = master
    def _do_start(self, now):
        self.master.start_meter(self, now)
    def _do_update(self, amount_read, now):
        # elapsed time since last update
        self.master.update_meter(self, now)
    def _do_end(self, amount_read, now):
        # cache formatted totals before handing off to the master
        self.ftotal_time = format_time(now - self.start_time)
        self.ftotal_size = format_number(self.last_amount_read)
        self.master.end_meter(self, now)
    def failure(self, message, now=None):
        self.master.failure_meter(self, message, now)
    def message(self, message):
        self.master.message_meter(self, message)
class _FakeLock:
def acquire(self):
pass
def release(self):
pass
class MultiFileMeter:
    """Aggregates progress over several simultaneous file transfers.

    Child meters (instances of helperclass) report start/update/end/failure
    events back to this object, which maintains aggregate counters and a
    shared RateEstimator under a lock.
    """
    helperclass = MultiFileHelper
    def __init__(self, threaded=True):
        # threaded: guard shared state with a real lock, else a no-op lock.
        # NOTE(review): `thread.allocate_lock` is the Python 2 module name
        # (renamed `_thread` in Python 3) — confirm the intended runtime.
        self.meters = []
        self.in_progress_meters = []
        if threaded:
            self._lock = thread.allocate_lock()
        else:
            self._lock = _FakeLock()
        self.update_period = 0.3 # seconds
        self.numfiles = None
        self.finished_files = 0
        self.failed_files = 0
        self.open_files = 0
        self.total_size = None
        self.failed_size = 0
        self.start_time = None
        self.finished_file_size = 0
        self.last_update_time = None
        self.re = RateEstimator()
    def start(self, numfiles=None, total_size=None, now=None):
        """Reset all counters and begin a new multi-file session."""
        if now is None: now = time.time()
        self.numfiles = numfiles
        self.finished_files = 0
        self.failed_files = 0
        self.open_files = 0
        self.total_size = total_size
        self.failed_size = 0
        self.start_time = now
        self.finished_file_size = 0
        self.last_update_time = now
        self.re.start(total_size, now)
        self._do_start(now)
    def _do_start(self, now):
        # hook for subclasses
        pass
    def end(self, now=None):
        """Finish the session, pushing a final update to the rate estimator."""
        if now is None: now = time.time()
        self.re.update(self._amount_read(), now)
        self._do_end(now)
    def _do_end(self, now):
        # hook for subclasses
        pass
    def lock(self): self._lock.acquire()
    def unlock(self): self._lock.release()
    ###########################################################
    # child meter creation and destruction
    def newMeter(self):
        """Create, register and return a new child meter."""
        newmeter = self.helperclass(self)
        self.meters.append(newmeter)
        return newmeter
    def removeMeter(self, meter):
        """Unregister a child meter."""
        self.meters.remove(meter)
    ###########################################################
    # child functions - these should only be called by helpers
    def start_meter(self, meter, now):
        """Mark a registered child meter as in progress."""
        if not meter in self.meters:
            raise ValueError('attempt to use orphaned meter')
        self._lock.acquire()
        try:
            if not meter in self.in_progress_meters:
                self.in_progress_meters.append(meter)
                self.open_files += 1
        finally:
            self._lock.release()
        self._do_start_meter(meter, now)
    def _do_start_meter(self, meter, now):
        # hook for subclasses
        pass
    def update_meter(self, meter, now):
        """Refresh aggregate stats, rate-limited to update_period seconds."""
        if not meter in self.meters:
            raise ValueError('attempt to use orphaned meter')
        if (not self.last_update_time or
            (now >= self.last_update_time + self.update_period)):
            self.re.update(self._amount_read(), now)
            self.last_update_time = now
            self._do_update_meter(meter, now)
    def _do_update_meter(self, meter, now):
        # hook for subclasses
        pass
    def end_meter(self, meter, now):
        """Move a child meter from in-progress to finished."""
        if not meter in self.meters:
            raise ValueError('attempt to use orphaned meter')
        self._lock.acquire()
        try:
            try: self.in_progress_meters.remove(meter)
            except ValueError: pass
            self.open_files -= 1
            self.finished_files += 1
            self.finished_file_size += meter.last_amount_read
        finally:
            self._lock.release()
        self._do_end_meter(meter, now)
    def _do_end_meter(self, meter, now):
        # hook for subclasses
        pass
    def failure_meter(self, meter, message, now):
        """Record a failed child meter; failed_size becomes None if unknown."""
        if not meter in self.meters:
            raise ValueError('attempt to use orphaned meter')
        self._lock.acquire()
        try:
            try: self.in_progress_meters.remove(meter)
            except ValueError: pass
            self.open_files -= 1
            self.failed_files += 1
            if meter.size and self.failed_size is not None:
                self.failed_size += meter.size
            else:
                self.failed_size = None
        finally:
            self._lock.release()
        self._do_failure_meter(meter, message, now)
    def _do_failure_meter(self, meter, message, now):
        # hook for subclasses
        pass
    def message_meter(self, meter, message):
        # hook for subclasses
        pass
    ########################################################
    # internal functions
    def _amount_read(self):
        # total bytes: finished files plus whatever in-progress meters report
        tot = self.finished_file_size
        for m in self.in_progress_meters:
            tot += m.last_amount_read
        return tot
class TextMultiFileMeter(MultiFileMeter):
    """Renders aggregate multi-file progress as a single terminal line.

    Cycles through the active child meters (switching about once per
    second) so each file's name gets screen time.
    """
    def __init__(self, fo=sys.stderr, threaded=True):
        # fo: output stream for the progress line
        self.fo = fo
        MultiFileMeter.__init__(self, threaded)
        self.index_time = self.index = 0
    # files: ###/### ###% data: ######/###### ###% time: ##:##:##/##:##:##
    # New output, like TextMeter output...
    # update: No size (minimal: 17 chars)
    # -----------------------------------
    # (<#file>/<#tot files>): <text> <rate> | <current size> <elapsed>
    # 8-48 1 8 3 6 1 7-9 5
    #
    # update: Size, All files
    # -----------------------
    # (<#file>/<#tot files>): <text> <pc> <bar> <rate> | <size> <eta time> ETA
    # 8-22 1 3-4 1 6-12 1 8 3 6 1 7-9 1 3 1
    # end
    # ---
    # <text> | <file size> <file elapsed time>
    # 8-56 3 6 1 9 5
    def _do_update_meter(self, meter, now):
        """Redraw the progress line for the currently-cycled meter."""
        self._lock.acquire()
        try:
            df = self.finished_files
            tf = self.numfiles or 1
            # Don't use "percent of files complete" ...
            # pf = 100 * float(df)/tf + 0.49
            dd = self.re.last_amount_read
            dt = self.re.elapsed_time()
            rt = self.re.remaining_time()
            frac = self.re.fraction_read() or 0
            pf = 100 * frac
            ave_dl = format_number(self.re.average_rate())
            # cycle through active meters
            if now > self.index_time:
                self.index_time = now + 1.0
                self.index += 1
            if self.index >= len(self.meters):
                self.index = 0
            meter = self.meters[self.index]
            text = meter.text or meter.basename
            if tf > 1:
                text = '(%u/%u): %s' % (df+1+self.index, tf, text)
            # Include text + ui_rate in minimal
            tl = TerminalLine(8, 8+1+8)
            if tl._llen > 80:
                use_hours = True # For big screens, make it more readable.
            else:
                use_hours = False
            ui_size = tl.add(' | %5sB' % format_number(dd))
            if not self.re.total:
                # unknown total: show elapsed time, no bar or ETA
                ui_time = tl.add(' %s' % format_time(dt, use_hours))
                ui_end = tl.add(' ' * 5)
                ui_rate = tl.add(' %5sB/s' % ave_dl)
                out = '\r%-*.*s%s%s%s%s\r' % (tl.rest(), tl.rest(), text,
                                              ui_rate, ui_size, ui_time, ui_end)
            else:
                # known total: show percent, progress bar and ETA
                ui_time = tl.add(' %s' % format_time(rt, use_hours))
                ui_end = tl.add(' ETA ')
                ui_sofar_pc = tl.add(' %i%%' % pf,
                                     full_len=len(" (100%)"))
                ui_rate = tl.add(' %5sB/s' % ave_dl)
                # Make text grow a bit before we start growing the bar too
                blen = 4 + tl.rest_split(8 + 8 + 4)
                ui_bar = _term_add_bar(tl, blen, frac)
                out = '\r%-*.*s%s%s%s%s%s%s\r' % (tl.rest(), tl.rest(), text,
                                                  ui_sofar_pc, ui_bar,
                                                  ui_rate, ui_size, ui_time,
                                                  ui_end)
            self.fo.write(out)
            self.fo.flush()
        finally:
            self._lock.release()
    def _do_end_meter(self, meter, now):
        """Print the completion line for a finished file."""
        self._lock.acquire()
        try:
            size = meter.last_amount_read
            et = meter.re.elapsed_time()
            df = self.finished_files
            tf = self.numfiles or 1
            total_size = format_number(size)
            text = meter.text or meter.basename
            if tf > 1:
                text = '(%u/%u): %s' % (df, tf, text)
            tl = TerminalLine(8)
            if tl._llen > 80:
                use_hours = True # For big screens, make it more readable.
            else:
                use_hours = False
            ui_size = tl.add(' | %5sB' % total_size)
            ui_time = tl.add(' %s' % format_time(et, use_hours))
            ui_end, not_done = _term_add_end(tl, meter.size, size)
            out = '\r%-*.*s%s%s%s\n' % (tl.rest(), tl.rest(), text,
                                        ui_size, ui_time, ui_end)
            self.fo.write(out)
        finally:
            self._lock.release()
    def _do_failure_meter(self, meter, message, now):
        """Print a FAILED line for a meter.

        Bug fix: the original released the lock *inside* the try block and
        invoked _do_update_meter from the finally clause — an exception while
        writing would leave the lock held and then deadlock on re-acquire.
        The lock is now always released, and the redraw runs on the success
        path only.
        """
        self._lock.acquire()
        try:
            format = "%-30.30s %6.6s %s"
            fn = meter.text or meter.basename
            if isinstance(message, string_types):
                message = message.splitlines()
            if not message: message = ['']
            out = '%-79s' % (format % (fn, 'FAILED', message[0] or ''))
            self.fo.write('\r' + out + '\n')
            for m in message[1:]: self.fo.write(' ' + m + '\n')
        finally:
            self._lock.release()
        self._do_update_meter(meter, now)
    def message_meter(self, meter, message):
        """Messages are ignored by the text renderer."""
        self._lock.acquire()
        try:
            pass
        finally:
            self._lock.release()
######################################################################
# support classes and functions
class RateEstimator:
def __init__(self, timescale=5.0):
    # timescale: smoothing window (seconds) for the rolling rate average;
    # the remaining state is initialized by start()
    self.timescale = timescale
def start(self, total=None, now=None):
    """Reset the estimator for a new transfer.

    :param total: expected transfer size in bytes, or None if unknown
    :param now: start timestamp; defaults to the current time
    """
    now = time.time() if now is None else now
    self.total = total
    self.start_time = now
    self.last_update_time = now
    self.last_amount_read = 0
    self.ave_rate = None
def update(self, amount_read, now=None):
    """Fold a new total byte count into the rolling rate estimate.

    :param amount_read: total bytes read so far (resets the estimator when
        it drops to 0 or goes backwards)
    :param now: timestamp of the reading; defaults to the current time
    """
    if now is None: now = time.time()
    # libcurl calls the progress callback when fetching headers
    # too, thus amount_read = 0 .. hdr_size .. 0 .. content_size.
    # Ocassionally we miss the 2nd zero and report avg speed < 0.
    # Handle read_diff < 0 here. BZ 1001767.
    if amount_read == 0 or amount_read < self.last_amount_read:
        # if we just started this file, all bets are off
        self.last_update_time = now
        self.last_amount_read = amount_read
        self.ave_rate = None
        return
    #print 'times', now, self.last_update_time
    time_diff = now - self.last_update_time
    read_diff = amount_read - self.last_amount_read
    # First update, on reget is the file size
    if self.last_amount_read:
        self.last_update_time = now
        self.ave_rate = self._temporal_rolling_ave(
            time_diff, read_diff, self.ave_rate, self.timescale)
    self.last_amount_read = amount_read
    #print 'results', time_diff, read_diff, self.ave_rate
#####################################################################
# result methods
def average_rate(self):
    """Average transfer rate in bytes/second (None when not yet known)."""
    return self.ave_rate
def elapsed_time(self):
    """Seconds between the start of the transfer and the most recent update."""
    return self.last_update_time - self.start_time
def remaining_time(self):
    """Estimated seconds remaining; None when rate or total size is unknown."""
    if not self.ave_rate or not self.total:
        return None
    return (self.total - self.last_amount_read) / self.ave_rate
def fraction_read(self):
    """Fraction of the data read so far.

    Returns None for an unknown transfer size, and 1.0 for a zero-byte
    transfer.
    """
    if self.total is None:
        return None
    if self.total == 0:
        return 1.0
    return float(self.last_amount_read) / self.total
#########################################################################
# support methods
def _temporal_rolling_ave(self, time_diff, read_diff, last_ave, timescale):
"""a temporal rolling average performs smooth averaging even when
updates come at irregular intervals. This is performed by scaling
the "epsilon" according to the time since the last update.
Specifically, epsilon = time_diff / timescale
As a general rule, the average will take on a completely new value
after 'timescale' seconds."""
epsilon = time_diff / timescale
if epsilon > 1: epsilon = 1.0
return self._rolling_ave(time_diff, read_diff, last_ave, epsilon)
def _rolling_ave(self, time_diff, read_diff, last_ave, epsilon):
"""perform | |
when we attend over the
# span representations that we generate from these indices, we
# need them to be <= 0. This is only relevant in edge cases where
# the number of spans we consider after the pruning stage is >= the
# total number of spans, because in this case, it is possible we might
# consider a masked span.
# Shape: (batch_size, num_spans, 2)
spans = F.relu(spans.float()).long()
if self._context_layer:
# Shape: (batch_size, document_length, encoding_dim)
raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size)
attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
# span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
else:
raw_contextualized_embeddings = text_embeddings
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
span_embeddings_list = list()
attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans)
span_embeddings_list += [attended_span_embeddings]
if self._endpoint_span_extractor is not None:
# Shape: (batch_size, num_spans, embedding_size)
endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
span_embeddings_list += [endpoint_span_embeddings]
span_embeddings = torch.cat(span_embeddings_list, -1)
# event_scores = self._event_classifier.forward(span_embeddings)
# Shape: (batch_size, num_spans, num_event_realis_label)
# Shape: (batch_size, num_spans, num_event_realis_label)
# event_realis_scores = self._event_realis_classifier.forward(span_embeddings)
# Prune based on mention scores.
num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length))
(top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings,
span_mask,
num_spans_to_keep_according_doc_len,
)
event_embeddings = self._get_event_embedding(span_mask)
top_mask = top_mask.unsqueeze(-1)
# Shape: (batch_size * num_spans_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select spans for each element in the batch.
# This reformats the indices to take into account their
# index into the batch. We precompute this here to make
# the multiple calls to util.batched_index_select below more efficient.
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans)
# Compute final predictions for which spans to consider as mentions.
# Shape: (batch_size, num_spans_to_keep, 2)
top_spans = util.batched_index_select(spans,
top_indices,
flat_top_span_indices)
# Compute indices for antecedent spans to consider.
max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len)
# top_span_embeddings = top_span_embeddings.detach()
# top_span_mention_scores = top_span_mention_scores.detach()
# Now that we have our variables in terms of num_spans_to_keep, we need to
# compare span pairs to decide each span's antecedent. Each span can only
# have prior spans as antecedents, and we only consider up to max_antecedents
# prior spans. So the first thing we do is construct a matrix mapping a span's
# index to the indices of its allowed antecedents. Note that this is independent
# of the batch dimension - it's just a function of the span's position in
# top_spans. The spans are in document order, so we can just use the relative
# index of the spans to know which other spans are allowed antecedents.
# Once we have this matrix, we reformat our variables again to get embeddings
# for all valid antecedents for each span. This gives us variables with shapes
# like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
# we can use to make coreference decisions between valid span pairs.
# Shapes:
# (num_spans_to_keep, max_antecedents),
# (1, max_antecedents),
# (1, num_spans_to_keep, max_antecedents)
valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \
_generate_valid_antecedents(num_spans_to_keep_according_doc_len,
max_antecedents,
util.get_device_of(text_mask))
if self._type_refine_gate is not None:
top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings)
# Select tensors relating to the antecedent spans.
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings,
valid_antecedent_indices)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
candidate_antecedent_mention_scores = util.flattened_index_select(top_scores,
valid_antecedent_indices).squeeze(-1)
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings(
event_embeddings,
candidate_antecedent_embeddings)
# Compute antecedent scores.
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings,
candidate_antecedent_embeddings,
valid_antecedent_offsets)
# (batch_size, event_type_size, 1)
event_type_prior_scores = self._event_scorer(event_embeddings)
# (batch_size, num_spans_to_keep, event_type_size)
event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand(
candidate_antecedent_mention_scores.size(0),
candidate_antecedent_mention_scores.size(1),
-1)
# (batch_size, num_spans_to_keep, event_type_size + max_antecedents)
candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores,
candidate_antecedent_mention_scores],
-1)
# Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents)
coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
top_scores,
candidate_antecedent_mention_scores,
valid_antecedent_log_mask)
# We now have, for each span which survived the pruning stage,
# a predicted antecedent. This implies a clustering if we group
# mentions which refer to each other in a chain.
# Shape: (batch_size, num_spans_to_keep)
_, predicted_antecedents = coreference_scores.max(2)
# Subtract one here because index 0 is the "no antecedent" class,
# so this makes the indices line up with actual spans if the prediction
# is greater than -1.
predicted_antecedents -= 1
output_dict = {"top_spans": top_spans,
"antecedent_indices": valid_antecedent_indices,
"predicted_antecedents": predicted_antecedents,
"coreference_scores": coreference_scores,
}
if coref_labels is not None and event_type_labels is not None:
pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices)
type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels)
# Find the gold labels for the spans which we kept.
pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1),
top_indices,
flat_top_span_indices)
antecedent_labels = util.flattened_index_select(pruned_gold_labels,
valid_antecedent_indices).squeeze(-1)
antecedent_labels += valid_antecedent_log_mask.long()
# Compute labels.
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
type_antecedent_labels,
antecedent_labels)
bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1),
(event_type_labels > 0).float()) * span_mask
bce_loss = bce_loss.sum() * self._bce_loss_weight
# Now, compute the loss using the negative marginal log-likelihood.
# This is equal to the log of the sum of the probabilities of all antecedent predictions
# that would be consistent with the data, in the sense that we are minimising, for a
# given span, the negative marginal log likelihood of all antecedents which are in the
# same gold cluster as the span we are currently considering. Each span i predicts a
# single antecedent j, but there might be several prior mentions k in the same
# coreference cluster that would be valid antecedents. Our loss is the sum of the
# probability assigned to all valid antecedents. This is a valid objective for
# clustering as we don't mind which antecedent is predicted, so long as they are in
# the same coreference cluster.
if self._pretrain_ed:
# All antecedent mask is 0
top_mask = top_mask.expand_as(coreference_scores).clone()
top_mask[:, :, self._positive_label_size + 2:] = 0
coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask)
correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight
output_dict["loss"] = coref_loss + bce_loss
decoded_result = self.decode(output_dict)
pred_label_spans_list = decoded_result['pred_label_spans']
gold_label_spans_list = [m['gold_label_spans'] for m in metadata]
self._mention_f1_score(pred_label_spans_list,
gold_label_spans_list,
)
self._conll_coref_scores(decoded_result['clusters'],
metadata,
pred_label_spans_list,
gold_label_spans_list)
self._type_loss_metric(bce_loss.item())
self._coref_loss_metric(negative_marginal_log_likelihood.item())
else:
self._coref_loss_metric(0.)
if metadata is not None:
output_dict["document"] = [x["original_text"] for x in metadata]
output_dict["offset"] = [x["token_offset"] for x in metadata]
output_dict['doc_id'] = [x.get("doc_id", None) for x in metadata]
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
    """
    Convert predicted antecedent indices into clusters of spans for each
    element in the batch.

    Parameters
    ----------
    output_dict : ``Dict[str, torch.Tensor]``, required.
        The result of calling :func:`forward` on an instance or batch of instances.

    Returns
    -------
    The same output dictionary, but with an additional ``clusters`` key:
    clusters : ``List[List[List[Tuple[int, int]]]]``
        For each instance in the batch, the list of clusters, each of which
        is a list of (start, end) inclusive spans into the original document.
    """
    return node_decode(
        output_dict,
        self.vocab,
        decoding_algorithm=self._decoding,
        positive_label_size=self._positive_label_size,
        type_threshold=self._type_threshold,
    )
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
    """Collect mention-detection and coreference metrics into one flat dict.

    ``a_f1`` is the arithmetic mean of the mention F1 and the CoNLL coref F1.
    """
    mention = self._mention_f1_score.get_metric(reset)
    coref_p, coref_r, coref_f1 = self._conll_coref_scores.get_metric(reset)
    metrics = {
        "c_p": coref_p,
        "c_r": coref_r,
        "c_f1": coref_f1,
        "m_p": mention['precision'],
        "m_r": mention['recall'],
        "m_f1": mention['f1-score'],
        "nil": self._nil_label_metric.get_metric(reset),
        "type": self._type_label_metric.get_metric(reset),
        "coref": self._coref_label_metric.get_metric(reset),
        "t_l": self._type_loss_metric.get_metric(reset),
        "c_l": self._coref_loss_metric.get_metric(reset),
    }
    metrics["a_f1"] = (metrics["m_f1"] + coref_f1) / 2.
    return metrics
@staticmethod
def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor):
"""
event_embeddings: ``torch.FloatTensor``, required.
Embedding representations of the event types. Has shape
(batch_size, event_type_size, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size).
return:
(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
"""
event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
event_embeddings.size(1),
antecedent_embeddings.size(3),))
return torch.cat([event_embeddings, antecedent_embeddings], 2)
def _compute_span_pair_embeddings(self,
top_span_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor,
antecedent_offsets: torch.FloatTensor):
"""
Computes an embedding representation of pairs of spans for the pairwise scoring function
to consider. This includes both the original span representations, the element-wise
similarity of the span representations, and an embedding representation of the distance
between the two spans.
Parameters
---------- shape
(batch_size, event_type_size, embedding_size).
top_span_embeddings : ``torch.FloatTensor``, required.
Embedding | |
self.xmu + self.xr
self.ird_min = -999
self.ird_max = 999
self.irq_min = -999
self.irq_max = 999
self.irq_off = zeros(self.n, 1)
self.u0 = mul(self.u, self.ugen)
# self.omega_ref0 = ones(self.n, 1)
self.mva_mega = 100000000.0
def init1(self, dae):
    """Initialize DFIG states and algebraic variables from the power-flow result.

    Builds an analytic first guess for rotor speed, currents and rotor
    voltages, refines them with a per-machine 6x6 Newton iteration, then
    back-solves the wind speed that produces the required mechanical power.

    :param dae: system DAE container; ``dae.x`` / ``dae.y`` are written in place
    :return: True on success, False if any Newton loop failed to converge
    """
    self.servcall(dae)
    retval = True
    mva = self.system.mva
    # scale dispatched P/Q by the per-unit split factors
    self.p0 = mul(self.p0, self.gammap)
    self.q0 = mul(self.q0, self.gammaq)
    # stator voltage components in the machine dq frame, from the bus phasor
    dae.y[self.vsd] = mul(dae.y[self.v], -sin(dae.y[self.a]))
    dae.y[self.vsq] = mul(dae.y[self.v], cos(dae.y[self.a]))
    rs = matrix(self.rs)
    rr = matrix(self.rr)
    xmu = matrix(self.xmu)
    x1 = matrix(self.xs) + xmu  # stator self reactance (leakage + magnetizing)
    x2 = matrix(self.xr) + xmu  # rotor self reactance (leakage + magnetizing)
    Pg = matrix(self.p0)
    Qg = matrix(self.q0)
    Vc = dae.y[self.v]
    vsq = dae.y[self.vsq]
    vsd = dae.y[self.vsd]
    toSn = div(mva, self.Sn)  # to machine base
    toSb = self.Sn / mva  # to system base
    # rotor speed: piecewise initial guess from the power-speed curve
    # (1 pu above rated power, linear in mid range, 0.5 pu at/below zero)
    omega = 1 * (ageb(mva * Pg, self.Sn)) + \
        mul(0.5 + 0.5 * mul(Pg, toSn),
            aandb(agtb(Pg, 0), altb(mva * Pg, self.Sn))) + \
        0.5 * (aleb(mva * Pg, 0))
    slip = 1 - omega
    # pitch angle from the speed error, rounded to 1e-3 and floored at zero
    theta = mul(self.Kp, mround(1000 * (omega - 1)) / 1000)
    theta = mmax(theta, 0)
    # prepare for the iterations: analytic guess of the q-axis rotor current
    irq = mul(-x1, toSb, (2 * omega - 1), div(1, Vc), div(1, xmu),
              div(1, omega))
    isd = zeros(*irq.size)
    isq = zeros(*irq.size)
    # obtain ird isd isq from a 2x2 linear solve per machine
    for i in range(self.n):
        A = sparse([[-rs[i], vsq[i]], [x1[i], -vsd[i]]])
        B = matrix([vsd[i] - xmu[i] * irq[i], Qg[i]])
        linsolve(A, B)  # solves in place; B now holds [isd, isq]
        isd[i] = B[0]
        isq[i] = B[1]
    ird = -div(vsq + mul(rs, isq) + mul(x1, isd), xmu)
    vrd = -mul(rr, ird) + mul(
        slip,
        mul(x2, irq) + mul(xmu, isq))  # todo: check x1 or x2
    vrq = -mul(rr, irq) - mul(slip, mul(x2, ird) + mul(xmu, isd))
    # main iterations: per-machine Newton refinement of
    # x = [isd, isq, ird, irq, vrd, vrq]
    for i in range(self.n):
        mis = ones(6, 1)
        # constant (state-independent) part of the 6x6 Jacobian
        rows = [0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5]
        cols = [0, 1, 3, 0, 1, 2, 2, 4, 3, 5, 0, 1, 2]
        x = matrix([isd[i], isq[i], ird[i], irq[i], vrd[i], vrq[i]])
        # vals = [-rs, x1, xmu, -x1, -rs, -xmu, -rr,
        #         -1, -rr, -1, vsd, vsq, -xmu * Vc / x1]
        vals = [
            -rs[i], x1[i], xmu[i], -x1[i], -rs[i], -xmu[i], -rr[i], -1,
            -rr[i], -1, vsd[i], vsq[i], -xmu[i] * Vc[i] / x1[i]
        ]
        jac0 = spmatrix(vals, rows, cols, (6, 6), 'd')
        iter = 0  # NOTE(review): shadows the builtin `iter`
        while max(abs(mis)) > self.system.tds.config.tol:
            if iter > 20:
                logger.error(
                    'Initialization of DFIG <{}> failed.'.format(
                        self.name[i]))
                retval = False
                break
            # residuals: stator voltage (0,1), rotor voltage (2,3),
            # active power (4) and reactive power (5) balance
            mis[0] = -rs[i] * x[0] + x1[i] * x[1] + xmu[i] * x[3] - vsd[i]
            mis[1] = -rs[i] * x[1] - x1[i] * x[0] - xmu[i] * x[2] - vsq[i]
            mis[2] = -rr[i] * x[2] + slip[i] * (
                x2[i] * x[3] + xmu[i] * x[1]) - x[4]
            mis[3] = -rr[i] * x[3] - slip[i] * (
                x2[i] * x[2] + xmu[i] * x[0]) - x[5]
            mis[4] = vsd[i] * x[0] + vsq[i] * x[1] + x[4] * x[2] + \
                x[5] * x[3] - Pg[i]
            mis[5] = -xmu[i] * Vc[i] * x[2] / x1[i] - \
                Vc[i] * Vc[i] / x1[i] - Qg[i]
            # state-dependent part of the Jacobian, rebuilt each step
            rows = [2, 2, 3, 3, 4, 4, 4, 4]
            cols = [1, 3, 0, 2, 2, 3, 4, 5]
            vals = [
                slip[i] * xmu[i], slip[i] * x2[i], -slip[i] * xmu[i],
                -slip[i] * x2[i], x[4], x[5], x[2], x[3]
            ]
            jac = jac0 + spmatrix(vals, rows, cols, (6, 6), 'd')
            linsolve(jac, mis)  # in place: mis becomes the Newton step
            x -= mis
            iter += 1
        isd[i] = x[0]
        isq[i] = x[1]
        ird[i] = x[2]
        irq[i] = x[3]
        vrd[i] = x[4]
        vrq[i] = x[5]
    # store converged currents/voltages, gated by the online status u0
    dae.x[self.ird] = mul(self.u0, ird)
    dae.x[self.irq] = mul(self.u0, irq)
    dae.y[self.isd] = isd
    dae.y[self.isq] = isq
    dae.y[self.vrd] = vrd
    dae.y[self.vrq] = vrq
    dae.x[self.omega_m] = mul(self.u0, omega)
    dae.x[self.theta_p] = mul(self.u0, theta)
    # power-speed characteristic output, clipped to [0, 1]
    dae.y[self.pwa] = mmax(mmin(2 * dae.x[self.omega_m] - 1, 1), 0)
    # voltage reference only where the voltage-control gain KV is non-zero
    self.vref0 = mul(
        aneb(self.KV, 0), Vc - div(ird + div(Vc, xmu), self.KV))
    dae.y[self.vref] = self.vref0
    k = mul(div(x1, Vc, xmu, omega), toSb)
    self.irq_off = -mul(k, mmax(mmin(2 * omega - 1, 1), 0)) - irq
    # electrical torque in pu
    te = mul(
        xmu,
        mul(dae.x[self.irq], dae.y[self.isd]) - mul(
            dae.x[self.ird], dae.y[self.isq]))
    for i in range(self.n):
        if te[i] < 0:
            logger.error(
                'Pe < 0 on bus <{}>. Wind speed initialize failed.'.
                format(self.bus[i]))
            retval = False
    # wind power in pu
    pw = mul(te, omega)
    dae.y[self.pw] = pw
    # wind speed initialization loop: scalar Newton on vw per machine
    R = 4 * pi * self.system.freq * mul(self.R, self.ngb,
                                        div(1, self.npole))
    AA = pi * self.R**2  # rotor swept area
    vw = 0.9 * self.Vwn  # start just below nominal wind speed
    for i in range(self.n):
        mis = 1
        iter = 0
        while abs(mis) > self.system.tds.config.tol:
            if iter > 50:
                logger.error(
                    'Wind <{}> init failed. '
                    'Try increasing the nominal wind speed.'.
                    format(self.wind[i]))
                retval = False
                break
            pw_iter, jac = self.windpower(self.ngen[i], self.rho[i], vw[i],
                                          AA[i], R[i], omega[i], theta[i])
            mis = pw_iter - pw[i]
            inc = -mis / jac[1]  # Newton step; jac[1] is dpw/dvw
            vw[i] += inc
            iter += 1
    # set wind speed (normalized by nominal)
    dae.x[self.vw] = div(vw, self.Vwn)
    # tip-speed ratio and the cp performance-curve variables
    lamb = div(omega, vw, div(1, R))
    ilamb = div(1,
                (div(1, lamb + 0.08 * theta) - div(0.035, theta**3 + 1)))
    cp = 0.22 * mul(
        div(116, ilamb) - 0.4 * theta - 5, exp(div(-12.5, ilamb)))
    dae.y[self.lamb] = lamb
    dae.y[self.ilamb] = ilamb
    dae.y[self.cp] = cp
    # remove the static generator now that the dynamic model takes over
    self.system.rmgen(self.gen)
    if not retval:
        logger.error('DFIG initialization failed')
    return retval
def windpower(self, ngen, rho, vw, Ar, R, omega, theta, derivative=False):
    """Mechanical power captured from the wind, plus analytic sensitivities.

    Uses the classic performance curve
    ``cp = 0.22 * (116/ilamb - 0.4*theta - 5) * exp(-12.5/ilamb)``.

    :param ngen: number of lumped generators in this device
    :param rho: air density
    :param vw: wind speed
    :param Ar: rotor swept area
    :param R: rotor radius term used in the tip-speed ratio
    :param omega: rotor speed
    :param theta: pitch angle
    :param derivative: unused (NOTE(review): never read; kept for API compat)
    :return: ``(pw, jac)`` — pw is power on the system MVA base; jac is a 1x3
        row. jac[1] is used as dpw/dvw by the wind-speed Newton loop in
        ``init1``; presumably jac[0] = dpw/domega and jac[2] = dpw/dtheta —
        TODO confirm.
    """
    mva_mega = self.system.mva * 1e6
    lamb = omega * R / vw  # tip-speed ratio
    ilamb = 1 / (1 / (lamb + 0.08 * theta) - 0.035 / (theta**3 + 1))
    cp = 0.22 * (116 / ilamb - 0.4 * theta - 5) * exp(-12.5 / ilamb)
    pw = 0.5 * ngen * rho * cp * Ar * vw**3 / mva_mega
    # intermediate factors shared by the partial derivatives below
    a1 = exp(-12.5 / ilamb)
    a2 = (lamb + 0.08 * theta)**2
    a3 = 116. / ilamb - 0.4 * theta - 5
    a4 = -9.28 / (lamb + 0.08 * theta) ** 2 + \
        12.180 * theta * theta / (theta ** 3 + 1) ** 2 - 0.4
    a5 = 1.000 / (lamb + 0.08 * theta) ** 2 - \
        1.3125 * theta * theta / (theta ** 3 + 1) ** 2
    jac = ones(1, 3)
    jac[0] = ngen * R * a1 * rho * vw * vw * Ar * (
        -12.760 + 1.3750 * a3) / a2 / mva_mega
    jac[1] = ngen * (omega * R * (12.760 - 1.3750 * a3) / a2 +
                     0.330 * a3 * vw) * vw * Ar * rho * a1 / mva_mega
    jac[2] = ngen * 0.110 * rho * (
        a4 + a3 * a5) * a1 * Ar * vw**3 / mva_mega
    return pw, jac
@property
def phi(self):
    """Pitch angle in radians, floored to whole degrees.

    Zero whenever the rotor speed is at or below synchronous (1 pu).
    """
    one_degree = pi / 180
    dae = self.system.dae
    over_sync = agtb(dae.x[self.omega_m], 1)
    whole_degrees = mfloor((dae.x[self.omega_m] - 1) / one_degree) * one_degree
    return mul(whole_degrees, over_sync)
def gcall(self, dae):
dae.g[self.isd] = -dae.y[self.vsd] + mul(
dae.x[self.irq], self.xmu) + mul(dae.y[self.isq], self.x0) - mul(
dae.y[self.isd], self.rs)
dae.g[self.isq] = -dae.y[self.vsq] - mul(
dae.x[self.ird], self.xmu) - mul(dae.y[self.isd], self.x0) - mul(
dae.y[self.isq], self.rs)
dae.g[self.vrd] = -dae.y[self.vrd] + mul(
1 - dae.x[self.omega_m],
mul(dae.x[self.irq], self.x1) + mul(
dae.y[self.isq], self.xmu)) - mul(dae.x[self.ird], self.rr)
dae.g[self.vrq] = -dae.y[self.vrq] - mul(
dae.x[self.irq], self.rr) - mul(
1 - dae.x[self.omega_m],
mul(dae.x[self.ird], self.x1) + mul(dae.y[self.isd], self.xmu))
dae.g[self.vsd] = -dae.y[self.vsd] - mul(dae.y[self.v],
sin(dae.y[self.a]))
dae.g[self.vsq] = -dae.y[self.vsq] + mul(dae.y[self.v],
cos(dae.y[self.a]))
dae.g[self.vref] = self.vref0 - dae.y[self.vref]
dae.g[self.pwa] = mmax(mmin(2 * dae.x[self.omega_m] - 1, 1),
0) - dae.y[self.pwa]
dae.hard_limit(self.pwa, 0, 1)
dae.g[self.pw] = -dae.y[self.pw] + mul(
0.5, dae.y[self.cp], self.ngen, pi, self.rho, (self.R)**2,
(self.Vwn)**3, div(1, self.mva_mega), (dae.x[self.vw])**3)
dae.g[self.cp] = -dae.y[self.cp] + mul(
-1.1 + mul(25.52, div(1, dae.y[self.ilamb])) + mul(
-0.08800000000000001, dae.x[self.theta_p]),
exp(mul(-12.5, div(1, dae.y[self.ilamb]))))
dae.g[self.lamb] = -dae.y[self.lamb] + mul(
4, self.R, self.fn, self.ngb, dae.x[self.omega_m], pi,
div(1, self.Vwn), div(1, self.npole), div(1, dae.x[self.vw]))
dae.g[self.ilamb] = div(
1,
div(1, dae.y[self.lamb] + mul(0.08, dae.x[self.theta_p])) + mul(
-0.035, div(1, 1 +
(dae.x[self.theta_p])**3))) - dae.y[self.ilamb]
dae.g += spmatrix(
mul(
self.u0, -mul(dae.x[self.ird], dae.y[self.vrd]) - mul(
dae.x[self.irq], dae.y[self.vrq]) - mul(
dae.y[self.isd], dae.y[self.vsd]) - mul(
dae.y[self.isq], dae.y[self.vsq])), self.a,
[0] * self.n, (dae.m, 1), 'd')
| |
promoted if i in multigpu_list] # 2gpu job ['3','3','4','4','10']
demoted_1gpu = [i for i in demoted if i not in multigpu_list] # 1gpu job
demoted_2gpu = [i for i in demoted if i in multigpu_list] # 2gpu job
condition1 = len(K80_avail) >= len(demoted) and 2*len(K80_2gpu) >= len(demoted_2gpu)
condition2 = len(V100_avail) >= len(promoted) and 2*len(V100_2gpu) >= len(promoted_2gpu)
if condition1 and condition2:
return None
else:
print('Notice: promoted/demoted jobs cannot fit in their destination due to locality', file=run_log, flush=True)
print('Remove all 2-gpu jobs from this migration decision', file=run_log, flush=True) # meaning they stay wherever they were before
for job in promoted_2gpu:
promoted.remove(job)
for job in demoted_2gpu:
demoted.remove(job)
for gpu_pair in K80_2gpu:
for gpu in gpu_pair:
K80_avail.remove(gpu)
for gpu_pair in V100_2gpu:
for gpu in gpu_pair:
V100_avail.remove(gpu)
# check if need to remove 1-gpu jobs as well
if len(K80_avail) < len(demoted_1gpu):
diff = len(demoted_1gpu) - len(K80_avail)
for i in range(diff):
removed_1gpu = demoted[0]
demoted.remove(removed_1gpu)
# also need to remove its corresponding GPU
V100_avail.remove(demoted_V100_map_1gpu[removed_1gpu])
elif len(V100_avail) < len(promoted_1gpu):
diff = len(promoted_1gpu) - len(V100_avail)
for i in range(diff):
removed_1gpu = promoted[0]
promoted.remove(removed_1gpu)
# also need to remove its corresponding GPU
K80_avail.remove(promoted_K80_map_1gpu[removed_1gpu])
return K80_avail, V100_avail, promoted, demoted
#locality_check(['2'],['2'],['44'],['48'])
# input: a list of jobs
# output: a dict of jobs with their remaining time on K80 and V100
# the remaining time on the other GPU type need to include migration overhead
# 1. ovhd_total: the mean is average migration overhead once
# 2. 1st_ovhd: extra time spent on 1st epoch after migration
# the returned dict looks like this {'50': [300, 150], '78': [1000, 300]}
# if a job can't be migrated yet (not in step1_job list) it shouldn't be in the input list
# elif a job can be migrated but has not been migrated yet, or has been migrated but does not have a
# measured speedup yet, its remaining time on the other gpu type should include the migration overhead
def get_remaining_time(job_list):
    """Estimate each job's remaining time on K80 and on V100.

    The time on the GPU type the job is NOT currently on includes migration
    overhead. Jobs profiled only on their birth GPU (step1 but not step2) use
    predicted batch times plus a flat 40s overhead for the other type.
    Returns e.g. {'50': [300, 150], '78': [1000, 300]} as {job: [K80, V100]}.
    """
    result_dict = {}
    for job in job_list:
        if job not in step1_job:
            raise ValueError('Bug with promotion scheme, more jobs than free gpus')
        remaining = job_remaining_batch[job]
        k80_measured = remaining * K80_batch_time[job]
        v100_measured = remaining * V100_batch_time[job]
        if job not in step2_job:
            # only profiled on the birth GPU: use prediction for the other
            # type plus a general migration overhead (not accurate, but it
            # pushes the job toward the unprofiled GPU type)
            mig_overhead = 40
            k80_predicted = remaining * K80_batch_pred[job]
            v100_predicted = remaining * V100_batch_pred[job]
            if birthplace[job] in K80_node:
                result_dict[job] = [k80_measured, v100_predicted + mig_overhead]
            elif birthplace[job] in V100_node:
                result_dict[job] = [k80_predicted + mig_overhead, v100_measured]
        else:
            # both batch times profiled: add the measured migration overheads
            k80_mig = np.mean(ovhd_total[job]) + K80_1st_ovhd[job]
            v100_mig = np.mean(ovhd_total[job]) + V100_1st_ovhd[job]
            if job in list(K80_job.values()):
                result_dict[job] = [k80_measured, v100_measured + v100_mig]
            elif job in list(V100_job.values()):
                result_dict[job] = [k80_measured + k80_mig, v100_measured]
    return result_dict
#d, e, f = random_promotion(['0','1','4','8'], ['3','3','1','1'], [])
def save_job(node, job):  # save_job('c2176', '50')
    """Checkpoint `job` running on `node`.

    Blocks until the job reports it is checkpoint-able, claims that flag,
    then signals the node to save and records the overhead start time.
    """
    global ckpt_qual_dict, pid_dict, ovhd_start
    # wait for ckpt_qual to be available, polling every 5 seconds
    while ckpt_qual_dict['job' + job] != 1:
        time.sleep(5)
    ckpt_qual_dict['job' + job] = 0
    pid = pid_dict['job' + job]
    send_signal(node, 'save ' + job + ' pid ' + pid)  # e.g. 'save 50 pid 10000'
    ovhd_start[job] = time.time()
    time.sleep(3)  # in case epoch_waste is communicated too frequently
# resume job
def resume_job(node, gpu, job):  # resume_job('c2176', '3', '50')
    """Ask `node` to resume checkpointed `job` on `gpu`."""
    send_signal(node, 'resume ' + job + ' gpu ' + gpu)
# start job
def start_job(node, gpu, job):
    """Ask `node` to start `job` from scratch on `gpu`."""
    send_signal(node, 'start ' + job + ' gpu ' + gpu)
############### first clear finish status of all jobs ####################
# Per-job bookkeeping tables, all keyed by 'job1' ... 'jobN'. The original
# code repeated the same init loop five times; dict.fromkeys is equivalent
# (0 is immutable, so sharing the default is safe).
job_names = ['job' + str(i + 1) for i in range(len(queue))]
pid_dict = dict.fromkeys(job_names, 0)          # job -> worker process pid
checkpoint_dict = dict.fromkeys(job_names, 0)   # job -> checkpoint received flag
ckpt_qual_dict = dict.fromkeys(job_names, 0)    # job -> safe-to-checkpoint flag
finish_dict = dict.fromkeys(job_names, 0)       # job -> finished flag
epoch_waste_dict = dict.fromkeys(job_names, 0)  # job -> accumulated wasted epoch seconds
#################### background thread running TCP socket ########################
def thread_function():
    """Background TCP server that ingests worker status messages.

    Listens on (host_node, 10002) forever; each message is an ASCII string
    of the form '<jobname> <tag> [value]' (e.g. 'job50 batch_time 0.042')
    and updates the corresponding global bookkeeping structures.
    Started as a daemon thread below.
    """
    # here listen on the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = (host_node, 10002)
    print('starting up on {} port {}'.format(*server_address), file=run_log, flush=True)
    sock.bind(server_address)
    sock.listen(5)
    while True:
        # Wait for a connection
        connection, client_address = sock.accept()
        try:
            while True:
                data = connection.recv(32)
                if data:
                    data_str = data.decode('utf-8')
                    global K80_start_time
                    global V100_start_time, promote_start_time
                    global K80_job
                    global V100_job
                    global K80_time
                    global V100_time
                    global ovhd_a, ovhd_b, ovhd_c, ovhd_d, ovhd_start, overhead, ovhd_total, v100_1st, k80_1st
                    global b_start, c_start, d_start, completion
                    global step1_job, step2_job
                    global V100_batch_time, K80_batch_time, job_remaining_batch, speedup_dict
                    global K80_1st_ovhd, V100_1st_ovhd
                    if 'ckpt_qual' in data_str:
                        # job reports it is now safe to checkpoint
                        global ckpt_qual_dict
                        job_name = data_str.split(' ')[0]
                        ckpt_qual_dict[job_name] = 1
                    elif 'finish' in data_str:
                        # job completed: record JCT and time on its current GPU type
                        global finish_dict
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        finish_dict[job_name] = 1
                        JCT[job] = int(time.time() - job_start[job])
                        if job in list(K80_job.values()):
                            K80_time[job] += int(time.time() - K80_start_time[job])
                        elif job in list(V100_job.values()):
                            V100_time[job] += int(time.time() - V100_start_time[job])
                    elif 'pid' in data_str:
                        # worker reports its process id (needed for save signals)
                        global pid_dict
                        job_name = data_str.split(' ')[0]
                        pid = data_str.split(' ')[2]
                        pid_dict[job_name] = pid
                    elif 'checkpoint' in data_str: # can only be received after save signal is sent
                        # phase a of migration finished; start timing phase b
                        global checkpoint_dict
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        checkpoint_dict[job_name] = 1
                        ovhd_a[job].append(int(time.time() - ovhd_start[job]))
                        b_start[job] = time.time()
                    elif 'waste' in data_str:
                        # accumulated seconds wasted in a partially-run epoch
                        global epoch_waste_dict
                        job_name = data_str.split(' ')[0]
                        epoch_waste_time = data_str.split(' ')[2]
                        epoch_waste_dict[job_name] += int(epoch_waste_time)
                    elif 'b_end' in data_str:
                        # phase b of migration finished; start timing phase c
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        ovhd_b[job].append(int(time.time() - b_start[job]))
                        c_start[job] = time.time()
                    elif 'c_end' in data_str:
                        # phase c of migration finished; start timing phase d
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        ovhd_c[job].append(int(time.time() - c_start[job]))
                        d_start[job] = time.time()
                    elif 'd_end' in data_str:
                        # migration fully done: close out overhead accounting and
                        # restart the GPU-time clock for the job's current GPU type
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        ovhd_d[job].append(int(time.time() - d_start[job]))
                        ovhd_total[job].append(int(time.time() - ovhd_start[job]))
                        if ovhd_start[job] != 0:
                            overhead[job] += int(time.time() - ovhd_start[job])
                            ovhd_start[job] = 0
                            if job in list(K80_job.values()):
                                K80_start_time[job] = time.time()
                            elif job in list(V100_job.values()):
                                V100_start_time[job] = time.time()
                                promote_start_time[job] = time.time()
                    elif '1st_epoch' in data_str: # 'job50 1st_epoch 35'
                        # duration of the first epoch after a (re)start
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        epoch_time = int(data_str.split(' ')[2])
                        if job in list(K80_job.values()):
                            k80_1st[job].append(epoch_time)
                        elif job in list(V100_job.values()):
                            v100_1st[job].append(epoch_time)
                    elif 'completion' in data_str: # 'job50 completion 0.33'
                        # fraction of total work completed so far
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        completion_portion = float(data_str.split(' ')[2])
                        completion[job] = completion_portion
                    elif 'batch_time' in data_str: # 'job50 batch_time 0.042'
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        batch_time = float(data_str.split(' ')[2])
                        # also step1_job and step2_job
                        # if job birthplace is K80, K80_batch_time is collected, then step1 complete
                        if job in list(K80_job.values()) and K80_batch_time[job] == 0:
                            K80_batch_time[job] = batch_time
                            if birthplace[job] in K80_node:
                                step1_job.append(job)
                            elif birthplace[job] in V100_node:
                                # profiled on both types now: speedup is known
                                step2_job.append(job)
                                speedup_dict[job] = round(K80_batch_time[job] / V100_batch_time[job], 3)
                        elif job in list(V100_job.values()) and V100_batch_time[job] == 0:
                            V100_batch_time[job] = batch_time
                            if birthplace[job] in V100_node:
                                step1_job.append(job)
                            elif birthplace[job] in K80_node:
                                step2_job.append(job)
                                speedup_dict[job] = round(K80_batch_time[job] / V100_batch_time[job], 3)
                    elif 'remain_batch' in data_str: # 'job50 remain_batch 156300'
                        # number of batches the job still has to run
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        remaining_batch = int(data_str.split(' ')[2])
                        job_remaining_batch[job] = remaining_batch
                    elif '1st_ovhd' in data_str: # 'job50 1st_ovhd 4.99'
                        # extra time spent on the first epoch after migration
                        job_name = data_str.split(' ')[0]
                        job = job_name.replace('job','')
                        ovhd_time = float(data_str.split(' ')[2])
                        if job in list(K80_job.values()) and K80_1st_ovhd[job] == 0:
                            K80_1st_ovhd[job] = ovhd_time
                        elif job in list(V100_job.values()) and V100_1st_ovhd[job] == 0:
                            V100_1st_ovhd[job] = ovhd_time
                    # if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str:
                    #     print('received ' + data_str)
                    connection.sendall(b'success')
                    #time.sleep(5)
                else:
                    break
        finally:
            connection.close()
# Run the message listener in the background for the life of the scheduler.
listener_thread = threading.Thread(target=thread_function, daemon=True)
listener_thread.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 and V100 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job, file=run_log, flush=True)
for gpu, job in V100_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
V100_used -= 1
V100_job[gpu] = 'idle'
print('V100 finished job: ' + job, file=run_log, flush=True)
if job in demote_list:
demote_list.remove(job)
################ | |
# repo: ludaavics/yogam
import codecs
import logging
import re
from enum import Enum
from math import ceil, floor
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Match,
Optional,
Tuple,
Union,
cast,
)
from urllib.parse import unquote, urljoin, urlparse
import requests
from bs4 import BeautifulSoup # type: ignore
from fake_useragent import UserAgent # type: ignore
from .. import db
from ..models import Listing, Property
from . import exceptions
from .proxies import all_proxies
logger = logging.getLogger(__name__)
# https://stackoverflow.com/a/24519338
ESCAPE_SEQUENCE_RE = re.compile(
r"""
( \\U........ # 8-digit hex escapes
| \\u.... # 4-digit hex escapes
| \\x.. # 2-digit hex escapes
| \\[0-7]{1,3} # Octal escapes
| \\N\{[^}]+\} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)""",
re.UNICODE | re.VERBOSE,
)
def _to_seloger_geographical_code(post_code: str) -> Tuple[str, str]:
    """Translate a French 'Code Postal' into seloger's own geographical code.

    Args:
        post_code: standard French post code (e.g. ``"75011"``).

    Returns:
        A ``(geo_type, geo_code)`` pair, where ``geo_type`` is ``'cp'`` or
        ``'ci'`` and ``geo_code`` is the code in seloger's nomenclature.

    Raises:
        ValueError: if seloger's autocomplete service has no match.
        RuntimeError: if we get more than two matches.
    """
    post_code = str(post_code)
    url = (
        f"https://autocomplete.svc.groupe-seloger.com/api/v2.0/auto/complete/fra"
        f"/63/10/8/SeLoger?text={post_code}"
    )
    cities = requests.get(url).json()

    matches: List[Tuple[str, str]] = []
    for city in cities:
        cp_hit = post_code == str(city.get("Params", {}).get("cp", ""))
        if city.get("Type", None) == "Group" and cp_hit:
            # An exact 'Group' hit is authoritative: keep it alone and stop.
            matches = [("cp", post_code)]
            break
        if post_code in city.get("Meta", {}).get("Zips", []):
            matches.append(("ci", city["Params"]["ci"]))
            continue
        if cp_hit:
            matches.append(("cp", post_code))

    if not matches:
        msg = f"Unknown post code '{post_code}'."
        raise ValueError(msg)

    if len(matches) > 2:  # pragma: no cover
        msg = (
            f"Got multiple matches for post code '{post_code}'. "
            "This should never happen!"
        )
        raise RuntimeError(msg)

    if len(matches) == 2:
        # Departments that overlap large cities (e.g. Paris, Lyon) yield two
        # matches: one for the department ('cp') and one for the group of
        # arrondissements ('ci'). We arbitrarily keep the department code.
        if {geo_type for geo_type, geo_code in matches} != {
            "cp",
            "ci",
        }:  # pragma: no cover
            msg = (
                f"Got multiple matches for post code '{post_code}'. "
                "This should never happen!"
            )
            raise RuntimeError(msg)
        matches = [match for match in matches if match[0] == "cp"]

    return matches[0]
def decode_escapes(s: str) -> str:
    """Decode literal escape sequences (e.g. ``\\n``, ``\\u00e9``) found in *s*.

    Only the escape sequences themselves are decoded; the surrounding text is
    left untouched, avoiding the mangling that a blanket ``unicode-escape``
    decode of the whole string would cause on non-ASCII characters.

    Args:
        s: string that may contain backslash escape sequences.

    Returns:
        ``s`` with each recognized escape sequence replaced by the character
        it denotes.

    Raises:
        TypeError: if ``s`` is not a string.
    """

    def decode_match(match: Match) -> str:
        # Decode just the matched escape sequence, never the whole string.
        return codecs.decode(match.group(0), "unicode-escape")

    try:
        escaped = ESCAPE_SEQUENCE_RE.sub(decode_match, s)
    except TypeError as err:
        # Re-raise with a clearer message while preserving the original
        # exception as the explicit cause (PEP 3134 chaining).
        msg = f"Could not escape '{s}'."
        raise TypeError(msg) from err
    return escaped
class Transaction(Enum):
    """Kind of real-estate transaction; values are sent as seloger's
    ``projects`` search parameter (see :func:`seloger`)."""
    rent = 1
    buy = 2
class PropertyType(Enum):
    """Property categories; values are sent verbatim in seloger's ``types``
    search parameter (see :func:`seloger`).

    NOTE(review): the numeric gaps (5 and 10 are missing) presumably mirror
    unused codes on seloger's side -- confirm against their API docs.
    """
    apartment = 1
    house = 2
    parking = 3
    land = 4
    store = 6
    business = 7
    office = 8
    loft = 9
    apartment_building = 11
    other_building = 12
    castle = 13
    mansion = 14
    program = 15
def seloger(
transaction: str,
post_codes: Union[str, Iterable[str]],
property_types: Union[str, Iterable[str]] = ["apartment", "house"],
min_price: Optional[float] = None,
max_price: Optional[float] = None,
min_size: Optional[float] = None,
max_size: Optional[float] = None,
min_rooms: Optional[float] = None,
max_rooms: Optional[float] = None,
min_beds: Optional[float] = None,
max_beds: Optional[float] = None,
num_results: int = 100,
max_duplicates: int = 25,
timeout: int = 5,
) -> Dict[str, Union[List[str], List[Listing]]]:
"""
Scrape all listing matching search criteria.
Args:
transaction: one of {'rent', 'buy'}
post_codes: list of post codes.
property_types: subest of {'apartment', 'house', 'parking',
'land', 'store', 'business', 'office', 'loft',
'apartment_building', 'other_building', 'castle',
'mansion', 'program'}.
min_price: minimum requested price.
max_price: maximum requested price.
min_size: minimum property size, in square meters.
max_size: maximum property size, in square meters.
min_rooms: minimum number of rooms.
max_rooms: maximum number of rooms.
min_beds: minimum number of bedrooms.
max_beds: maximum number of bedrooms.
num_results: keep scraping until we add this many result to the database.
max_duplicates: keep scraping until we see this many consecutive listings
that are already in our database.
timeout: maximum amount of time, in seconds, to wait for a page to load.
Returns:
a dictionary of "added", "seen" and "failed" listings.
"""
allowed_transactions = cast(Iterable[str], Transaction._member_names_)
if transaction not in allowed_transactions:
msg = (
f"Unknown transaction '{transaction}'. Expected one of "
f"{', '.join(allowed_transactions)}"
)
raise ValueError(msg)
transaction = cast(str, Transaction[transaction].value)
if isinstance(post_codes, str):
post_codes = [post_codes]
if isinstance(property_types, str):
property_types = [property_types]
property_types = [p.lower() for p in property_types]
allowed_property_types = cast(Iterable[str], PropertyType._member_names_)
for property_type in property_types:
if property_type not in allowed_property_types:
msg = (
f"Unknown property_type '{property_type}'. Expected one of "
f"{', '.join(allowed_property_types)}"
)
raise ValueError(msg)
property_types = [
PropertyType[property_type].value for property_type in property_types
]
# we cast to int's here instead of requesting in the signature because scrapers
# of other sources may accept floats.
min_price = floor(min_price) if min_price is not None else min_price
max_price = ceil(max_price) if max_price is not None else max_price
min_size = floor(min_size) if min_size is not None else min_size
max_size = ceil(max_size) if max_size is not None else max_size
min_rooms = floor(min_rooms) if min_rooms is not None else min_rooms
max_rooms = ceil(max_rooms) if max_rooms is not None else max_rooms
min_beds = floor(min_beds) if min_beds is not None else min_beds
max_beds = ceil(max_beds) if max_beds is not None else max_beds
# convert code postal to se loger's internal coade
seloger_codes = [_to_seloger_geographical_code(cp) for cp in post_codes]
ci = [geo_code for geo_type, geo_code in seloger_codes if geo_type == "ci"]
cp = [geo_code for geo_type, geo_code in seloger_codes if geo_type == "cp"]
# fetch all the listings already processed
already_done_urls = [
l[0]
for l in db.session.query(Listing.url).filter(Listing.source == "seloger").all()
]
# build the search url
search_url = "https://www.seloger.com/list.html"
max_rooms = max_rooms + 1 if max_rooms is not None else 10
max_beds = min(max_beds + 1 if max_beds is not None else 10, max_rooms - 1)
params: Dict[str, Union[float, str]] = {
"projects": transaction,
"types": ",".join(map(str, property_types)),
"places": "["
+ "|".join([f"{{ci:{_ci}}}" for _ci in ci])
+ "|".join([f"{{cp:{_cp}}}" for _cp in cp])
+ "]",
"price": f"{min_price or 0}/{max_price or 'NaN'}",
"surface": f"{min_size or 0}/{max_size or 'NaN'}",
"rooms": ",".join(map(str, range(min_rooms or 0, max_rooms))),
"bedrooms": ",".join(map(str, range(min_beds or 0, max_beds))),
"enterprise": 0,
"qsVersion": 1.0,
}
if transaction == Transaction.buy.value:
params.update(
{"natures": "1,2"}
) # ancien, neuf. we exclude viager, project de construction
# user agent generator
ua = UserAgent()
# get a pool of proxies
proxy_pool = all_proxies(infinite=True)
added_listings: List[Listing] = []
seen_listings: List[Listing] = []
failed_listings: List[str] = []
scraped = 0
consecutive_duplicates = 0
page_num = 0
while (scraped < num_results) and (consecutive_duplicates < max_duplicates):
# get a page of results
if page_num != 0:
params.update({"LISTING-LISTpg": page_num + 1})
search_attempts = 0
max_search_attempts = 50
while search_attempts < max_search_attempts:
headers = {"user-agent": ua.random}
proxy = next(proxy_pool)
proxies = {"http": proxy, "https": proxy}
try:
page = requests.get(
search_url,
headers=headers,
params=params,
proxies=proxies,
timeout=timeout,
)
except requests.exceptions.RequestException:
search_attempts += 1
continue
if "captcha" in urlparse(page.url).path:
search_attempts += 1
continue
break
soup = BeautifulSoup(page.text, "html.parser")
is_seloger = r".*seloger.com.*" # exclude sponsored external listings
links = [
link["href"]
for link in soup.find_all(
"a", attrs={"name": "classified-link", "href": re.compile(is_seloger)}
)
]
links = [urljoin(link, urlparse(link).path) for link in links]
if not links:
break
# scrape each of the listings on the page
total = len(links)
done = [False for _ in range(total)]
msg = (
f"Starting the scrape of {total} listings "
f"fetched from {unquote(page.url)} ."
)
logger.info(msg)
previous_round = -1
while sum(done) > previous_round:
previous_round = sum(done)
for i, link in enumerate(links):
if done[i]:
continue
if link in already_done_urls:
msg = f"Skipping link #{i}, as it is already in our DB: {link}."
logger.debug(msg)
done[i] = True
consecutive_duplicates += 1
seen_listings.append(link)
continue
msg = f"Scraping link #{i}: {link} ..."
logger.debug(msg)
proxies = {"http": proxy, "https": proxy}
try:
listing, is_new = _seloger(
link,
headers={"User-Agent": ua.random},
proxies=proxies,
timeout=timeout,
)
except requests.exceptions.RequestException as e:
msg = f"👻Failed to retrieve the page ({type(e).__name__}).👻"
logger.debug(msg)
proxy = next(proxy_pool)
continue
except exceptions.ListingParsingError as e:
logger.debug(e)
continue
except Exception:
# we don't want to interrupt the program, but we don't want to
# silence the unexpected error.
msg = f"💥Unpexpected error.💥"
logging.exception(msg)
continue
msg = f"💫Scrape suceeded.💫"
logger.debug(msg)
done[i] = True
if is_new:
consecutive_duplicates = 0
added_listings.append(listing)
else:
# should be rare, but is possible when the same | |
be
monitored, whereas *ALL_OUTPUT_PORTS* specifies that *every* OutputPort of every Mechanism be monitored.
|
The default for the **monitor_for_control** argument is *MonitoredOutputPortsOption.PRIMARY_OUTPUT_PORTS*.
The OutputPorts specified in the **monitor_for_control** argument are added to any already specified for the
ControlMechanism's `objective_mechanism <ControlMechanism.objective_mechanism>`, and the full set is listed in
the ControlMechanism's `monitored_output_ports <EVCControlMechanism.monitored_output_ports>` attribute, and its
ObjectiveMechanism's `monitored_output_ports <ObjectiveMechanism.monitored_output_ports>` attribute.
..
* **control_signals** argument -- used to specify the parameters of Components in the System to be controlled. These
can be specified in any of the ways used to `specify ControlSignals <ControlMechanism_Control_Signals>` in the
*control_signals* argument of a ControlMechanism. These are added to any `ControlSignals <ControlSignal>` that have
already been specified for the `controller <System.controller>` (listed in its `control_signals
<ControlMechanism.control_signals>` attribute), and any parameters that have directly been `specified for
control <ParameterPort_Specification>` within the System (see `System_Control` below for additional details).
COMMENT
.. _Composition_Controller_Execution:
Controller Execution
====================
The `controller <Composition.controller>` is executed only if the Composition's `enable_controller
<Composition.enable_controller>` attribute is True.  This is generally done automatically when the `controller
<Composition.controller>` is `assigned <Composition_Controller_Assignment>`. If enabled, the `controller
<Composition.controller>` is generally executed either before or after all of the other Components in the Composition
have been executed, as determined by the Composition's `controller_mode <Composition.controller_mode>` attribute.
However, the Composition's `controller_condition <Composition.controller_condition>` attribute can be used to
customize when it is executed. All three of these attributes can be specified in corresponding arguments of the
Composition's constructor, or programmatically after it is constructed by assigning the desired value to the
attribute.
COMMENT:
For Developers
--------------
.. _Composition_Execution_Contexts_Init:
Initialization of Execution Contexts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- The parameter values for any execution context can be copied into another execution context by using \
Component._initialize_from_context, which when called on a Component copies the values for all its parameters \
and recursively for all of the Component's `_dependent_components <Component._dependent_components>`
- `_dependent_components <Component._dependent_components>` should be added to for any new Component that requires \
other Components to function properly (beyond "standard" things like Component.function, \
or Mechanism.input_ports, as these are added in the proper classes' _dependent_components)
- the intent is that with ``_dependent_components`` set properly, calling \
``obj._initialize_from_context(new_context, base_context)`` should be sufficient to run obj \
under **new_context**
- a good example of a "nonstandard" override is `OptimizationControlMechanism._dependent_components`
.. _Composition_TIming:
*Timing*
========
When `run <Composition.run>` is called by a Composition, it calls that Composition's `execute <Composition.execute>`
method once for each `input <Composition_Run_Inputs>` (or set of inputs) specified in the call to `run
<Composition.run>`, which constitutes a `TRIAL` of execution. For each `TRIAL`, the Component makes repeated calls
to its `scheduler <Composition.scheduler>`, executing the Components it specifies in each
`TIME_STEP`, until every Component has been executed at least once or another `termination condition
<Scheduler_Termination_Conditions>` is met. The `scheduler <Composition.scheduler>` can be
used in combination with `Condition` specifications for individual Components to execute different Components at
different time scales.
Runtime Params
COMMENT
.. _Composition_Learning:
Learning in a Composition
-------------------------
* `Composition_Learning_Standard`
* `Composition_Learning_AutodiffComposition`
* `Composition_Learning_UDF`
Learning is used to modify the `Projections <Projection>` between Mechanisms in a Composition. More specifically,
it modifies the `matrix <MappingProjection.matrix>` parameter of those `MappingProjections <MappingProjection>`,
which implements the strengths ("weights") of the associations between representations in the Mechanisms they connect.
There are three ways of implementing learning in a Composition: i) using `standard PsyNeuLink Components
<Composition_Learning_Standard>`; ii) using the `AutodiffComposition <Composition_Learning_AutodiffComposition>` -- a
specialized subclass of Composition that executes learning using `PyTorch <https://pytorch.org>`_; and iii) by using
`UserDefinedFunctions <UserDefinedFunction>`.  The advantage of using standard PsyNeuLink components is that it
assigns each operation involved in learning to a dedicated Component. This helps make clear exactly what those
operations are, the sequence in which they are carried out, and how they interact with one another. However,
this can also make execution inefficient, due to the "overhead" incurred by distributing the calculations over
different Components. If more efficient computation is critical, then the `AutodiffComposition` can be used to
execute a compatible PsyNeuLink Composition in PyTorch, or one or more `UserDefinedFunctions <UserDefinedFunction>`
can be assigned to either PyTorch functions or those in any other Python environment that implements learning and
accepts and returns tensors. Each of these approaches is described in more detail below.
.. _Composition_Learning_Standard:
*Learning Using PsyNeuLink Components*
======================================
* `Composition_Learning_Unsupervised`
* `Composition_Learning_Supervised`
When learning is `implemented using standard PsyNeuLink Components <Composition_Learning_Standard>`, each calculation
and/or operation involved in learning -- including those responsible for computing errors, and for using those to
modify the Projections between Mechanisms, is assigned to a different PsyNeuLink `learning-related Component
<Composition_Learning_Components>`. These can be used to implement any form of learning. Learning is generally
considered to fall into two broad classes:  *unsupervised*, in which associative strengths are modified
by mere exposure to the inputs, in order to capture structure and/or relationships among them; and *supervised*,
in which the associative strengths are modified so that each input generates a desired output (see
`<https://www.geeksforgeeks.org/supervised-unsupervised-learning/>`_ for a useful summary). Both forms of
learning can be implemented in a Composition, using `LearningMechanisms <LearningMechanism>` that compute the
changes to make to the `matrix <MappingProjection.matrix>` parameter of `MappingProjections <MappingProjection>`
being learned, and `LearningProjections <LearningProjection>` that apply those changes to the MappingProjections).
In addition, supervised learning uses a `ComparatorMechanism` to compute the error between the response generated by
the Composition to the input stimulus, and the target stimulus used to designate the desired response. In most
cases, the LearningMechanisms, LearningProjections and, where needed, ComparatorMechanism are generated automatically,
as described for each form of learning below. However, these can also be configured manually using their constructors,
or modified by assigning values to their attributes.
.. _Composition_Learning_Unsupervised:
Unsupervised Learning
~~~~~~~~~~~~~~~~~~~~~
Unsupervised learning is implemented using a `RecurrentTransferMechanism`, setting its **enable_learning** argument
to True, and specifying the desired `LearningFunction <LearningFunctions>` in its **learning_function** argument. The
default is `Hebbian`, however others can be specified (such as `ContrastiveHebbian` or `Kohonen`). When a
RecurrentTransferMechanism with learning enabled is added to a Composition, an `AutoAssociativeLearningMechanism`
that is appropriate for the specified learning_function is automatically constructed and added to the Composition,
as is a `LearningProjection` from the AutoAssociativeLearningMechanism to the RecurrentTransferMechanism's
`recurrent_projection <RecurrentTransferMechanism.recurrent_projection>`. When the Composition is run and the
RecurrentTransferMechanism is executed, its AutoAssociativeLearningMechanism is also executed, which updates the `matrix
<AutoAssociativeProjection.matrix>` of its `recurrent_projection <RecurrentTransferMechanism.recurrent_projection>`
in response to its input.
COMMENT:
• DISCUSS LEARNING COMPONENTS RETURNED ONCE add_node AND add_linear_processing_pathway RETURN THEM
• ADD EXAMPLE HERE
COMMENT
.. _Composition_Learning_Supervised:
Supervised Learning
~~~~~~~~~~~~~~~~~~~
* `Composition_Learning_Methods`
* `Composition_Learning_Components`
* `Compositon_Learning_Execution`
COMMENT:
TBI: Supervised learning is implemented using a Composition's `add_learning_pathway` method, and specifying an
appropriate `LearningFunction <LearningFunctions>` in its **learning_function** argument.
XXXMORE HERE ABOUT TYPES OF FUNCTIONS
• MODIFY REFERENCE TO LEARNING COMPONENT NAMES WHEN THEY ARE IMPLEMENTED AS AN ENUM CLASS
• ADD EXAMPLES - POINT TO ONES IN BasicsAndSampler
COMMENT
.. _Composition_Learning_Methods:
*Learning Methods*
^^^^^^^^^^^^^^^^^^
Supervised learning is implemented using a Composition's method for the desired type of learning. There are currently
three such methods:
• `add_linear_learning_pathway`
• `add_reinforcement_learning_pathway`
• `add_td_learning_pathway`
• `add_backpropagation_learning_pathway`.
Each uses the Composition's `add_linear_processing_pathway` method to create a *learning sequence* specified in their
**pathway** argument: a contiguous sequence of `ProcessingMechanisms <ProcessingMechanism>` and the `MappingProjections
<MappingProjection>` between them, in which learning modifies the `matrix <MappingProjection.matrix>` parameter of the
MappingProjections in the sequence, so that the input to the first ProcessingMechanism in the sequence generates an
output from the last ProcessingMechanism that matches as closely as possible the value specified for the `target
mechanism <Process_Learning_Components>` in the **inputs** argument of the Composition's `run <Composition.run>` method.
The Mechanisms in the pathway must be compatible with learning (that is, their `function <Mechanism_Base.function>` must
be compatible with the `function <LearningMechanism.function>` of the `LearningMechanism` for the MappingProjections
they receive (see `LearningMechanism_Function`). The Composition's `learning methods <Composition_Learning_Methods>`
return the set of learning components generated for the pathway, as described below.
.. _Composition_Learning_Components:
*Learning Components*
^^^^^^^^^^^^^^^^^^^^^
For each learning sequence specified in a `learning method <Composition_Learning_Methods>`, it creates the
following Components, and assigns to them the `NodeRoles <NodeRole>` indicated:
.. _COMPARATOR_MECHANISM:
* *COMPARATOR_MECHANISM* `ComparatorMechanism` -- used to `calculate an error signal
<ComparatorMechanism_Execution>` for the sequence by comparing the value received by the ComparatorMechanism's
*SAMPLE* `InputPort <ComparatorMechanism_Structure>` (from the `output <LearningMechanism_Activation_Output>` of
the last Processing Mechanism in the learning sequence) with the value received in the *COMPARATOR_MECHANISM*'s
*TARGET* `InputPort <ComparatorMechanism_Structure>` (from the *TARGET_MECHANISM* generated by the method --
see below); this is assigned the `NodeRole` `LEARNING` in the Composition.
..
.. _TARGET_MECHANISM:
* *TARGET_MECHANISM* -- receives the value to be used by the *COMPARATOR_MECHANISM* as the target in
computing the error signal (see above); that value must be specified in the **inputs** argument of the
Composition's `run <Composition.run>` method (as the input to the *TARGET_MECHANISM*; this | |
<filename>src/sqlfluff/parser/segments_common.py
"""Common Segment Definitions.
Here we define:
- KeywordSegment
- ReSegment
These depend on the base segments, and extend them
for some more useful use cases. The intent here is that
these segments have meaning regardless of what dialect
we use, and will be common between all of them.
"""
import logging
import re
from .segments_base import (BaseSegment, RawSegment, parse_match_logging)
from .match import MatchResult
class KeywordSegment(RawSegment):
    """A segment used for matching single words or entities.

    The Keyword Segment is a bit special, because while it
    can be instantiated directly, we mostly generate them on the
    fly for convenience. The `make` method is defined on RawSegment
    instead of here, but can be used here too.
    """

    type = 'keyword'
    _is_code = True
    _template = '<unset>'
    _case_sensitive = False

    @classmethod
    def simple(cls, parse_context):
        """Does this matcher support an uppercase hash matching route?

        The keyword segment DOES, provided that it is not case sensitive;
        we return a tuple in case there is more than one option.
        """
        if not cls._case_sensitive:
            # NB: We go UPPER on make, so no need to convert here
            return (cls._template,)
        return False

    @classmethod
    def match(cls, segments, parse_context):
        """Compare input segments for a match, return a `MatchResult`.

        Note: For Keyword matching, we only consider the *first* element,
        because we assume that a keyword can only span one raw segment.
        """
        # If we've been passed the singular, make it a list
        if isinstance(segments, BaseSegment):
            segments = [segments]
        # We're only going to match against the first element
        if len(segments) >= 1:
            raw = segments[0].raw
            pos = segments[0].pos_marker
            raw_comp = raw if cls._case_sensitive else raw.upper()
            parse_match_logging(
                cls.__name__[:10], 'match', 'KW',
                parse_context=parse_context, v_level=4, pattern=cls._template, test=raw_comp, name=cls.__name__)
            if cls._template == raw_comp:
                m = (cls(raw=raw, pos_marker=pos),)  # Return as a tuple
                return MatchResult(m, segments[1:])
        else:
            # Lazy %-style args: the message is only interpolated if DEBUG
            # logging is actually enabled.
            logging.debug("%s will not match sequence of length %s", cls.__name__, len(segments))
        return MatchResult.from_unmatched(segments)

    @classmethod
    def expected_string(cls, dialect=None, called_from=None):
        """Return the expected string for this segment."""
        return cls._template
class ReSegment(KeywordSegment):
    """A more flexible matching segment which uses regexes.

    This is more flexible than the `KeywordSegment` but also more complicated
    and so the `KeywordSegment` should be used instead wherever possible.
    """

    _anti_template = None
    """If `_anti_template` is set, then we exclude anything that matches it."""

    @classmethod
    def simple(cls, parse_context):
        """Does this matcher support an uppercase hash matching route?

        The regex segment does NOT for now. We might need to later
        for efficiency.
        """
        return False

    @classmethod
    def match(cls, segments, parse_context):
        """Compare input segments for a match, return a `MatchResult`.

        ReSegment implements its own matching function where we assume
        that ._template is an r"" string, formatted for use directly as
        a regex. This only matches on a single segment.
        """
        # Normalise a bare segment into a list.
        if isinstance(segments, BaseSegment):
            segments = [segments]
        # NB: We only ever test the first segment of the sequence.
        raw_str = segments[0].raw
        # Deal with case sensitivity.
        comp_str = raw_str if cls._case_sensitive else raw_str.upper()
        if len(raw_str) == 0:
            raise ValueError("Zero length string passed to ReSegment!?")
        parse_match_logging(
            cls.__name__[:10], 'match', 'RE',
            parse_context=parse_context, v_level=4, pattern=cls._template, test=comp_str, name=cls.__name__)
        # Try the regex, and require that it consumed the *whole* string.
        found = re.match(cls._template, comp_str)
        if found and found.group(0) == comp_str:
            # A matching _anti_template (if set) vetoes the match.
            if not (cls._anti_template and re.match(cls._anti_template, comp_str)):
                matched = (cls(raw=raw_str, pos_marker=segments[0].pos_marker),)  # Return a tuple
                return MatchResult(matched, segments[1:])
        return MatchResult.from_unmatched(segments)

    @classmethod
    def expected_string(cls, dialect=None, called_from=None):
        """Return the expected string for this segment."""
        return cls.type
class NamedSegment(KeywordSegment):
    """A segment which matches based on the `name` property of segments.

    Useful for matching quoted segments, or anything else which
    is largely identified by the Lexer.
    """

    @classmethod
    def simple(cls, parse_context):
        """Does this matcher support an uppercase hash matching route?

        The NamedSegment does NOT for now. We might need to later for
        efficiency. There is a way that this *could* be enabled, by allowing
        *another* shortcut route, to look ahead at the names of upcoming
        segments, rather than their content.
        """
        return False

    @classmethod
    def match(cls, segments, parse_context):
        """Compare input segments for a match, return a `MatchResult`.

        NamedSegment implements its own matching function where we assume
        that ._template is the `name` of a segment.
        """
        # Normalise a bare segment into a list.
        if isinstance(segments, BaseSegment):
            segments = [segments]
        # We only ever test the first segment of the sequence.
        if len(segments) >= 1:
            first = segments[0]
            seg_name = first.name if cls._case_sensitive else first.name.upper()
            parse_match_logging(
                cls.__name__[:10], 'match', 'NM',
                parse_context=parse_context, v_level=4, pattern=cls._template, test=seg_name, name=cls.__name__)
            if cls._template == seg_name:
                matched = (cls(raw=first.raw, pos_marker=first.pos_marker),)  # Return a tuple
                return MatchResult(matched, segments[1:])
        else:
            logging.debug("{1} will not match sequence of length {0}".format(len(segments), cls.__name__))
        return MatchResult.from_unmatched(segments)

    @classmethod
    def expected_string(cls, dialect=None, called_from=None):
        """Return the expected string for this segment."""
        return "[" + cls._template + "]"
class LambdaSegment(BaseSegment):
    """A segment which matches when the given lambda returns true for it.

    This is one of the more abstract segments, and could be used to
    implement versions of most of the other kinds of segments indirectly.
    It is also the most complicated and the most abstract and so should be
    used thoughtfully.
    """

    @classmethod
    def match(cls, segments, parse_context):
        """Compare input segments for a match, return a `MatchResult`.

        LambdaSegment implements its own matching function: we assume
        that ._template is a function (set via `make` below).
        """
        # Normalise a bare segment into a list.
        if isinstance(segments, BaseSegment):
            segments = [segments]
        predicate = cls._func
        # Scan forward over the leading run of segments that satisfy the
        # predicate; we match as many of them as we can.
        idx = 0
        while idx < len(segments) and predicate(segments[idx]):
            idx += 1
        matched = tuple(segments[:idx])
        remainder = segments[idx:]
        if len(remainder) == 0:
            # Everything was consumed.
            return MatchResult.from_matched(matched)
        # Some buffer left over without a match.
        return MatchResult(matched, remainder)

    @classmethod
    def expected_string(cls, dialect=None, called_from=None):
        """Return the expected string for this segment."""
        return "!!TODO!!"

    @classmethod
    def make(cls, func, name, **kwargs):
        """Make a subclass of the segment using a method.

        Note: This requires a custom make method, because it's a bit different.
        """
        # The classname indicates the mother class for clarity.
        classname = "{0}_{1}".format(name, cls.__name__)
        # Dynamically generate a new (uninstantiated) class with the
        # predicate baked in as `_func`.
        return type(classname, (cls, ), dict(_func=func, _name=name, **kwargs))
class Indent(RawSegment):
"""A segment which is empty but indicates where an indent should be.
This segment is always empty, i.e. it's raw format is '', but it indicates
the position of a theoretical indent which will be used in linting
and reconstruction. Even if there is an *actual indent* that occurs
in the same place this intentionally *won't* capture it, they will just
be compared later.
"""
type = 'indent'
_is_code = False
_template = '<unset>'
_case_sensitive = False
indent_val = 1
is_meta = True
_config_rules = None
@classmethod
def when(cls, **kwargs):
"""Configure whether this indent/dedent is available given certain rules.
All we do is override the _config_rules parameter
for the class.
_config_rules should be an iterable of tuples (config, True|False)
which determine whether this class is enabled or not. Later elements
override earlier ones.
"""
if len(kwargs) > 1:
raise ValueError("More than one condition specified for {0!r}. [{1!r}]".format(
cls, kwargs))
# Sorcery (but less to than on KeywordSegment)
return type(
cls.__name__,
(cls, ),
dict(_config_rules=kwargs)
)
@classmethod
def is_enabled(cls, parse_context):
"""Given a certain parse context, determine if this segment is enabled.
All rules are assumed to be False if not present in the parse_context,
and later rules in the config override previous ones.
"""
# All rules are assumed to be False if not present
if cls._config_rules is not None:
config = parse_context.indentation_config or {}
# This looks like an iteration, but there | |
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.total_points
stat_stats = loaddata.total_points_stats
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.hp
stat_stats = loaddata.hp_stats
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.speed
stat_stats = loaddata.speed_stats
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.attack
stat_stats = loaddata.attack_stats
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.defense
stat_stats = loaddata.defense_stats
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.sp_attack
stat_stats = loaddata.sp_attack_stats
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.sp_defense
stat_stats = loaddata.sp_defense_stats
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.heights
stat_stats = loaddata.height_stats
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.weight
stat_stats = loaddata.weight_stats
unit = '(kg)'
else:
return
else:
return
elif data_set == "2": # trimmed pokemon
set_name = "Pokemon"
modifier = '(trimmed)'
# grass pokemon
if type_set == "1":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_grass_types['total_points']
stat_stats = loaddata.trimmed_grass_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['hp']
stat_stats = loaddata.trimmed_grass_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['speed']
stat_stats = loaddata.trimmed_grass_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['attack']
stat_stats = loaddata.trimmed_grass_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['defense']
stat_stats = loaddata.trimmed_grass_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['sp_attack']
stat_stats = loaddata.trimmed_grass_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_grass_types['sp_defense']
stat_stats = loaddata.trimmed_grass_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_grass_types['height_m']
stat_stats = loaddata.trimmed_grass_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_grass_types['weight_kg']
stat_stats = loaddata.trimmed_grass_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# fire pokemon
elif type_set == "2":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_fire_types['total_points']
stat_stats = loaddata.trimmed_fire_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['hp']
stat_stats = loaddata.trimmed_fire_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['speed']
stat_stats = loaddata.trimmed_fire_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['attack']
stat_stats = loaddata.trimmed_fire_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['defense']
stat_stats = loaddata.trimmed_fire_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['sp_attack']
stat_stats = loaddata.trimmed_fire_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_fire_types['sp_defense']
stat_stats = loaddata.trimmed_fire_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_fire_types['height_m']
stat_stats = loaddata.trimmed_fire_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_fire_types['weight_kg']
stat_stats = loaddata.trimmed_fire_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# water pokemon
elif type_set == "3":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_water_types['total_points']
stat_stats = loaddata.trimmed_water_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['hp']
stat_stats = loaddata.trimmed_water_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['speed']
stat_stats = loaddata.trimmed_water_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['attack']
stat_stats = loaddata.trimmed_water_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['defense']
stat_stats = loaddata.trimmed_water_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['sp_attack']
stat_stats = loaddata.trimmed_water_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_water_types['sp_defense']
stat_stats = loaddata.trimmed_water_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_water_types['height_m']
stat_stats = loaddata.trimmed_water_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_water_types['weight_kg']
stat_stats = loaddata.trimmed_water_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# electric pokemon
elif type_set == "4":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_electric_types['total_points']
stat_stats = loaddata.trimmed_electric_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['hp']
stat_stats = loaddata.trimmed_electric_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['speed']
stat_stats = loaddata.trimmed_electric_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['attack']
stat_stats = loaddata.trimmed_electric_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['defense']
stat_stats = loaddata.trimmed_electric_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['sp_attack']
stat_stats = loaddata.trimmed_electric_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_electric_types['sp_defense']
stat_stats = loaddata.trimmed_electric_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_electric_types['height_m']
stat_stats = loaddata.trimmed_electric_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_electric_types['weight_kg']
stat_stats = loaddata.trimmed_electric_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# psychic pokemon
elif type_set == "5":
if stat_set == "1": # stat totals
stat_name = "Stat Total"
test_bounds = (100, 600)
stat_values = loaddata.trimmed_psychic_types['total_points']
stat_stats = loaddata.trimmed_psychic_types['total_points'].describe()
unit = ''
elif stat_set == "2": # hp
stat_name = "HP"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['hp']
stat_stats = loaddata.trimmed_psychic_types['hp'].describe()
unit = ''
elif stat_set == "3": # speed
stat_name = "Speed"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['speed']
stat_stats = loaddata.trimmed_psychic_types['speed'].describe()
unit = ''
elif stat_set == "4": # attack
stat_name = "Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['attack']
stat_stats = loaddata.trimmed_psychic_types['attack'].describe()
unit = ''
elif stat_set == "5": # defense
stat_name = "Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['defense']
stat_stats = loaddata.trimmed_psychic_types['defense'].describe()
unit = ''
elif stat_set == "6": # sp.attack
stat_name = "Special Attack"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['sp_attack']
stat_stats = loaddata.trimmed_psychic_types['sp_attack'].describe()
unit = ''
elif stat_set == "7": # sp.defense
stat_name = "Special Defense"
test_bounds = (20, 256)
stat_values = loaddata.trimmed_psychic_types['sp_defense']
stat_stats = loaddata.trimmed_psychic_types['sp_defense'].describe()
unit = ''
elif stat_set == "8": # height
stat_name = "Height(m)"
test_bounds = (0, 20)
stat_values = loaddata.trimmed_psychic_types['height_m']
stat_stats = loaddata.trimmed_psychic_types['height_m'].describe()
unit = '(m)'
elif stat_set == "9": # weight
stat_name = "Weight(kg)"
test_bounds = (1, 800)
stat_values = loaddata.trimmed_psychic_types['weight_kg']
stat_stats = loaddata.trimmed_psychic_types['weight_kg'].describe()
unit = '(kg)'
else:
return
# ice pokemon
elif type_set == | |
#!/usr/bin/env python
# Copyright (c) 2007-2008 ActiveState Software Inc.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
"""Test the Python markdown2.py."""
import os
import sys
from os.path import join, dirname, abspath, exists, splitext, basename
import re
from glob import glob
from pprint import pprint
import unittest
import codecs
import difflib
import doctest
try:
from json import loads as json_loads
except ImportError:
def json_loads(s):
# Total hack to get support for 2.4. "simplejson" only supports back
# to 2.5 now and `json` is only in the Python stdlib >=2.6.
return eval(s, {}, {})
from testlib import TestError, TestSkipped, tag
sys.path.insert(0, join(dirname(dirname(abspath(__file__)))))
try:
import markdown2
finally:
del sys.path[0]
#---- Python version compat

# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
    py3 = False
    try:
        bytes
    except NameError:
        # Python <2.6 has no `bytes` builtin; alias it to `str`.
        bytes = str
    base_string_type = basestring
elif sys.version_info[0] >= 3:
    py3 = True
    # Map the Python 2 names onto their Python 3 equivalents so the rest
    # of the file can use `unicode`/`unichr` unconditionally.
    unicode = str
    base_string_type = str
    unichr = chr
#---- Test cases
class _MarkdownTestCase(unittest.TestCase):
    """Helper class for Markdown tests."""
    # Show full (untruncated) diffs from unittest assertions.
    maxDiff = None

    def _assertMarkdownParity(self, text):
        """Assert that markdown2.py produces same output as Markdown.pl."""
        #TODO add normalization
        python_html = markdown2.markdown(text)
        perl_html = _markdown_with_perl(text)
        close_though = ""
        # Flag near-misses where only newline placement differs.
        if python_html != perl_html \
           and (python_html.replace('\n', '')
                == perl_html.replace('\n', '')):
            close_though = " (close though -- all but EOLs match)"
        self.assertEqual(python_html, perl_html, _dedent("""\
            markdown2.py didn't produce the same output as Markdown.pl%s:
            ---- text ----
            %s ---- Python markdown2.py HTML ----
            %s ---- Perl Markdown.pl HTML ----
            %s""") % (close_though, _display(text),
                      _display(python_html), _display(perl_html)))

    def _assertMarkdownPath(self, text_path, encoding="utf-8", opts=None,
                            toc_html_path=None, metadata_path=None):
        """Load a `*.text` case file and assert against its sibling `*.html`.

        Optional `*.toc_html` and `*.metadata` sibling files, when given,
        are forwarded to _assertMarkdown as extra expectations.
        """
        text = codecs.open(text_path, 'r', encoding=encoding).read()
        html_path = splitext(text_path)[0] + ".html"
        html = codecs.open(html_path, 'r', encoding=encoding).read()
        extra = {}
        if toc_html_path:
            extra["toc_html"] = codecs.open(toc_html_path, 'r', encoding=encoding).read()
            extra["toc_html_path"] = toc_html_path
        if metadata_path:
            extra["metadata"] = json_loads(
                codecs.open(metadata_path, 'r', encoding=encoding).read())
            extra["metadata_path"] = metadata_path
        self._assertMarkdown(text, html, text_path, html_path, opts=opts,
                             **extra)

    def _assertMarkdown(self, text, html, text_path=None, html_path=None,
                        opts=None, toc_html=None, toc_html_path=None, metadata=None,
                        metadata_path=None):
        """Assert that markdown2.py produces the expected HTML."""
        if text_path is None: text_path = "<text content>"
        if html_path is None: html_path = "<html content>"
        if opts is None:
            opts = {}
        # Compare *normalized* HTML so randomized email obfuscation and
        # platform EOLs don't cause spurious failures.
        norm_html = norm_html_from_html(html)
        python_html = markdown2.markdown(text, **opts)
        python_norm_html = norm_html_from_html(python_html)
        close_though = ""
        if python_norm_html != norm_html \
           and (python_norm_html.replace('\n', '')
                == norm_html.replace('\n', '')):
            close_though = " (close though -- all but EOLs match)"
        diff = ''
        if python_norm_html != norm_html:
            diff = difflib.unified_diff(
                norm_html.splitlines(1),
                python_norm_html.splitlines(1),
                html_path,
                "markdown2 "+text_path)
            diff = ''.join(diff)
        errmsg = _dedent("""\
            markdown2.py didn't produce the expected HTML%s:
            ---- text (escaping: .=space, \\n=newline) ----
            %s ---- Python markdown2.py HTML (escaping: .=space, \\n=newline) ----
            %s ---- expected HTML (escaping: .=space, \\n=newline) ----
            %s ---- diff ----
            %s""") % (close_though, _display(text),
                      _display(python_html), _display(html),
                      _indent(diff))

        def charreprreplace(exc):
            # codecs error handler: replace unencodable chars with their repr,
            # so failure messages can always be rendered as ASCII.
            if not isinstance(exc, UnicodeEncodeError):
                raise TypeError("don't know how to handle %r" % exc)
            if py3:
                obj_repr = repr(exc.object[exc.start:exc.end])[1:-1]
            else:
                # repr -> remote "u'" and "'"
                obj_repr = repr(exc.object[exc.start:exc.end])[2:-1]
            return (unicode(obj_repr), exc.end)
        codecs.register_error("charreprreplace", charreprreplace)

        self.assertEqual(python_norm_html, norm_html, errmsg)

        if toc_html:
            # Also check the generated table-of-contents HTML, if expected.
            python_toc_html = python_html.toc_html
            python_norm_toc_html = norm_html_from_html(python_toc_html)
            norm_toc_html = norm_html_from_html(toc_html)
            diff = ''
            if python_norm_toc_html != norm_toc_html:
                diff = difflib.unified_diff(
                    norm_toc_html.splitlines(1),
                    python_norm_toc_html.splitlines(1),
                    toc_html_path,
                    "`markdown2 %s`.toc_html" % text_path)
                diff = ''.join(diff)
            errmsg = _dedent("""\
                markdown2.py didn't produce the expected TOC HTML%s:
                ---- text (escaping: .=space, \\n=newline) ----
                %s ---- Python markdown2.py TOC HTML (escaping: .=space, \\n=newline) ----
                %s ---- expected TOC HTML (escaping: .=space, \\n=newline) ----
                %s ---- diff ----
                %s""") % (close_though, _display(text),
                          _display(python_toc_html), _display(toc_html),
                          _indent(diff))
            self.assertEqual(python_norm_toc_html, norm_toc_html,
                             errmsg.encode('ascii', 'charreprreplace'))
        if metadata:
            self.assertEqual(python_html.metadata, metadata)

    def generate_tests(cls):
        """Add test methods to this class for each test file in
        `cls.cases_dir'.
        """
        cases_pat = join(dirname(__file__), cls.cases_dir, "*.text")
        for text_path in glob(cases_pat):
            # Load an options (`*.opts` file, if any).
            # It must be a Python dictionary. It will be passed as
            # kwargs to the markdown function.
            opts = {}
            opts_path = splitext(text_path)[0] + ".opts"
            if exists(opts_path):
                try:
                    opts = eval(open(opts_path, 'r').read())
                except Exception:
                    _, ex, _ = sys.exc_info()
                    print("WARNING: couldn't load `%s' opts file: %s" \
                          % (opts_path, ex))
            toc_html_path = splitext(text_path)[0] + ".toc_html"
            if not exists(toc_html_path):
                toc_html_path = None
            metadata_path = splitext(text_path)[0] + ".metadata"
            if not exists(metadata_path):
                metadata_path = None
            # Bind the per-case values as lambda defaults so each generated
            # test captures its own paths (avoids the late-binding-closure trap).
            test_func = lambda self, t=text_path, o=opts, c=toc_html_path, m=metadata_path: \
                self._assertMarkdownPath(t, opts=o, toc_html_path=c,
                                         metadata_path=m)
            tags_path = splitext(text_path)[0] + ".tags"
            if exists(tags_path):
                tags = []
                for line in open(tags_path):
                    if '#' in line: # allow comments in .tags files
                        line = line[:line.index('#')]
                    tags += line.split()
                test_func.tags = tags
            # Derive a valid test method name from the case file name.
            name = splitext(basename(text_path))[0]
            name = name.replace(' - ', '_')
            name = name.replace(' ', '_')
            name = re.sub("[(),]", "", name)
            test_name = "test_%s" % name
            setattr(cls, test_name, test_func)
    generate_tests = classmethod(generate_tests)
class TMTestCase(_MarkdownTestCase):
    """Test cases from the "tm-cases" directory (next to this test file)."""
    cases_dir = "tm-cases"
class MarkdownTestTestCase(_MarkdownTestCase):
    """Test cases from MarkdownTest-1.0."""
    # Case files live in this directory next to the test file.
    cases_dir = "markdowntest-cases"
class PHPMarkdownTestCase(_MarkdownTestCase):
    """Test cases from MDTest (the PHP Markdown test suite)."""
    cases_dir = "php-markdown-cases"
class PHPMarkdownExtraTestCase(_MarkdownTestCase):
    """Test cases from MDTest.

    These are all knownfailures because these test non-standard Markdown
    syntax not implemented in markdown2.py. See
    <http://www.michelf.com/projects/php-markdown/extra/> for details.
    """
    __tags__ = ["knownfailure"]
    cases_dir = "php-markdown-extra-cases"
class DirectTestCase(_MarkdownTestCase):
    """These are specific test that I found were broken in
    Python-markdown (markdown.py).
    """

    def test_slow_hr(self):
        """This 'almost an hr' input was pathologically slow to process in
        older versions of markdown2.py; assert it now finishes within 1s.
        """
        import time
        text = """\
* * *

This on *almost* looks like an hr, except for the trailing '+'. In older
versions of markdown2.py this was pathologically slow:

- - - - - - - - - - - - - - - - - - - - - - - - - +
"""
        html = """\
<hr />

<p>This on <em>almost</em> looks like an hr, except for the trailing '+'. In older
versions of markdown2.py this was pathologically slow:</p>

<p>- - - - - - - - - - - - - - - - - - - - - - - - - +</p>
"""
        start = time.time()
        self._assertMarkdown(text, html)
        end = time.time()
        delta = end - start
        self.assertTrue(delta < 1.0, "It took more than 1s to process "
            "'slow-hr'. It took %.2fs. Too slow!" % delta)
    test_slow_hr.tags = ["perf"]

    def test_code_in_strong(self):
        self._assertMarkdown(
            '**look at `this code` call**',
            '<p><strong>look at <code>this code</code> call</strong></p>\n')
    test_code_in_strong.tags = ["code", "strong"]

    def test_starter_pre(self):
        # A code block at the very start of the document.
        self._assertMarkdown(
            _indent('#!/usr/bin/python\nprint "hi"'),
            '<pre><code>#!/usr/bin/python\nprint "hi"\n</code></pre>\n')
    test_starter_pre.tags = ["pre", "recipes"]

    def test_pre(self):
        # A code block after a leading paragraph.
        self._assertMarkdown(_dedent('''\
            some starter text

                #!/usr/bin/python
                print "hi"'''),
            '<p>some starter text</p>\n\n<pre><code>#!/usr/bin/python\nprint "hi"\n</code></pre>\n')

    def test_russian(self):
        ko = '\u043b\u0449' # 'ko' on russian keyboard
        self._assertMarkdown("## %s" % ko,
            '<h2>%s</h2>\n' % ko)
    test_russian.tags = ["unicode", "issue3"]
class DocTestsTestCase(unittest.TestCase):
    """Run the doctest-based checks (api.doctests file and inline doctests)."""

    def test_api(self):
        if sys.version_info[:2] < (2,4):
            raise TestSkipped("no DocFileTest in Python <=2.3")
        test = doctest.DocFileTest("api.doctests")
        test.runTest()

    # Don't bother on Python 3 because (a) there aren't many inline doctests,
    # and (b) they are more to be didactic than comprehensive test suites.
    if not py3:
        def test_internal(self):
            doctest.testmod(markdown2)
#---- internal support stuff
_xml_escape_re = re.compile(r'&#(x[0-9A-Fa-f]{2,3}|[0-9]{2,3});')
def _xml_escape_sub(match):
    """Decode one numeric XML character reference (decimal or hex)."""
    ref = match.group(1)
    if ref.startswith('x'):
        # Hex form like "x6d": prefix "0" so int() sees "0x6d".
        return unichr(int('0' + ref, base=16))
    return unichr(int(ref))
_markdown_email_link_re = re.compile(r'<a href="(.*?&#.*?)">(.*?)</a>', re.U)
def _markdown_email_link_sub(match):
    """Un-obfuscate one Markdown-generated email link back to plain text."""
    href, text = match.groups()
    decoded_href = _xml_escape_re.sub(_xml_escape_sub, href)
    decoded_text = _xml_escape_re.sub(_xml_escape_sub, text)
    return '<a href="%s">%s</a>' % (decoded_href, decoded_text)
def norm_html_from_html(html):
    """Normalize (somewhat) Markdown'd HTML.

    Part of Markdown'ing involves obfuscating email links with
    randomize encoding. Undo that obfuscation.

    Also normalize EOLs.
    """
    # Work on unicode text throughout.
    normed = html if isinstance(html, unicode) else html.decode('utf-8')
    # Undo the randomized email-address obfuscation.
    normed = _markdown_email_link_re.sub(_markdown_email_link_sub, normed)
    if sys.platform != "win32":
        return normed
    # CRLF -> LF on Windows only (matching the original behavior).
    return normed.replace('\r\n', '\n')
def _display(s):
    """Markup the given string for useful display."""
    if not isinstance(s, unicode):
        s = s.decode("utf-8")
    # Make whitespace visible and indent for readability in error messages.
    marked = _indent(_escaped_text_from_text(s, "whitespace"), 4)
    return marked if marked.endswith('\n') else marked + '\n'
def _markdown_with_perl(text):
markdown_pl = join(dirname(__file__), "Markdown.pl")
if not exists(markdown_pl):
raise OSError("`%s' does not exist: get it from "
"http://daringfireball.net/projects/markdown/"
% markdown_pl)
i, o = os.popen2("perl %s" % markdown_pl)
i.write(text)
i.close()
html = o.read()
o.close()
return html
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
| |
database, if project
data base does not exist, throw an exception
inputs:
fileName = name of raw telemetry data file with full directory and extenstion
dbName = name of project database with full directory and extension
recName = official receiver name'''
# declare the workspace - in practice we will identify all files in diretory and iterate over them as part of function, all this crap passed as parameters
recType = 'lotek'
headerDat = {} # create empty dictionary to hold Lotek header data indexed by line number - to be imported to Pandas dataframe
lineCounter = [] # create empty array to hold line indices
lineList = [] # generate a list of header lines - contains all data we need to write to project set up database
o_file = open(fileName, encoding='utf-8')
counter = 0 # start line counter
line = o_file.readline()[:-1] # read first line in file
lineCounter.append(counter) # append the current line counter to the counter array
lineList.append(line) # append the current line of header data to the line list
if line == "SRX800 / 800D Information:":
# find where data begins and header data ends
with o_file as f:
for line in f:
if "** Data Segment **" in line:
counter = counter + 1
dataRow = counter + 5 # if this current line signifies the start of the data stream, the data starts three rows down from this
break # break the loop, we have reached our stop point
else:
counter = counter + 1 # if we are still reading header data increase the line counter by 1
lineCounter.append(counter) # append the line counter to the count array
lineList.append(line) # append line of data to the data array
headerDat['idx'] = lineCounter # add count array to dictionary with field name 'idx' as key
headerDat['line'] = lineList # add data line array to dictionary with field name 'line' as key
headerDF = pd.DataFrame.from_dict(headerDat) # create pandas dataframe of header data indexed by row number
headerDF.set_index('idx',inplace = True)
# find scan time
for row in headerDF.iterrows(): # for every header data row
if 'Scan Time' in row[1][0]: # if the first 9 characters of the line say 'Scan Time' = we have found the scan time in the document
scanTimeStr = row[1][0][-7:-1] # get the number value from the row
scanTimeSplit = scanTimeStr.split(':') # split the string
scanTime = float(scanTimeSplit[1]) # convert the scan time string to float
break # stop that loop, we done
del row
# find number of channels and create channel dictionary
scanChan = [] # create empty array of channel ID's
channelDict = {} # create empty channel ID: frequency dictionary
counter = 0 # create counter
rows = headerDF.iterrows() # create row iterator
for row in rows: # for every row
if 'Active scan_table:' in row[1][0]: # if the first 18 characters say what that says
idx0 = counter + 2 # channel dictionary data starts two rows from here
while next(rows)[1][0] != '\n': # while the next row isn't empty
counter = counter + 1 # increase the counter, when the row is empty we have reached the end of channels, break loop
idx1 = counter + 1 # get index of last data row
break # break that loop, we done
else:
counter = counter + 1 # if it isn't a data row, increase the counter by 1
del row, rows
channelDat = headerDF.iloc[idx0:idx1] # extract channel dictionary data using rows identified earlier
for row in channelDat.iterrows():
dat = row[1][0]
channel = int(dat[0:4])
frequency = dat[10:17]
channelDict[channel] = frequency
scanChan.append(channel) # extract that channel ID from the data row and append to array
channels = len(scanChan)
conn = sqlite3.connect(dbName, timeout=30.0)
c = conn.cursor()
study_tags = pd.read_sql('SELECT FreqCode, TagType FROM tblMasterTag',con = conn)
study_tags = study_tags[study_tags.TagType == 'Study'].FreqCode.values
# with our data row, extract information using pandas fwf import procedure
#Depending on firmware the data structure will change. This is for xxx firmware. See below for additional firmware configs
# telemDat = pd.read_fwf(fileName,colspecs = [(0,8),(8,18),(18,28),(28,36),(36,51),(51,59)],names = ['Date','Time','ChannelID','TagID','Antenna','Power'],skiprows = dataRow)
# telemDat = telemDat.iloc[:-2] # remove last two rows, Lotek adds garbage at the end
#Master Firmware: Version 9.12.5
telemDat = pd.read_fwf(fileName,colspecs = [(0,8),(8,23),(23,33),(33,41),(41,56),(56,64)],names = ['Date','Time','ChannelID','TagID','Antenna','Power'],skiprows = dataRow)
telemDat = telemDat.iloc[:-2] # remove last two
telemDat['Antenna'] = telemDat['Antenna'].astype(str) #TCS Added this to get dict to line up with data
telemDat['fileName'] = np.repeat(rxfile,len(telemDat)) # Adding the filename into the dataset...drop the path (note this may cause confusion because above we use filename with path. Decide what to do and fix)
def id_to_freq(row, channelDict):
    """Map a detection row's channel ID (row[2]) to its frequency string.

    Returns the sentinel '888' when the channel is not in the scan table,
    so those rows can be filtered out downstream.
    """
    return channelDict.get(row[2], '888')
if len(telemDat) > 0:
if ant_to_rec_dict == None:
telemDat['Frequency'] = telemDat.apply(id_to_freq, axis = 1, args = (channelDict,))
telemDat = telemDat[telemDat.Frequency != '888']
telemDat = telemDat[telemDat.TagID != 999]
telemDat['FreqCode'] = telemDat['Frequency'].astype(str) + ' ' + telemDat['TagID'].astype(int).astype(str)
telemDat['timeStamp'] = pd.to_datetime(telemDat['Date'] + ' ' + telemDat['Time'])# create timestamp field from date and time and apply to index
telemDat['Epoch'] = (telemDat['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
telemDat = noiseRatio(5.0,telemDat,study_tags)
telemDat.drop (['Date','Time','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
telemDat['ScanTime'] = np.repeat(scanTime,len(telemDat))
telemDat['Channels'] = np.repeat(channels,len(telemDat))
telemDat['RecType'] = np.repeat(recType,len(telemDat))
telemDat['recID'] = np.repeat(recName,len(telemDat))
telemDat.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
else:
for ant in ant_to_rec_dict:
site = ant_to_rec_dict[ant]
telemDat_sub = telemDat[telemDat.Antenna == str(ant)]
telemDat_sub['Frequency'] = telemDat_sub.apply(id_to_freq, axis = 1, args = (channelDict,))
telemDat_sub = telemDat_sub[telemDat_sub.Frequency != '888']
telemDat_sub = telemDat_sub[telemDat_sub.TagID != 999]
telemDat_sub['FreqCode'] = telemDat_sub['Frequency'].astype(str) + ' ' + telemDat_sub['TagID'].astype(int).astype(str)
telemDat_sub['timeStamp'] = pd.to_datetime(telemDat_sub['Date'] + ' ' + telemDat_sub['Time'])# create timestamp field from date and time and apply to index
telemDat_sub['Epoch'] = (telemDat_sub['timeStamp'] - datetime.datetime(1970,1,1)).dt.total_seconds()
telemDat_sub = noiseRatio(5.0,telemDat_sub,study_tags)
telemDat_sub.drop (['Date','Time','Frequency','TagID','ChannelID','Antenna'],axis = 1, inplace = True)
telemDat_sub['ScanTime'] = np.repeat(scanTime,len(telemDat_sub))
telemDat_sub['Channels'] = np.repeat(channels,len(telemDat_sub))
telemDat_sub['RecType'] = np.repeat(recType,len(telemDat_sub))
telemDat_sub['recID'] = np.repeat(site,len(telemDat_sub))
telemDat_sub.to_sql('tblRaw',con = conn,index = False, if_exists = 'append')
else:
lotek400 = False
# find where data begins and header data ends
with o_file as f:
for line in f:
if "********************************* Data Segment *********************************" in line:
counter = counter + 1
dataRow = counter + 5 # if this current line signifies the start of the data stream, the data starts three rows down from this
break # break the loop, we have reached our stop point
elif line[0:14] == "Code_log data:":
counter = counter + 1
dataRow = counter + 3
lotek400 = True
break
else:
counter = counter + 1 # if we are still reading header data increase the line counter by 1
lineCounter.append(counter) # append the line counter to the count array
lineList.append(line) # append line of data to the data array
headerDat['idx'] = lineCounter # add count array to dictionary with field name 'idx' as key
headerDat['line'] = lineList # add data line array to dictionary with field name 'line' as key
headerDF = pd.DataFrame.from_dict(headerDat) # create pandas dataframe of header data indexed by row number
headerDF.set_index('idx',inplace = True)
# find scan time
for row in headerDF.iterrows(): # for every header data row
if 'scan time' in row[1][0] or 'Scan time' in row[1][0]: # if the first 9 characters of the line say 'Scan Time' = we have found the scan time in the document
scanTimeStr = row[1][0][-7:-1] # get the number value from the row
scanTimeSplit = scanTimeStr.split(':') # split the string
scanTime = float(scanTimeSplit[1]) # convert the scan time string to float
break # stop that loop, we done
del row
# find number of channels and create channel dictionary
scanChan = [] # create empty array of channel ID's
channelDict = {} # create empty channel ID: frequency dictionary
counter = 0 # create counter
rows = headerDF.iterrows() # create row iterator
for row in rows: # for every row
if 'Active scan_table:' in row[1][0]: # if the first 18 characters say what that says
idx0 = counter + 2 # channel dictionary data starts two rows | |
# Importations
from keras.applications import ResNet50, VGG16, imagenet_utils
from sklearn.preprocessing import LabelEncoder
from keras.preprocessing.image import img_to_array, load_img
from keras.utils import to_categorical
from imutils import paths
import numpy as np
import pickle
import random
import os
# Extract features for each set VGG
def ExtractFeatures_VGG(TRAIN='training',
                        TEST='evaluation',
                        VAL='validation',
                        BATCH_SIZE=32,
                        BASE_PATH=os.path.join('TransferLearningKeras', 'dataset'),
                        BASE_CSV_PATH=os.path.join('TransferLearningKeras', 'output'),
                        LE_PATH=os.path.join('TransferLearningKeras', 'output', 'le.cpickle'),
                        ):
    """Extract VGG16 (ImageNet, no top) features for each data split and write them to CSV.

    For every split (TRAIN, TEST, VAL) the images found under BASE_PATH/<split>
    are pushed through VGG16 and the resulting 7*7*512-element feature vectors
    are written, one "label,feat1,...,featN" row per image, to
    BASE_CSV_PATH/<split>.csv. The fitted LabelEncoder is pickled to LE_PATH.

    NOTE(review): LE_PATH previously defaulted to the output *directory*, which
    made the final ``open(LE_PATH, "wb")`` fail; the default now names a file.
    NOTE(review): the label encoder is fitted only on the first split processed —
    assumes every split contains the same set of class labels; confirm.
    """
    # load the VGG16 network once; include_top=False drops the FC classifier head
    print("[INFO] loading network...")
    model = VGG16(weights="imagenet", include_top=False)
    le = None
    # Loop over the different sets
    for split in (TRAIN, TEST, VAL):
        print("[INFO] processing '{} split'...".format(split))
        p = os.path.sep.join([BASE_PATH, split])
        imagePaths = list(paths.list_images(p))
        # shuffle, then recover each class label from the parent directory name
        random.shuffle(imagePaths)
        labels = [pth.split(os.path.sep)[-2] for pth in imagePaths]
        # If the label encoder is None, create it (first split only)
        if le is None:
            le = LabelEncoder()
            le.fit(labels)
        csvPath = os.path.sep.join([BASE_CSV_PATH, "{}.csv".format(split)])
        # context manager guarantees the CSV is closed even if extraction fails
        with open(csvPath, "w") as csv_file:
            num_batches = int(np.ceil(len(imagePaths) / float(BATCH_SIZE)))
            # loop over the images in batches
            for (b, i) in enumerate(range(0, len(imagePaths), BATCH_SIZE)):
                print("[INFO] processing batch {}/{}".format(b + 1, num_batches))
                batchPaths = imagePaths[i:i + BATCH_SIZE]
                batchLabels = le.transform(labels[i:i + BATCH_SIZE])
                batchImages = []
                for imagePath in batchPaths:
                    # resize to 224x224 pixels to match the VGG16 input size
                    image = load_img(imagePath, target_size=(224, 224))
                    image = img_to_array(image)
                    # add a batch axis and subtract the ImageNet mean RGB intensity
                    image = np.expand_dims(image, axis=0)
                    image = imagenet_utils.preprocess_input(image)
                    batchImages.append(image)
                # stack into one (N, 224, 224, 3) tensor and extract features
                batchImages = np.vstack(batchImages)
                features = model.predict(batchImages, batch_size=BATCH_SIZE)
                # flatten each 7x7x512 conv volume into one vector per image
                features = features.reshape((features.shape[0], 7 * 7 * 512))
                for (label, vec) in zip(batchLabels, features):
                    # one CSV row: class label followed by the feature values
                    csv_file.write("{},{}\n".format(label, ",".join(str(v) for v in vec)))
    # Serialize the fitted label encoder to disk
    with open(LE_PATH, "wb") as f:
        f.write(pickle.dumps(le))
# Extract features for specific Batch
def ExtractFeatures_batch_VGG(image_batch_paths):
    """Run a list of image paths through VGG16 (ImageNet, no top) and return features.

    Returns an array of shape (len(image_batch_paths), 7*7*512), one flattened
    feature vector per input image.
    """
    # load the VGG16 network and initialize the label encoder
    print("[INFO] loading VGG16 network...")
    model = VGG16(weights="imagenet", include_top=False)  # headless backbone

    def _prepare(path):
        # 224x224 resize (VGG16 input size), batch axis, ImageNet mean-RGB subtraction
        arr = img_to_array(load_img(path, target_size=(224, 224)))
        return imagenet_utils.preprocess_input(np.expand_dims(arr, axis=0))

    # preprocess every image and stack into a single (N, 224, 224, 3) tensor
    stacked = np.vstack([_prepare(pth) for pth in image_batch_paths])
    feats = model.predict(stacked, batch_size=len(stacked))
    # flatten each final 7x7x512 conv volume into a single feature vector
    return feats.reshape((feats.shape[0], 7 * 7 * 512))
# Extract features for each set ResNet
def ExtractFeatures_ResNet(TRAIN='training',
                           TEST='evaluation',
                           VAL='validation',
                           BATCH_SIZE=32,
                           BASE_PATH=os.path.join('TransferLearningKeras', 'dataset'),
                           BASE_CSV_PATH=os.path.join('TransferLearningKeras', 'output'),
                           LE_PATH=os.path.join('TransferLearningKeras', 'output', 'le.cpickle'),
                           ):
    """Extract ResNet50 (ImageNet, no top) features for each data split and write them to CSV.

    Mirrors ExtractFeatures_VGG but uses ResNet50, whose final conv volume
    flattens to 7*7*2048 features per image. One "label,feat1,...,featN" row
    is written per image to BASE_CSV_PATH/<split>.csv, and the fitted
    LabelEncoder is pickled to LE_PATH.

    NOTE(review): LE_PATH previously defaulted to the output *directory*, which
    made the final ``open(LE_PATH, "wb")`` fail; the default now names a file.
    NOTE(review): the label encoder is fitted only on the first split processed —
    assumes every split contains the same set of class labels; confirm.
    """
    # load the ResNet50 network once; include_top=False drops the classifier head
    print("[INFO] loading ResNet50 network...")
    model = ResNet50(weights="imagenet", include_top=False)
    le = None
    # loop over the data splits
    for split in (TRAIN, TEST, VAL):
        print("[INFO] processing '{} split'...".format(split))
        p = os.path.sep.join([BASE_PATH, split])
        imagePaths = list(paths.list_images(p))
        # shuffle, then recover each class label from the parent directory name
        random.shuffle(imagePaths)
        labels = [pth.split(os.path.sep)[-2] for pth in imagePaths]
        # if the label encoder is None, create it (first split only)
        if le is None:
            le = LabelEncoder()
            le.fit(labels)
        csvPath = os.path.sep.join([BASE_CSV_PATH, "{}.csv".format(split)])
        # context manager guarantees the CSV is closed even if extraction fails
        with open(csvPath, "w") as csv_file:
            num_batches = int(np.ceil(len(imagePaths) / float(BATCH_SIZE)))
            # loop over the images in batches
            for (b, i) in enumerate(range(0, len(imagePaths), BATCH_SIZE)):
                print("[INFO] processing batch {}/{}".format(b + 1, num_batches))
                batchPaths = imagePaths[i:i + BATCH_SIZE]
                batchLabels = le.transform(labels[i:i + BATCH_SIZE])
                batchImages = []
                for imagePath in batchPaths:
                    # resize to the 224x224 input size expected by ResNet50
                    image = load_img(imagePath, target_size=(224, 224))
                    image = img_to_array(image)
                    # add a batch axis and subtract the ImageNet mean RGB intensity
                    image = np.expand_dims(image, axis=0)
                    image = imagenet_utils.preprocess_input(image)
                    batchImages.append(image)
                # stack into one tensor and extract features
                batchImages = np.vstack(batchImages)
                features = model.predict(batchImages, batch_size=BATCH_SIZE)
                # flatten each 7x7x2048 conv volume into one vector per image
                features = features.reshape((features.shape[0], 7 * 7 * 2048))
                for (label, vec) in zip(batchLabels, features):
                    # one CSV row: class label followed by the feature values
                    csv_file.write("{},{}\n".format(label, ",".join(str(v) for v in vec)))
    # serialize the fitted label encoder to disk
    with open(LE_PATH, "wb") as f:
        f.write(pickle.dumps(le))
# Extract features for specific Batch
def ExtractFeatures_batch_ResNet(image_batch_paths):
    """Run a list of image paths through ResNet50 (ImageNet, no top) and return features.

    Returns an array of shape (len(image_batch_paths), 7*7*2048), one flattened
    feature vector per input image.
    """
    # load the ResNet50 network (the original comment wrongly said VGG16)
    print("[INFO] loading ResNet50 network...")
    model = ResNet50(weights="imagenet", include_top=False)  # headless backbone

    def _prepare(path):
        # 224x224 resize, batch axis, ImageNet mean-RGB subtraction
        arr = img_to_array(load_img(path, target_size=(224, 224)))
        return imagenet_utils.preprocess_input(np.expand_dims(arr, axis=0))

    # preprocess every image and stack into a single (N, 224, 224, 3) tensor
    stacked = np.vstack([_prepare(pth) for pth in image_batch_paths])
    feats = model.predict(stacked, batch_size=len(stacked))
    # flatten each final 7x7x2048 conv volume into a single feature vector
    return feats.reshape((feats.shape[0], 7 * 7 * 2048))
# Custom Data generator: to yield csv features
def csv_feature_generator(inputPath,
bs,
numClasses,
mode="train"):
# open the input file for reading
f = open(inputPath, "r")
# loop indefinitely
while True:
# initialize our batch of data and labels
data = []
labels = []
# keep looping until we reach our batch size
while len(data) < bs:
# attempt to read the next row of the CSV file
row = f.readline()
# check to see if the row is empty, indicating we have
# reached the end of the file
if row == "":
# reset the file pointer to the beginning of the file
# and re-read the row
f.seek(0)
row = f.readline()
# if we are evaluating we should now break from our
# loop to ensure we don't continue to fill up the
# batch from samples at the beginning of the file
| |
from multiprocessing.sharedctypes import Value
import pandas as pd
from pyomo.environ import *
from pyomo.environ import units as pyunits
# from pyomo.repn.plugins.baron_writer import NonNegativeReals
from watertap3.utils import financials
from watertap3.wt_units.wt_unit import WT3UnitProcess
import idaes.core.util.scaling as iscale
## REFERENCE: ADD REFERENCE HERE
# Module-level metadata picked up by the watertap3 unit-model framework.
module_name = 'ion_exchange'  # registry name of this unit process
basis_year = 2016 # 2016 is costing year for EPA component costing data
tpec_or_tic = 'TIC'  # which installed-cost factor the costing applies ('TPEC' or 'TIC')
class UnitProcess(WT3UnitProcess):
    def fixed_cap(self, unit_params):
        '''
        Fixed capital investment (FCI) for the ion exchange unit [$MM].

        Declares Vars for column, resin, and pump capital, ties them together
        with cost-curve Constraints (EPA component costing data, basis year
        2016), and returns the total FCI Var. All pump cost curves use the
        same quadratic fit in the relevant flow, with one pump per column plus
        one standby column (hence ``num_columns + 1``).

        :param unit_params: dict of unit configuration (not read here; kept
            for the common fixed_cap interface).
        :return: ``self.total_ix_cap`` Var, total ion exchange FCI [$MM]
        '''
        self.total_ix_cap = Var(initialize=25,
                                # domain=NonNegativeReals,
                                doc='Total ion exchange FCI [$MM]')
        self.cap_per_column = Var(initialize=1,
                                  # domain=NonNegativeReals,
                                  doc='Capital per column [$MM]')
        self.column_total_cap = Var(initialize=1,
                                    # domain=NonNegativeReals,
                                    doc='Total column capital [$MM]')
        self.resin_unit_cap = Var(initialize=4000,
                                  # domain=NonNegativeReals,
                                  doc='Resin cap per m3 [$/m3]')
        self.resin_cap = Var(initialize=1E4,
                             # domain=NonNegativeReals,
                             doc='Resin capital [$MM]')
        # NOTE(review): unlike every sibling Var here, this one keeps an active
        # NonNegativeReals domain — confirm whether that is intentional.
        self.regen_pump_cap = Var(initialize=100,
                                  domain=NonNegativeReals,
                                  doc='Pump capital for regen cycle [$MM]')
        self.bw_pump_cap = Var(initialize=100,
                               # domain=NonNegativeReals,
                               doc='Pump capital for backwash cycle [$MM]')
        self.rinse_pump_cap = Var(initialize=100,
                                  # domain=NonNegativeReals,
                                  doc='Pump capital for rinse cycle [$MM]')
        self.boost_pump_cap = Var(initialize=100,
                                  # domain=NonNegativeReals,
                                  doc='Pump capital for booster pump [$MM]')
        self.total_pump_cap = Var(initialize=100,
                                  # domain=NonNegativeReals,
                                  doc='Total capital for IX pumps [$MM]')
        # Column cost curve depends on the pressure-vessel material; each fit
        # maps column volume to $ and the 1E-6 factor converts to $MM.
        if self.pv_material == 'carbon_w_stainless_internals':
            self.cap_per_column_constr = Constraint(expr=self.cap_per_column ==
                                                    (16504 * self.column_vol ** 0.43) * 1E-6)
            # self.cap_per_column = (16504 * self.column_vol ** 0.43) * 1E-6
        if self.pv_material == 'carbon_w_plastic_internals':
            self.cap_per_column_constr = Constraint(expr=self.cap_per_column ==
                                                    (9120 * self.column_vol ** 0.49) * 1E-6)
            # self.cap_per_column = (9120 * self.column_vol ** 0.49) * 1E-6
        if self.pv_material == 'fiberglass':
            self.cap_per_column_constr = Constraint(expr=self.cap_per_column ==
                                                    (5637 * self.column_vol ** 0.9) * 1E-6)
            # self.cap_per_column = (5637 * self.column_vol ** 0.9) * 1E-6
        # num_columns + 1 accounts for the extra (standby) column.
        self.col_total_cap_constr = Constraint(expr=self.column_total_cap == self.cap_per_column * (self.num_columns + 1))
        # self.column_total_cap = self.cap_per_column * (self.num_columns + 1)
        self.resin_unit_cap.fix(self.resin_dict[self.resin_type])
        # self.resin_unit_cap = self.resin_dict[self.resin_type]
        self.resin_cap_constr = Constraint(expr=self.resin_cap == ((self.resin_vol + self.resin_vol_per_col) * self.resin_unit_cap) * 1E-6) # include an additional resin vol per column to account for the extra column
        # self.resin_cap = ((self.resin_vol + self.resin_vol_per_col) * self.resin_unit_cap) * 1E-6
        self.regen_pump_cap_constr = Constraint(expr=self.regen_pump_cap == (-24.257 * self.regen_flow ** 2 + 2803.7 * self.regen_flow + 7495.7) *
                                                (self.num_columns + 1) * 1E-6) # assumes centrifugal pump and 1 pump per column
        # self.regen_pump_cap = (-24.257 * self.regen_flow ** 2 + 2803.7 * self.regen_flow + 7495.7) * (self.num_columns + 1) * 1E-6
        self.bw_pump_cap_constr = Constraint(expr=self.bw_pump_cap == (-24.257 * self.bw_flow ** 2 + 2803.7 * self.bw_flow + 7495.7) *
                                             (self.num_columns + 1) * 1E-6) # assumes centrifugal pump and 1 pump per column
        # self.bw_pump_cap = (-24.257 * self.bw_flow ** 2 + 2803.7 * self.bw_flow + 7495.7) * (self.num_columns + 1) * 1E-6
        self.rinse_pump_cap_constr = Constraint(expr=self.rinse_pump_cap == (-24.257 * self.rinse_flow ** 2 + 2803.7 * self.rinse_flow + 7495.7) *
                                                (self.num_columns + 1) * 1E-6) # assumes centrifugal pump and 1 pump per column
        # self.rinse_pump_cap = (-24.257 * self.rinse_flow ** 2 + 2803.7 * self.rinse_flow + 7495.7) * (self.num_columns + 1) * 1E-6
        # Booster pump curve uses the per-column service flow in m3/min.
        self.flow_per_col_m3_min = pyunits.convert(self.flow_per_column, to_units=pyunits.m ** 3 / pyunits.min)
        self.boost_pump_cap_constr = Constraint(expr=self.boost_pump_cap == (-24.257 * self.flow_per_col_m3_min ** 2 + 2803.7 * self.flow_per_col_m3_min + 7495.7) *
                                                (self.num_columns + 1) * 1E-6) # assumes centrifugal pump and 1 pump per column
        # self.boost_pump_cap = (-24.257 * self.flow_per_col_m3_min ** 2 + 2803.7 * self.flow_per_col_m3_min + 7495.7) * (self.num_columns + 1) * 1E-6
        self.total_pump_cap_constr = Constraint(expr=self.total_pump_cap ==
                                                self.regen_pump_cap + self.bw_pump_cap + self.rinse_pump_cap + self.boost_pump_cap)
        # self.total_ix_cap_constr = Constraint(expr=self.total_ix_cap ==
        #                                       self.column_total_cap + self.resin_cap + self.regen_pump_cap + self.bw_pump_cap + self.rinse_pump_cap + self.boost_pump_cap)
        # Total FCI scales component capital by the TPEC/TIC installation factor.
        self.total_ix_cap_constr = Constraint(expr=self.total_ix_cap ==
                                              (self.column_total_cap + self.resin_cap + self.total_pump_cap) * self.tpec_tic)
        return self.total_ix_cap
    def elect(self):
        '''
        Electricity intensity for ion exchange [kWh/m3].

        Computes hydraulic pump power (P = rho * g * head * Q / efficiency)
        for the main, regen, backwash, and rinse pumps on a per-column basis,
        totals them over (num_columns + 1) columns, and divides by the outlet
        volumetric flow.

        :return: ``self.ix_electricity_intensity`` Var [kWh/m3]
        '''
        self.main_pump_power = Var(initialize=4E-6,
                                   units=pyunits.kW,
                                   # domain=NonNegativeReals,
                                   doc='Main pump power [kW]')
        self.regen_pump_power = Var(initialize=4E-6,
                                    units=pyunits.kW,
                                    # domain=NonNegativeReals,
                                    doc='Regen pump power [kW]')
        self.bw_pump_power = Var(initialize=4E-6,
                                 units=pyunits.kW,
                                 # domain=NonNegativeReals,
                                 doc='Backwash pump power [kW]')
        self.rinse_pump_power = Var(initialize=4E-6,
                                    units=pyunits.kW,
                                    # domain=NonNegativeReals,
                                    doc='Rinse pump power [kW]')
        self.total_pump_power = Var(initialize=4E-5,
                                    units=pyunits.kW,
                                    # domain=NonNegativeReals,
                                    doc='Total pump power [kW]')
        self.ix_electricity_intensity = Var(
            initialize=4E-5,
            units=pyunits.kWh/pyunits.m**3,
            # domain=NonNegativeReals,
            doc='Total IX electricity intensity [kWh/m3]')
        # Flow bases converted to working units.
        # NOTE(review): flow_in_m3_hr and flow_waste_m3_hr are computed but
        # never used below — only flow_out_m3_hr feeds the intensity constraint.
        flow_in_m3_hr = pyunits.convert(self.flow_vol_in[self.t], to_units=pyunits.m ** 3 / pyunits.hr)
        flow_out_m3_hr = pyunits.convert(self.flow_vol_out[self.t], to_units=pyunits.m ** 3 / pyunits.hr)
        flow_waste_m3_hr = pyunits.convert(self.flow_vol_waste[self.t], to_units=pyunits.m ** 3 / pyunits.hr)
        flow_per_column_m3_s = pyunits.convert(self.flow_per_column, to_units=pyunits.m ** 3 / pyunits.second)
        # Regen/backwash/rinse flows are system totals; dividing by num_columns
        # gives the per-column pump duty.
        regen_flow_m3_s = pyunits.convert(self.regen_flow / self.num_columns, to_units=pyunits.m ** 3 / pyunits.second)
        bw_flow_m3_s = pyunits.convert(self.bw_flow / self.num_columns, to_units=pyunits.m ** 3 / pyunits.second)
        rinse_flow_m3_s = pyunits.convert(self.rinse_flow / self.num_columns, to_units=pyunits.m ** 3 / pyunits.second)
        self.pump_efficiency = Var(initialize=0.7, units=pyunits.dimensionless, doc='Pump Efficiency [dimensionless]')
        self.g = Param(initialize=9.81, units=pyunits.m / pyunits.second ** 2, doc='Gravity [m/s2]')
        self.rho = Param(initialize=1000, units=pyunits.kg / pyunits.m ** 3, doc='Pure Water Density [kg/m3]')
        self.pressure_drop_m = self.pressure_drop * (0.70325 * (pyunits.m / pyunits.psi)) ## 1 psi of differential pressure = 0.70325 m pump head
        self.pump_efficiency.fix(0.7)
        # Hydraulic power per pump: P = rho * g * head * Q / efficiency.
        self.main_pump_power_constr = Constraint(expr=self.main_pump_power ==
                                                 pyunits.convert(((self.rho * self.g * self.pressure_drop_m * flow_per_column_m3_s) /
                                                                  self.pump_efficiency), to_units=pyunits.kilowatts)) # per column
        # Regen pump uses the brine density rather than pure-water density.
        self.regen_pump_power_constr = Constraint(expr=self.regen_pump_power ==
                                                  pyunits.convert(((self.regen_density * self.g * self.pressure_drop_m * regen_flow_m3_s) /
                                                                   self.pump_efficiency), to_units=pyunits.kilowatts)) # per column
        self.bw_pump_power_constr = Constraint(expr=self.bw_pump_power ==
                                               pyunits.convert(((self.rho * self.g * self.pressure_drop_m * bw_flow_m3_s) /
                                                                self.pump_efficiency), to_units=pyunits.kilowatts)) # per column
        self.rinse_pump_power_constr = Constraint(expr=self.rinse_pump_power ==
                                                  pyunits.convert(((self.rho * self.g * self.pressure_drop_m * rinse_flow_m3_s) /
                                                                   self.pump_efficiency), to_units=pyunits.kilowatts)) # per column
        # Scale per-column powers up to the full train, including the standby column.
        self.total_pump_power_constr = Constraint(expr=self.total_pump_power ==
                                                  (self.main_pump_power + self.regen_pump_power + self.bw_pump_power + self.rinse_pump_power) * (self.num_columns + 1))
        self.ix_electricity_intensity_constr = Constraint(expr=self.ix_electricity_intensity == self.total_pump_power / flow_out_m3_hr)
        return self.ix_electricity_intensity
    def ix_setup(self, unit_params):
        '''
        Build all ion exchange sub-models.

        The call order is presumably significant (regen/backwash/rinse
        variables are created before the constituent/resin/column models, and
        the cross-cutting constraints are added last) — preserve it.
        '''
        self.ix_regen(unit_params)
        self.ix_backwash()
        self.ix_rinse()
        self.ix_constituents(unit_params)
        self.ix_resin()
        self.ix_column()
        self.ix_constraints()
def ix_regen(self, unit_params):
### REGEN VARIABLES
self.regen_dose = Var(initialize=300,
# domain=NonNegativeReals,
units=pyunits.kg / pyunits.m ** 3,
bounds=(80, 500),
doc='NaCl dose required for regeneration [kg/m3]')
self.regen_rate = Var(initialize=4,
# domain=NonNegativeReals,
bounds=(2, 5),
doc='Regeneration rate [BV/hr]')
self.regen_density = Var(initialize=1000,
# domain=NonNegativeReals,
units=pyunits.kg / pyunits.m ** 3,
bounds=(990, 1200),
doc='Density of NaCl regen solution [kg/m3]')
self.regen_ww = Var(initialize=0.1,
# domain=NonNegativeReals,
bounds=(0.015, 0.26),
doc='Strength of NaCl solution w/w [kg NaCl/kg soln]')
self.regen_conc = Var(initialize=110,
# domain=NonNegativeReals,
units=pyunits.kg / pyunits.m ** 3,
doc='Concentration of regen solution [kg/m3]')
self.regen_vol = Var(initialize=2,
# domain=NonNegativeReals,
doc='m3 of regen solution per m3 resin')
self.regen_soln_per_column = Var(initialize=50,
# domain=NonNegativeReals,
units=pyunits.m ** 3,
doc='Regen solution used per column [m3/column]')
self.regen_soln_per_column_annual = Var(initialize=1E3,
# domain=NonNegativeReals,
units=pyunits.m ** 3 / pyunits.year,
doc='Annual regen used per column [m3/year]')
self.regen_soln_annual = Var(initialize=1E5,
# domain=NonNegativeReals,
units=pyunits.m ** 3 / pyunits.year,
doc='Total volume regen solution used [m3/year]')
self.regen_time_per_column = Var(initialize=5,
# domain=NonNegativeReals,
units=pyunits.min,
doc='Regen time per column [min]')
self.regen_flow = Var(initialize=10,
# domain=NonNegativeReals,
bounds=(0.01, 1E5),
units=pyunits.m ** 3 / pyunits.min,
doc='Regeneration flow rate [m3/min]')
self.num_regen_per_column_annual = Var(initialize=200,
# domain=NonNegativeReals,
doc='Number of regen cycles per year')
self.salt_per_regen_per_column = Var(initialize=5E3,
# domain=NonNegativeReals,
doc='Number of regen cycles per year')
self.salt_per_column_annual = Var(initialize=1E5,
# domain=NonNegativeReals,
units=pyunits.kg / pyunits.year,
doc='Mass of salt per column per year [kg/yr]')
self.salt_total_annual = Var(initialize=1E6,
# domain=NonNegativeReals,
units=pyunits.kg / pyunits.year,
doc='Mass of salt per year [kg/yr]')
self.salt_dose = Var(initialize=0.1,
# domain=NonNegativeReals,
units=pyunits.kg / pyunits.m ** 3,
doc='Salt dose for system [kg/m3]')
self.total_regen_time = Var(initialize=30,
units=pyunits.min,
domain=NonNegativeReals,
doc='Total regeneration cycle time [min]')
self.regen_dose.fix(300)
if 'regen_ww' in unit_params.keys():
self.regen_ww.fix(unit_params['regen_ww'])
else:
self.regen_ww.fix(0.1)
    def ix_backwash(self):
        '''
        Create backwash-cycle Vars; bounds differ by resin mode.

        The 'sac' and 'sba' modes (presumably strong-acid cation and
        strong-base anion resins — confirm) declare the same set of variables
        but with different bounds, and fix different backwash times
        (6 min for sac, 12 min for sba).
        '''
        if self.mode == 'sac':
            ### BACKWASH VARIABLES
            self.bw_rate = Var(initialize=5,
                               # domain=NonNegativeReals,
                               units=pyunits.m / pyunits.hour,
                               bounds=(4.5, 6.5),
                               doc='Backwash rate [m/hr]')
            self.bw_time = Var(initialize=6,
                               # domain=NonNegativeReals,
                               units=pyunits.minute,
                               bounds=(4, 15),
                               doc='Backwash time [min]')
            self.bw_flow = Var(initialize=5,
                               # domain=NonNegativeReals,
                               units=pyunits.m ** 3 / pyunits.minute,
                               doc='Backwash flow rate [m3/min]')
            self.bed_expansion = Var(initialize=0.5,
                                     # domain=NonNegativeReals,
                                     units=pyunits.dimensionless,
                                     bounds=(0.4, 0.6),
                                     doc='Resin bed expansion during backwash [%]')
            self.bed_expansion_h = Var(
                # initialize=0.5,
                # domain=NonNegativeReals,
                units=pyunits.m,
                bounds=(0.1, 3),
                doc='Resin bed expansion during backwash [m]')
            self.bw_time.fix(6)
        if self.mode == 'sba':
            ### BACKWASH VARIABLES
            self.bw_rate = Var(initialize=6,
                               # domain=NonNegativeReals,
                               units=pyunits.m / pyunits.hour,
                               bounds=(4.5, 8),
                               doc='Backwash rate [m/hr]')
            self.bw_time = Var(initialize=6,
                               # domain=NonNegativeReals,
                               units=pyunits.minute,
                               bounds=(4, 20),
                               doc='Backwash time [min]')
            self.bw_flow = Var(initialize=5,
                               # domain=NonNegativeReals,
                               units=pyunits.m ** 3 / pyunits.minute,
                               doc='Backwash flow rate [m3/min]')
            self.bed_expansion = Var(initialize=0.5,
                                     # domain=NonNegativeReals,
                                     units=pyunits.dimensionless,
                                     bounds=(0.4, 0.8),
                                     doc='Resin bed expansion during backwash [%]')
            self.bed_expansion_h = Var(
                # initialize=0.5,
                # domain=NonNegativeReals,
                units=pyunits.m,
                bounds=(0.5, 3),
                doc='Resin bed expansion during backwash [m]')
            # sba mode fixes a longer backwash time than sac (12 vs 6 min).
            # self.bw_time.fix(6)
            self.bw_time.fix(12)
def ix_rinse(self):
### RINSE VARIABLES
if self.mode == 'sac':
self.rinse_bv = Var(initialize=5,
# domain=NonNegativeReals,
bounds=(2, 5),
doc='Number of bed volumes for rinse step [BV]')
self.rinse_bv.fix(3)
if self.mode == 'sba':
self.rinse_bv = | |
# Repository: GavinPass/dash-sample-apps
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import pyvista as pv
import xarray as xr
from jupyter_dash import JupyterDash
import dash
import dash_html_components as html
import vtk
import dash_vtk
from dash_vtk.utils import to_volume_state
from pyvista import examples
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import os
from dash_vtk.utils import presets
import segyio
import random
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import PVGeo
# # Data Import
# ## Wells
# In[9]:
# Load every well survey CSV into a dict keyed by file name (extension stripped).
surveys = {
    fname[:-4]: pd.read_csv(r"data/Wells_seismic/{}".format(fname))
    for fname in os.listdir(r"data/Wells_seismic")
}
# In[10]:
# Per well: (inline, xline, TVD) triplets as an (N, 3) numpy array.
points_dict = {
    name: np.array(list(zip(surveys[name].inline, surveys[name].xline, surveys[name].TVD)))
    for name in surveys
}
# In[11]:
# Per well: connect the survey points into a polyline using PVGeo.
lines_dict = {}
for name in points_dict:
    poly_obj = PVGeo.points_to_poly_data(points_dict[name])
    lines_dict[name] = PVGeo.filters.AddCellConnToPoints().apply(poly_obj)
# In[12]:
# Flatten each well's vtk point and line-connectivity arrays for dash_vtk.
points_well = {name: lines_dict[name].points.ravel() for name in lines_dict}
lines_well = {name: lines_dict[name].lines.ravel() for name in lines_dict}
# ## Horizon
# In[13]:
# import of the top of the reservoir horizon
reservoir_hor = pd.read_csv(
    r"data/Horizons/Hugin-reservoir.dat",
    names=["x", "y", "inline", "xline", "z"],
    sep="\t",
)
# To generate the vtk surface of the horizon, we divide the generation of the polys and the points:
# the polys are generated from the x, y, z data and the points from inline, xline and z,
# in order to get the scale of the horizon correct.
# ### Polys
# In[14]:
# conversion of the x, y, z columns to numpy for polys
hor_array_polys = reservoir_hor.loc[:, ["x", "y", "z"]].to_numpy()
# use of pyvista to generate a polydata object
hor_cloud_polys = pv.PolyData(hor_array_polys)
# assign the depth coordinate as the "Depth" scalar (used for coloring)
hor_cloud_polys["Depth"] = hor_array_polys[:, -1]
# generation of a surface from the polydata by using pyvista delaunay triangulation
surf_polys = hor_cloud_polys.delaunay_2d()
# In[15]:
# from the polydata object, we get the vtk polys (triangle connectivity) for the 3d visualization
polydata_polys = surf_polys.extract_geometry()
polys_hor = vtk_to_numpy(polydata_polys.GetPolys().GetData())
# extract the depth values of the horizon and the depth range used as the color scale
depth = polydata_polys["Depth"]
min_depth = np.amin(depth)
max_depth = np.amax(depth)
color_range = [min_depth, max_depth]
# ### Points
# In[16]:
# conversion of the inline, xline, z columns to numpy for points
hor_array_points = reservoir_hor.loc[:, ["inline", "xline", "z"]].to_numpy()
# use of pyvista to generate a polydata object
hor_cloud_points = pv.PolyData(hor_array_points)
# generation of a surface from the polydata by using pyvista delaunay triangulation
surf_points = hor_cloud_points.delaunay_2d()
# In[17]:
# from the polydata object, we get the flattened vtk points for the 3d visualization
# (these pair with polys_hor, which was triangulated from the x/y/z version above)
polydata_points = surf_points.extract_geometry()
points_hor = polydata_points.points.ravel()
# ## Seismic
# In[18]:
# import of the seismic in numpy format
# The decimated cubes loaded below were produced from the full cube like this:
#   seis = np.load('data/Seismic/final_cube.npy')
#   z = seis[:,:,0:375:25];  np.save(file='data/Seismic/final_cube_z', arr=z[:,:,:])
#   x = seis[0:220:20,:,:];  np.save(file='data/Seismic/final_cube_in', arr=z[:,:,:])
#   y = seis[:,0:380:20,:];  np.save(file='data/Seismic/final_cube_xl', arr=z[:,:,:])


def _seismic_slice(mesh, dimensions, spacing):
    """Wrap a pyvista-wrapped seismic cube as a dash_vtk ImageData child.

    Returns a 1-tuple (matching the trailing comma of the original literals;
    dash_vtk accepts any sequence of children). Amplitudes are scaled x10000,
    presumably to give the view's colorWindow/colorLevel a usable range.
    """
    return (
        dash_vtk.ImageData(
            dimensions=dimensions,
            origin=[10030, 2145, 0],
            spacing=spacing,
            children=[
                dash_vtk.PointData(
                    [
                        dash_vtk.DataArray(
                            registration="setScalars", values=mesh["values"] * 10000,
                        )
                    ]
                )
            ],
        ),
    )


# Depth (z), inline (x) and xline (y) decimated cubes, wrapped for rendering.
# Module-level names z/z_mesh etc. are kept for any later code that uses them.
z = np.load("data/Seismic/final_cube_z.npy")
z_mesh = pv.wrap(z)
z_slice = _seismic_slice(z_mesh, [231, 392, 15], [1, 1, 250])
x = np.load("data/Seismic/final_cube_in.npy")
x_mesh = pv.wrap(x)
x_slice = _seismic_slice(x_mesh, [11, 392, 376], [20, 1, 10])
y = np.load("data/Seismic/final_cube_xl.npy")
y_mesh = pv.wrap(y)
y_slice = _seismic_slice(y_mesh, [231, 19, 376], [1, 20, 10])
# ## Grid
# In[23]:
# Corner points of the survey bounding box, flattened as x, y, z triplets.
# Corners are enumerated with depth (0, 3750) outermost, then x, then y —
# the same ordering as the original explicit literal.
grid_points = np.array(
    [
        coord
        for depth_val in (0, 3750)
        for x_val in (100300, 102600)
        for y_val in (21450, 25360)
        for coord in (x_val, y_val, depth_val)
    ]
)
# # VTK Visualization
# ## View
# In[312]:
# Names of the wells rendered in the scene. The list order fixes the
# vtk-polydata<N> child ids, which the update callbacks below rely on.
_WELL_NAMES = [
    "15-9_19A",
    "15-9_19BT2",
    "15-9_19SR",
    "15-9_F11B",
    "15-9_F12",
    "15-9_F14",
    "15-9_F15C",
    "15-9_F1C",
]


def _well_representation(well_name, polydata_idx):
    """Red polyline GeometryRepresentation for one well track.

    :param well_name: key into points_well / lines_well (also used in the id)
    :param polydata_idx: 1-based index used for the PolyData child id
    """
    return dash_vtk.GeometryRepresentation(
        id="vtk-{}".format(well_name),
        children=[
            dash_vtk.PolyData(
                id="vtk-polydata{}".format(polydata_idx),
                points=points_well[well_name],
                lines=lines_well[well_name],
            ),
        ],
        property={"edgeVisibility": False, "lineWidth": 100, "color": (255, 0, 0)},
        actor={"scale": (10, 10, 1)},
    )


# The 3D scene: three orthogonal seismic slice planes, the eight well tracks,
# the reservoir horizon surface, and the bounding-box grid with cube axes.
slice_view = dash_vtk.View(
    id="slice-view",
    cameraPosition=[1, 0, 0],
    cameraViewUp=[0, 0, -1],
    cameraParallelProjection=False,
    background=[0.1137, 0.2078, 0.3411],
    children=[
        dash_vtk.SliceRepresentation(
            id="slice-repr-in",
            iSlice=5,
            property={"colorWindow": 2000, "colorLevel": 0},
            actor={"scale": (10, 10, 1)},
            colorMapPreset="Grayscale",
            children=x_slice,
        ),
        dash_vtk.SliceRepresentation(
            id="slice-repr-xl",
            jSlice=8,
            actor={"scale": (10, 10, 1)},
            property={"colorWindow": 2000, "colorLevel": 0},
            colorMapPreset="Grayscale",
            children=y_slice,
        ),
        dash_vtk.SliceRepresentation(
            id="slice-repr-z",
            kSlice=2,
            actor={"scale": (10, 10, 1)},
            property={"colorWindow": 2000, "colorLevel": 0},
            colorMapPreset="Grayscale",
            children=z_slice,
        ),
    ]
    + [_well_representation(name, n) for n, name in enumerate(_WELL_NAMES, start=1)]
    + [
        dash_vtk.GeometryRepresentation(
            id="vtk-horizon",
            children=[
                dash_vtk.PolyData(
                    id="vtk-polydata9",
                    points=points_hor,
                    polys=polys_hor,
                    children=[
                        dash_vtk.PointData(
                            [
                                dash_vtk.DataArray(
                                    id="vtk-array",
                                    registration="setScalars",
                                    name="Depth",
                                    values=depth,
                                )
                            ]
                        )
                    ],
                )
            ],
            colorMapPreset="erdc_rainbow_bright",
            colorDataRange=color_range,
            property={"edgeVisibility": False},
            actor={"scale": (10, 10, 1)},
        ),
        dash_vtk.GeometryRepresentation(
            id="vtk-grid",
            children=[dash_vtk.PolyData(id="vtk-polydata10", points=grid_points)],
            property={"edgeVisibility": False},
            # grid_points are already in the scaled coordinate frame, so no actor scale here
            showCubeAxes=True,
            cubeAxesStyle={"axisLabels": ["Inline", "Xline", "Depth (m)"]},
        ),
    ],
)
# In[313]:
# Sidebar controls for the 3D view.
controls = [
    # Show/hide the well polylines.
    dbc.FormGroup(
        [
            dbc.Label("Display Wells:"),
            dbc.RadioItems(
                options=[
                    {"label": "Yes", "value": "Yes"},
                    {"label": "No", "value": "No"},
                ],
                value="Yes",
                id="wells_id",
                inline=True,
            ),
        ]
    ),
    # Show/hide the reservoir-top horizon surface.
    dbc.FormGroup(
        [
            dbc.Label("Display Reservoir Horizon:"),
            dbc.RadioItems(
                options=[
                    {"label": "Yes", "value": "Yes"},
                    {"label": "No", "value": "No"},
                ],
                value="Yes",
                id="horizon_id",
                inline=True,
            ),
        ]
    ),
    # Choose which seismic slice planes are rendered.
    dbc.FormGroup(
        [
            dbc.Label("Display seismic:"),
            dbc.Checklist(
                options=[
                    {"label": "Inline", "value": "inline"},
                    {"label": "Xline", "value": "xline"},
                    {"label": "Depth Slice", "value": "z"},
                ],
                value=["inline", "xline", "z"],
                id="enabled",
                inline=True,
            ),
        ]
    ),
    # Sliders pick the slice index along each axis; the Divs above each slider
    # display the corresponding survey coordinate (filled in by a callback).
    dbc.FormGroup(
        [
            dbc.Label("Seismic controls"),
            html.Br(),
            html.Div(id="inline_val", style={"margin-right": 0, "margin-left": 0,}),
            dcc.Slider(id="slider-in", min=0, max=11, value=5),
            html.Div(id="xline_val", style={"margin-right": 0, "margin-left": 0,}),
            dcc.Slider(id="slider-xl", min=0, max=19, value=9),
            html.Div(id="z_val", style={"margin-right": 0, "margin-left": 0,}),
            dcc.Slider(id="slider-z", min=0, max=15, value=2),
        ],
    ),
]
# In[314]:
app = dash.Dash(__name__)
server = app.server  # exposed for WSGI deployment (e.g. gunicorn)
# Page layout: title/description block on top, then the controls card (left)
# beside the 3D view card (right).
app.layout = dbc.Container(
    fluid=True,
    children=[
        # Header: title, subtitle, and dataset description.
        html.Div(
            [
                html.H3(
                    "Geoscience Data Visualizer",
                    style={
                        "fontSize": "2vw",
                        "lineHeight": 1.3,
                        "letterSpacing": "-1px",
                        "marginBottom": "0px",
                        "textAlign": "center",
                        "marginTop": "0px",
                        "fontFamily": "sans-serif",
                    },
                ),
                html.H5(
                    "Volve Dataset",
                    style={
                        "fontSize": "1.5vw",
                        "lineHeight": 1.5,
                        "letterSpacing": "-0.5px",
                        "marginBottom": "0px",
                        "textAlign": "center",
                        "marginTop": "0px",
                        "fontFamily": "sans-serif",
                    },
                ),
                dcc.Markdown(
                    "Volve is a Norwegian Oil Field in the North Sea, that in 2018, Equinor and the Volve license partners decided to releasea a big dataset containing all kinds of data, from seismic to production related to the field, all of these under an Open License, that you can check [here.](https://www.equinor.com/content/dam/statoil/documents/what-we-do/Equinor-HRS-Terms-and-conditions-for-licence-to-data-Volve.pdf)",
                    style={
                        "fontSize": ".7vw",
                        "marginBottom": "1vh",
                        "textAlign": "center",
                        "marginTop": "0px",
                        "fontFamily": "sans-serif",
                    },
                ),
            ]
        ),
        # Body row: controls card and the dash_vtk view card.
        html.Div(
            [
                html.Div(style={"margin-bottom": "9px"}),
                dbc.Row(
                    [
                        dbc.Col(
                            dbc.Card(
                                [dbc.CardBody(controls),], style={"width": "15vw"}
                            ),
                            style={"margin-left": "5vw"},
                            width={"size": 2},
                        ),
                        dbc.Col(
                            dbc.Card(
                                [dbc.CardBody(slice_view),],
                                style={"height": "85vh", "width": "73vw"},
                            ),
                            width={"size": 9},
                        ),
                    ]
                ),
            ],
            style={"margin-bottom": "0px"},
        ),
    ],
)
# In[315]:
@app.callback(
    [
        Output("inline_val", "children"),
        Output("xline_val", "children"),
        Output("z_val", "children"),
    ],
    [
        Input("slider-in", "value"),
        Input("slider-xl", "value"),
        Input("slider-z", "value"),
    ],
)
def inl_value(in_val, xl_val, z_val):
    """Return display labels for the three slice sliders.

    Each slider index maps linearly onto survey coordinates, so the
    per-call lookup dictionaries of the original implementation are
    replaced by direct arithmetic producing identical values:
      - inlines start at 100300, step 200 (slider range 0-11)
      - xlines start at 21450, step 200 (slider range 0-19)
      - depth slices start at 0 m, step 250 m (slider range 0-15)
    """
    in_display = 100300 + 200 * in_val  # same values as the old inl_dict
    xl_display = 21450 + 200 * xl_val   # same values as the old xl_dict
    z_display = 250 * z_val             # same values as the old z_dict
    return (
        f"Inlines: {in_display}",
        f"Xlines: {xl_display}",
        f"Depth slice: {z_display} m",
    )
@app.callback(
[
Output("slice-view", "triggerRender"),
Output("slice-repr-in", "iSlice"),
Output("slice-repr-xl", "jSlice"),
Output("slice-repr-z", "kSlice"),
Output("vtk-15-9_19A", "actor"),
Output("vtk-15-9_19BT2", "actor"),
Output("vtk-15-9_19SR", "actor"),
Output("vtk-15-9_F11B", "actor"),
Output("vtk-15-9_F12", "actor"),
Output("vtk-15-9_F14", "actor"),
Output("vtk-15-9_F15C", "actor"),
Output("vtk-15-9_F1C", "actor"),
Output("vtk-horizon", "actor"),
Output("slice-repr-in", "actor"),
Output("slice-repr-xl", "actor"),
Output("slice-repr-z", "actor"),
],
[
Input("slider-in", "value"),
Input("slider-xl", "value"),
Input("slider-z", "value"),
Input("wells_id", "value"),
Input("horizon_id", "value"),
Input("enabled", "value"),
],
)
def update_slice_property(i, j, k, well, hor, seis):
render_call = random.random()
if well == "No":
act = {"scale": (10, 10, 1), "visibility": 0}
act2 = {"scale": (10, 10, 1), "visibility": 0}
act3 = {"scale": (10, 10, 1), "visibility": 0}
act4 = {"scale": (10, 10, 1), "visibility": 0}
act5 = {"scale": (10, 10, 1), "visibility": 0}
act6 = {"scale": (10, 10, 1), "visibility": 0}
act7 = {"scale": (10, 10, 1), "visibility": 0}
act8 | |
# 查询文档信息
project = Project.objects.get(id=doc.top_doc) # 查询文档所属的文集信息
pro_colla = ProjectCollaborator.objects.filter(project=project, user=request.user) # 查询用户的协作文集信息
if (request.user == doc.create_user) or (pro_colla[0].role == 1):
history = DocHistory.objects.get(id=his_id)
if history.doc == doc:
return JsonResponse({'status':True,'data':history.pre_content})
else:
return JsonResponse({'status': False, 'data': '非法请求'})
else:
return JsonResponse({'status':False,'data':'非法请求'})
except Exception as e:
logger.exception("文档历史版本获取出错")
return JsonResponse({'status':False,'data':'获取异常'})
# Manage a document's history versions
@login_required()
@require_http_methods(['GET',"POST"])
def manage_doc_history(request,doc_id):
    """List (GET) or delete (POST) history versions of a document.

    GET: renders a paginated (15 per page) list of DocHistory records for
    a document created by the requesting user.  The template context is
    supplied via ``locals()``, so the local variable names below are
    load-bearing for the template.
    POST: deletes one history record identified by ``history_id``; the
    filter scopes the delete to this doc and this user, so a foreign id
    silently deletes nothing.
    """
    if request.method == 'GET':
        try:
            # Only the document's creator may view its history.
            doc = Doc.objects.get(id=doc_id,create_user=request.user)
            history_list = DocHistory.objects.filter(create_user=request.user,doc=doc_id).order_by('-create_time')
            paginator = Paginator(history_list, 15)
            page = request.GET.get('page', 1)
            try:
                historys = paginator.page(page)
            except PageNotAnInteger:
                historys = paginator.page(1)
            except EmptyPage:
                historys = paginator.page(paginator.num_pages)
            return render(request, 'app_doc/manage_doc_history.html', locals())
        except Exception as e:
            logger.exception("管理文档历史版本页面访问出错")
            return render(request, '404.html')
    elif request.method == 'POST':
        try:
            history_id = request.POST.get('history_id','')
            DocHistory.objects.filter(id=history_id,doc=doc_id,create_user=request.user).delete()
            return JsonResponse({'status':True,'data':'删除成功'})
        except:
            logger.exception("操作文档历史版本出错")
            return JsonResponse({'status':False,'data':'出现异常'})
# Document recycle bin
@login_required()
@require_http_methods(['GET','POST'])
def doc_recycle(request):
    """Recycle bin for deleted documents (status == 3).

    GET: paginated list (15 per page) of the requesting user's deleted
    documents; template context comes from ``locals()``.
    POST (``type`` parameter):
      - with ``doc_id``: ``restore`` (back to draft) or ``del`` (purge).
        Allowed for the doc creator, the project creator, or an advanced
        (role == 1) collaborator.
      - without ``doc_id``: ``empty`` purges or ``restoreAll`` restores
        all deleted docs of the requesting user.  NOTE(review): these two
        branches are only reachable when no ``doc_id`` is posted.
    """
    if request.method == 'GET':
        # Documents whose status marks them as deleted
        doc_list = Doc.objects.filter(status=3,create_user=request.user).order_by('-modify_time')
        # Pagination
        paginator = Paginator(doc_list, 15)
        page = request.GET.get('page', 1)
        try:
            docs = paginator.page(page)
        except PageNotAnInteger:
            docs = paginator.page(1)
        except EmptyPage:
            docs = paginator.page(paginator.num_pages)
        return render(request,'app_doc/manage_doc_recycle.html',locals())
    elif request.method == 'POST':
        try:
            # Request parameters
            doc_id = request.POST.get('doc_id', None) # document ID
            types = request.POST.get('type',None) # operation type
            if doc_id:
                # Look up the document, its project and the caller's role
                try:
                    doc = Doc.objects.get(id=doc_id)
                    project = Project.objects.get(id=doc.top_doc) # project that owns the doc
                    # Collaboration records of the requesting user on that project
                    pro_colla = ProjectCollaborator.objects.filter(project=project, user=request.user) #
                    if pro_colla.exists():
                        colla_user_role = pro_colla[0].role
                    else:
                        colla_user_role = 0
                except ObjectDoesNotExist:
                    return JsonResponse({'status': False, 'data': '文档不存在'})
                # Doc creator, advanced collaborator or project creator may operate
                if (request.user == doc.create_user) or (colla_user_role == 1) or (request.user == project.create_user):
                    # Restore the document
                    if types == 'restore':
                        # Back to draft status
                        doc.status = 0
                        doc.modify_time = datetime.datetime.now()
                        doc.save()
                    # Permanently delete the document
                    elif types == 'del':
                        doc.delete()
                    else:
                        return JsonResponse({'status':False,'data':'无效请求'})
                    # NOTE(review): '删除完成' is returned for restore as
                    # well as delete — confirm the message is intended.
                    return JsonResponse({'status': True, 'data': '删除完成'})
                else:
                    return JsonResponse({'status': False, 'data': '非法请求'})
            # Empty the recycle bin
            elif types == 'empty':
                docs = Doc.objects.filter(status=3,create_user=request.user)
                docs.delete()
                return JsonResponse({'status': True, 'data': '清空成功'})
            # Restore everything in the recycle bin
            elif types == 'restoreAll':
                Doc.objects.filter(status=3,create_user=request.user).update(status=0)
                return JsonResponse({'status': True, 'data': '还原成功'})
            else:
                return JsonResponse({'status': False, 'data': '参数错误'})
        except Exception as e:
            logger.exception("处理文档出错")
            return JsonResponse({'status': False, 'data': '请求出错'})
# One-click publish of a document.
@login_required()
@require_http_methods(['POST'])
def fast_publish_doc(request):
    """Set a document's status to published (1).

    Allowed for the document creator, the project creator, or a
    collaborator with advanced rights (role == 1).
    """
    doc_id = request.POST.get('doc_id',None)
    # Resolve the document, its project and the caller's collaboration role.
    try:
        doc = Doc.objects.get(id=doc_id)
        project = Project.objects.get(id=doc.top_doc)
        pro_colla = ProjectCollaborator.objects.filter(project=project, user=request.user)
        colla_user_role = pro_colla[0].role if pro_colla.exists() else 0
    except ObjectDoesNotExist:
        return JsonResponse({'status': False, 'data': '文档不存在'})
    is_doc_creator = request.user == doc.create_user
    is_pro_creator = request.user == project.create_user
    if not (is_doc_creator or is_pro_creator or colla_user_role == 1):
        return JsonResponse({'status':False,'data':'非法请求'})
    try:
        doc.status = 1
        doc.modify_time = datetime.datetime.now()
        doc.save()
        return JsonResponse({'status':True,'data':'发布成功'})
    except:
        logger.exception("文档一键发布失败")
        return JsonResponse({'status':False,'data':'发布失败'})
# Create a document template
@login_required()
@require_http_methods(['GET',"POST"])
def create_doctemp(request):
    """Create a document template.

    GET: render the creation page (context comes from ``locals()``, so
    the ``doctemps`` name is used by the template).
    POST: create a DocTemp from ``name``/``content``; the name must be
    non-empty.
    """
    if request.method == 'GET':
        doctemps = DocTemp.objects.filter(create_user=request.user)
        return render(request,'app_doc/create_doctemp.html',locals())
    elif request.method == 'POST':
        try:
            name = request.POST.get('name','')
            content = request.POST.get('content','')
            if name != '':
                doctemp = DocTemp.objects.create(
                    name = name,
                    content = content,
                    create_user=request.user
                )
                # NOTE(review): objects.create() already persists the row;
                # this extra save() issues a second UPDATE — confirm needed.
                doctemp.save()
                return JsonResponse({'status':True,'data':'创建成功'})
            else:
                return JsonResponse({'status':False,'data':'模板标题不能为空'})
        except Exception as e:
            logger.exception("创建文档模板出错")
            return JsonResponse({'status':False,'data':'请求出错'})
# Modify a document template
@login_required()
@require_http_methods(['GET',"POST"])
def modify_doctemp(request,doctemp_id):
    """Edit a document template (owner only).

    GET: render the edit page for ``doctemp_id``; template context is
    ``locals()``, so local names are load-bearing.
    POST: update name/content of the template identified by the POSTed
    ``doctemp_id``.
    """
    if request.method == 'GET':
        try:
            doctemp = DocTemp.objects.get(id=doctemp_id)
            # Only the creator may open the edit page.
            if request.user.id == doctemp.create_user.id:
                doctemps = DocTemp.objects.filter(create_user=request.user)
                return render(request,'app_doc/modify_doctemp.html',locals())
            else:
                return HttpResponse('非法请求')
        except Exception as e:
            logger.exception("访问文档模板修改页面出错")
            return render(request, '404.html')
    elif request.method == 'POST':
        try:
            # NOTE(review): the URL argument doctemp_id is shadowed by the
            # POSTed value here — confirm that is intended.
            doctemp_id = request.POST.get('doctemp_id','')
            name = request.POST.get('name','')
            content = request.POST.get('content','')
            if doctemp_id != '' and name !='':
                doctemp = DocTemp.objects.get(id=doctemp_id)
                if request.user.id == doctemp.create_user.id:
                    doctemp.name = name
                    doctemp.content = content
                    doctemp.save()
                    return JsonResponse({'status':True,'data':'修改成功'})
                else:
                    return JsonResponse({'status':False,'data':'非法操作'})
            else:
                return JsonResponse({'status':False,'data':'参数错误'})
        except Exception as e:
            logger.exception("修改文档模板出错")
            return JsonResponse({'status':False,'data':'请求出错'})
# Delete a document template.
@login_required()
def del_doctemp(request):
    """Delete a document template owned by the requesting user.

    NOTE(review): unlike the sibling views this one has no
    @require_http_methods guard, although it reads request.POST — a GET
    request simply falls into the '参数错误' branch; confirm whether a
    POST-only restriction was intended.
    """
    try:
        doctemp_id = request.POST.get('doctemp_id','')
        if doctemp_id == '':
            return JsonResponse({'status': False, 'data': '参数错误'})
        doctemp = DocTemp.objects.get(id=doctemp_id)
        if request.user.id != doctemp.create_user.id:
            return JsonResponse({'status':False,'data':'非法请求'})
        doctemp.delete()
        return JsonResponse({'status':True,'data':'删除完成'})
    except Exception as e:
        logger.exception("删除文档模板出错")
        return JsonResponse({'status':False,'data':'请求出错'})
# Manage document templates
@login_required()
@require_http_methods(['GET'])
def manage_doctemp(request):
    """List the requesting user's document templates with optional search.

    GET parameters:
        kw:   optional keyword; filters templates by content
              (case-insensitive containment).  NOTE(review): the search
              matches template content only, not the name — confirm.
        page: page number (10 templates per page).

    Renders ``app_doc/manage_doctemp.html`` with ``locals()``, so the
    local names (``doctemp_list``, ``doctemps``, ...) are part of the
    template context and keep the original naming.

    Fix: the pagination block was duplicated verbatim in both the search
    and no-search branches; it is now shared.
    """
    try:
        search_kw = request.GET.get('kw', None)
        # Build the queryset once; the search branch only adds a filter.
        if search_kw:
            doctemp_list = DocTemp.objects.filter(
                create_user=request.user,
                content__icontains=search_kw
            ).order_by('-modify_time')
        else:
            doctemp_list = DocTemp.objects.filter(create_user=request.user).order_by('-modify_time')
        # Shared pagination (previously duplicated per branch).
        paginator = Paginator(doctemp_list, 10)
        page = request.GET.get('page', 1)
        try:
            doctemps = paginator.page(page)
        except PageNotAnInteger:
            doctemps = paginator.page(1)
        except EmptyPage:
            doctemps = paginator.page(paginator.num_pages)
        if search_kw:
            # Echo the keyword back so the template can keep it in links.
            doctemps.kw = search_kw
        return render(request, 'app_doc/manage_doctemp.html', locals())
    except Exception as e:
        logger.exception("管理文档模板页面访问出错")
        return render(request, '404.html')
# Fetch one document template's content.
@login_required()
@require_http_methods(["POST"])
def get_doctemp(request):
    """Return the raw content of the template given by ``doctemp_id``."""
    try:
        doctemp_id = request.POST.get('doctemp_id','')
        if doctemp_id == '':
            return JsonResponse({'status':False,'data':'参数错误'})
        template = DocTemp.objects.get(id=int(doctemp_id))
        content = template.serializable_value('content')
        return JsonResponse({'status':True,'data':content})
    except Exception as e:
        logger.exception("获取指定文档模板出错")
        return JsonResponse({'status':False,'data':'请求出错'})
# Return all published documents of one project.
@require_http_methods(["POST"])
@logger.catch()
def get_pro_doc(request):
    """Return ``[id, name, parent_id, prefix]`` rows for published docs.

    Only top-level documents and their direct children are listed; child
    rows carry a "parent --> " display prefix.  Deeper levels are skipped.
    """
    pro_id = request.POST.get('pro_id','')
    if pro_id == '':
        return JsonResponse({'status':False,'data':'参数错误'})
    # id/name/parent_doc of every published doc, parents first.
    doc_list = Doc.objects.filter(top_doc=int(pro_id),status=1).values_list('id','name','parent_doc').order_by('parent_doc')
    item_list = []
    for doc_id, doc_name, parent_id in doc_list:
        if parent_id == 0:
            # Top-level document: no display prefix.
            item_list.append([doc_id, doc_name, parent_id, ''])
            continue
        # Child document: resolve its parent for the prefix.
        try:
            parent = Doc.objects.get(id=parent_id)
        except ObjectDoesNotExist:
            return JsonResponse({'status':False,'data':'文档id不存在'})
        if parent.parent_doc == 0:  # keep second-level docs only
            item_list.append([doc_id, doc_name, parent_id, parent.name + ' --> '])
    return JsonResponse({'status':True,'data':list(item_list)})
# Return the document tree data (three levels) of one project.
@login_required()
@require_http_methods(['POST'])
@logger.catch()
def get_pro_doc_tree(request):
    """Build a tree (id/field/title/level nodes) of published documents,
    at most three levels deep, each level ordered by ``sort``."""
    pro_id = request.POST.get('pro_id', None)
    if not pro_id:
        return JsonResponse({'status':False,'data':'参数错误'})
    doc_list = []
    # Level 1: top-level documents.
    top_docs = Doc.objects.filter(top_doc=pro_id,parent_doc=0,status=1).order_by('sort')
    for top_doc in top_docs:
        top_item = {
            'id': top_doc.id,
            'field': top_doc.name,
            'title': top_doc.name,
            'spread': True,
            'level': 1
        }
        # Level 2: children of this top-level document.
        sec_docs = Doc.objects.filter(top_doc=pro_id,parent_doc=top_doc.id,status=1).order_by('sort')
        if sec_docs.exists():
            top_item['children'] = []
            for sec_doc in sec_docs:
                sec_item = {
                    'id': sec_doc.id,
                    'field': sec_doc.name,
                    'title': sec_doc.name,
                    'level': 2
                }
                # Level 3: grandchildren, attached only when present.
                thr_docs = Doc.objects.filter(top_doc=pro_id,parent_doc=sec_doc.id,status=1).order_by('sort')
                if thr_docs.exists():
                    sec_item['children'] = [
                        {
                            'id': thr_doc.id,
                            'field': thr_doc.name,
                            'title': thr_doc.name,
                            'level': 3
                        }
                        for thr_doc in thr_docs
                    ]
                # The original appended in both branches; unconditional here.
                top_item['children'].append(sec_item)
        doc_list.append(top_item)
    return JsonResponse({'status':True,'data':doc_list})
# 404 page
def handle_404(request):
    """Render the generic 404 template for unmatched URLs."""
    return render(request,'404.html')
# Export a project's Markdown files.
@login_required()
@require_http_methods(["POST"])
def report_md(request):
    """Generate a zip of Markdown files for a project (creator only) and
    return its media-relative URL."""
    pro_id = request.POST.get('project_id','')
    user = request.user
    try:
        project = Project.objects.get(id=int(pro_id))
        if project.create_user != user:
            return JsonResponse({'status':False,'data':'无权限'})
        # Generate the archive; work() returns its absolute path.
        md_file_path = ReportMD(project_id=int(pro_id)).work()
        # Keep only the file name and build the public media link.
        md_file_filename = os.path.split(md_file_path)[-1]
        md_file = "/media/reportmd_temp/" + md_file_filename
        return JsonResponse({'status':True,'data':md_file})
    except Exception as e:
        logger.exception("导出文集MD文件出错")
        return JsonResponse({'status':False,'data':'文集不存在'})
# Generate project export files - personal center - project management
@login_required()
@require_http_methods(["POST"])
def genera_project_file(request):
    """Generate an EPUB or PDF export file for a project.

    Access rules mirror project visibility (``project.role``):
      0 public, 1 private (creator/collaborator), 2 user allow-list,
      3 access-code protected (code read from cookie ``viewcode-<id>``).
    On success the previous export of the same type is deleted from disk
    and from ProjectReportFile, a fresh record is created, and the
    media-relative file path is returned.
    """
    report_type = request.POST.get('types',None) # export file type from the front end
    pro_id = request.POST.get('pro_id')
    try:
        project = Project.objects.get(id=int(pro_id))
        # Collaboration info: colla_user becomes the count of the caller's
        # collaboration records (0 means "not a collaborator").
        if request.user.is_authenticated:
            colla_user = ProjectCollaborator.objects.filter(project=project, user=request.user)
            if colla_user.exists():
                colla_user_role = colla_user[0].role
                colla_user = colla_user.count()
            else:
                colla_user = colla_user.count()
        else:
            colla_user = 0
        # Public project - anyone may export
        if project.role == 0:
            allow_export = True
        # Private project - only creator and collaborators may export
        elif (project.role == 1):
            if (request.user != project.create_user) and (colla_user == 0):
                allow_export = False
            else:
                allow_export = True
        # Allow-list project - listed users, creator and collaborators
        elif project.role == 2:
            user_list = project.role_value
            if request.user.is_authenticated: # authenticated: check the allow-list
                if (request.user.username not in user_list) and \
                        (request.user != project.create_user) and \
                        (colla_user == 0): # neither listed nor a collaborator
                    allow_export = False
                else:
                    allow_export = True
            else: # anonymous visitors are rejected
                allow_export = False
        # Access-code project - creator, collaborators, or a valid code
        elif project.role == 3:
            # Not creator/collaborator - an access code is required
            if (request.user != project.create_user) and (colla_user == 0):
                viewcode = project.role_value
                viewcode_name = 'viewcode-{}'.format(project.id)
                # Access code previously stored in the visitor's cookie
                r_viewcode = request.COOKIES[
                    viewcode_name] if viewcode_name in request.COOKIES.keys() else 0
                if viewcode != r_viewcode: # cookie code mismatch - no export
                    allow_export = False
                else:
                    allow_export = True
            else:
                allow_export = True
        else:
            allow_export = False
        # Export is permitted
        if allow_export:
            # EPUB export
            if report_type in ['epub']:
                try:
                    report_project = ReportEPUB(
                        project_id=project.id
                    ).work()
                    # print(report_project)
                    report_file_path = report_project.split('media', maxsplit=1)[-1] # path of the generated file
                    epub_file = '/media' + report_file_path + '.epub' # media-relative path
                    # Existing export records of this type for the project
                    report_cnt = ProjectReportFile.objects.filter(project=project,file_type='epub')
                    # Remove stale files from disk, then their DB rows
                    if report_cnt.count() != 0:
                        for r in report_cnt:
                            is_exist = os.path.exists(settings.BASE_DIR + r.file_path)
                            if is_exist:
                                os.remove(settings.BASE_DIR + r.file_path)
                        report_cnt.delete() # delete the database records
                    # Record the fresh export
                    ProjectReportFile.objects.create(
                        project=project,
                        file_type='epub',
                        file_name=epub_file,
                        file_path=epub_file
                    )
                    return JsonResponse({'status': True, 'data': epub_file})
                except Exception as e:
                    return JsonResponse({'status': False, 'data': '生成出错'})
            # PDF export
            elif report_type in ['pdf']:
                try:
                    report_project = ReportPDF(
                        project_id=project.id
                    ).work()
                    # ReportPDF signals failure by returning False
                    if report_project is False:
                        return JsonResponse({'status':False,'data':'生成出错'})
                    report_file_path = report_project.split('media', maxsplit=1)[-1] # path of the generated file
                    pdf_file = '/media' + report_file_path # media-relative path
                    # Existing export records of this type for the project
                    report_cnt = ProjectReportFile.objects.filter(project=project, file_type='pdf')
                    # Remove stale files from disk, then their DB rows
                    if report_cnt.count() != 0:
                        for r in report_cnt:
                            is_exist = os.path.exists(settings.BASE_DIR + r.file_path)
                            if is_exist:
                                os.remove(settings.BASE_DIR + r.file_path)
                        report_cnt.delete() # delete the database records
                    # Record the fresh export
                    ProjectReportFile.objects.create(
                        project=project,
                        file_type='pdf',
                        file_name=pdf_file,
                        file_path=pdf_file
                    )
                    return JsonResponse({'status': True, 'data': pdf_file})
                except Exception as e:
                    return JsonResponse({'status': False, 'data': '生成出错'})
            else:
                return JsonResponse({'status': False, 'data': '不支持的类型'})
        # Export not permitted
        else:
            return JsonResponse({'status':False,'data':'无权限导出'})
    except ObjectDoesNotExist:
        return JsonResponse({'status':False,'data':'文集不存在'})
    except Exception as e:
        logger.exception("生成文集文件出错")
        return JsonResponse({'status':False,'data':'系统异常'})
# 获取文集前台导出文件
@allow_report_file
@require_http_methods(["POST"])
def report_file(request):
report_type = request.POST.get('types',None) # 获取前端传入到导出文件类型参数
pro_id = request.POST.get('pro_id')
try:
project = Project.objects.get(id=int(pro_id))
# 获取文集的协作用户信息
if request.user.is_authenticated:
colla_user = ProjectCollaborator.objects.filter(project=project, user=request.user)
if colla_user.exists():
colla_user_role = colla_user[0].role
colla_user = colla_user.count()
else:
colla_user = colla_user.count()
else:
colla_user = 0
# 公开的文集 - 可以直接导出
if project.role == 0:
allow_export = True
# 私密文集 - 非创建者和协作者不可导出
elif (project.role == 1):
if (request.user | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
########################################################################################################################
#
# Copyright (c) 2018, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""
########################################################################################################################
README
This is the layout generator for Double Tail Sense Amplifier (DTSA)
This generator uses a particular coding style in an attempt to organize all the parameters and simplify the
complexity of the schematic and layout generator codes.
The style/use of row/transistor data structures are by no means a necessary style when using BAG
In particular:
- Transistor information (row, column location, size, orientation, etc) are stored in per-transistor
dictionaries
- Row information (name, orientation, width, threshold, etc) are stored in per-row dictionaries
- Lists of the rows in this analogBase class are used to store information about rows
- Many helper functions are defined to simplify the placement/alignment of transistors
- Transistor ports ('s', 'd', 'g') are stored in the transistor dictionaries to make access more intuitive
########################################################################################################################
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# noinspection PyUnresolvedReferences,PyCompatibility
from builtins import *
from typing import Dict, Any, Set, Optional, Union, List
from bag.layout.routing import TrackManager, TrackID, WireArray
from bag.layout.template import TemplateDB
from abs_templates_ec.analog_core import AnalogBase
class DoubleTailSenseAmplifier(AnalogBase):
"""A Double-tail Sense Amplifier with NMOS input pair
Parameters
----------
temp_db : TemplateDB
the template database.
lib_name : str
the layout library name.
params : Dict[str, Any]
the parameter values.
used_names : Set[str]
a set of already used cell names.
**kwargs
dictionary of optional parameters. See documentation of
:class:`bag.layout.template.TemplateBase` for details.
"""
    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None
        AnalogBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
        # Schematic parameters; populated during layout generation.
        self._sch_params = None
        ################################################################################
        # Define global variables for holding transistor/row information
        ################################################################################
        # All rows in bottom-to-top order, plus per-MOS-type sublists.
        self._global_rows = []
        self._global_nrows = []
        self._global_prows = []
        # Per-row width / threshold / segment dictionaries (filled from params).
        self._w_dict = None
        self._th_dict = None
        self._seg_dict = None
        # Substrate tap widths.
        self._ptap_w = None
        self._ntap_w = None
        self.wire_names = None
    @property
    def sch_params(self):
        # type: () -> Dict[str, Any]
        """Schematic parameters computed during layout, or None before."""
        return self._sch_params
@classmethod
def get_default_param_values(cls):
# type: () -> Dict[str, Any]
"""Returns a dictionary containing default parameter values.
Override this method to define default parameter values. As good practice,
you should avoid defining default values for technology-dependent parameters
(such as channel length, transistor width, etc.), but only define default
values for technology-independent parameters (such as number of tracks).
Returns
-------
default_params : dict[str, any]
dictionary of default parameter values.
"""
return dict(
flip_out_sd=False,
guard_ring_nf=0,
top_layer=None,
show_pins=True,
)
@classmethod
def get_params_info(cls):
"""Returns a dictionary containing parameter descriptions.
Override this method to return a dictionary from parameter names to descriptions.
Returns
-------
param_info : dict[str, str]
dictionary from parameter name to description.
"""
return dict(
top_layer='the top routing layer.',
tr_widths='Track width dictionary.',
tr_spaces='Track spacing dictionary.',
show_pins='True to create pin labels.',
lch='channel length, in meters.',
ptap_w='NMOS substrate width, in meters/number of fins.',
ntap_w='PMOS substrate width, in meters/number of fins.',
w_dict='NMOS/PMOS width dictionary.',
th_dict='NMOS/PMOS threshold flavor dictionary.',
seg_dict='NMOS/PMOS number of segments dictionary.',
fg_dum='Number of single-sided edge dummy fingers.',
)
def get_row_index(self,
row, # type: Dict
):
"""
Returns the index of the row within the nch or pch rows
Parameters
----------
row : Dict
the row whose index should be returned
Returns
-------
index : int
the index of the row
"""
# type: (...) -> int
if row['type'] == 'nch':
return self._global_nrows.index(row)
else:
return self._global_prows.index(row)
def get_row_from_rowname(self,
rowname, # type: str
):
"""
Returns the row dictionary cooresponding to the provided row name
Parameters
----------
rowname : str
the name of the row whose information should be returned
Returns
-------
output : Dict
the row information, or None if the row cannot be found
"""
# type: (...) -> Union[Dict, None]
output = None
for ind, row in enumerate(self._global_rows):
if row['name'] == rowname:
output = row
return output
def set_global_rows(self,
row_list, # type: List[Dict]
):
# type: (...) -> None
"""
Given an ordered list of rows (from bottom of design to top), sets up the global variables used for accessing
row properties.
Parameters
----------
row_list : List
The ordered list of row dictionaries. To abide by analogBase restrictions, all nch rows must come before
pch rows
Returns
-------
"""
self._global_rows = row_list
n_ind = 0
p_ind = 0
for ind, row in enumerate(row_list):
row['global_index'] = ind
if row['type'] == 'nch':
row['index'] = n_ind
n_ind += 1
self._global_nrows.append(row)
elif row['type'] == 'pch':
row['index'] = p_ind
p_ind += 1
self._global_prows.append(row)
else:
raise ValueError('Row type must be "nch" or "pch" to indicate MOS type')
def initialize_rows(self,
row_name, # type: str
orient, # type: str
nch_or_pch, # type: str
):
# type : (...) -> Dict[str, Union[float, str, int]]
"""
Initializes a data structure to hold useful information for each row
Parameters
----------
row_name : str
the name to give the row. Should match the python variable name storing the row dictionary
orient : str
either 'R0' or 'MX' to specify whether the row is oriented gate down or gate up
nch_or_pch : str
either 'nch' to specify an nch row or 'pch' to specify a pch row
Returns
-------
"""
if row_name not in self._w_dict.keys():
raise ValueError("row_name '{}' must appear in w_dict".format(row_name))
if row_name not in self._th_dict.keys():
raise ValueError("row_name '{}' must appear in th_dict".format(row_name))
if not ((orient == 'R0') or (orient == 'MX')):
raise ValueError("orient parameter must be 'R0' or 'MX' for transistor row {}".format(row_name))
row = {
'name': row_name,
'width': self._w_dict[row_name],
'th': self._th_dict[row_name],
'orient': orient,
'type': nch_or_pch
}
return row
def _row_prop(self,
property_name, # type: str
nch_or_pch # type: str
):
# type: (...) -> List
"""
Returns a list of the given property type for all rows of the nch or pch type specified.
Useful helper function for draw_base
Parameters
----------
property_name : str
The row property to be returned
nch_or_pch : str
either 'nch' or 'pch', to return the property for all nch rows or pch rows respectively
Returns
-------
prop_list : List
A list of the given property for all nch rows or pch rows
"""
prop_list = []
for row in self._global_rows:
if row['type'] == nch_or_pch:
prop_list += [row[property_name]]
return prop_list
def initialize_tx(self,
name, # type: str
row, # type: Dict
fg_spec, # type: Union[str, int]
seff_net=None, # type: Optional[str]
deff_net=None, # type: Optional[str]
):
# type: (...) -> Dict[str, Union[str, int, float, Dict, WireArray]]
"""
Initialize the transistor data structure.
s_net and d_net are the source and drain nets of the transistor in the schematic. i.e. it is the effective
source and drain connections, regardless of whether the source is drawn on the even or odd diffusion regions
Parameters
----------
name : str
name of the transistor. Should match the python variable used to store the transistor dictionary
row : Dict
the row dictionary for the row this transistor will be placed on
fg_spec : Union[str, int]
Either)
- the name of the dictionary | |
<reponame>razariah/UC_BERKELEY_DATA_ANALYTICS<filename>3_Py_Poll_with_Python/Python_practice.py
# --- Practice transcript: basic types, arithmetic, and list operations. ---
# Bare expressions (e.g. a line that is just `counties`) are REPL residue:
# they evaluate but print nothing when run as a script.
print(type(3))
ballots = 1341
type("Hello World")
type(True)
num_candidates = 3
winning_percentage = 73.81
candidate = "Diane"
won_election = True
print(num_candidates)
print(winning_percentage)
print(candidate)
print(won_election)
help("keywords")
print((5 + 2) * 3)
print((8 // 5) - 3)
print(8 + (22 * (2 - 4)))
print(16 - 3 / (2 + 7) - 1)
print(3 ** (3 % 5))
print(5 + (9 * 3 / 2 - 4))
print(5 + (9 * 3 / 2 - 4))
# List indexing, slicing, append/insert/remove/pop.
counties = ["Arapahoe","Denver","Jefferson"]
print(counties)
counties
counties [0]
print (counties [0])
print(counties[-1])
print(counties[-2])
print(counties[-3])
len(counties)
print(counties[0:2])
print(counties[0:1])
counties[1:]
counties.append("El Paso")
counties
# append/insert return None, so these print None while mutating the list.
print(counties.append("El Paso"))
(counties.insert(2, "El Paso"))
print(len (counties))
print (counties)
print (counties.insert(2, "El Paso"))
counties
counties.remove("El Paso")
print (counties)
counties.pop(3)
print(counties)
counties.remove("El Paso")
counties.pop(3)
print(counties)
counties[2] = "El Paso"
print(counties)
print (counties.insert(2, 'Denver'))
print(counties)
counties.pop(3)
counties.pop(2)
print(counties)
print (counties.insert(2, "Jefferson"))
print(counties)
counties = ["Arapahoe","Denver","Jefferson"]
# NOTE(review): "<NAME>" looks like an anonymized placeholder left by a
# dataset scrubber — confirm the intended literal.
counties.append("<NAME>")
counties.remove("Arapahoe")
counties.append("Denver")
counties.append("Jefferson")
print(counties)
# --- Practice transcript: tuples, dictionaries, and lists of dicts. ---
my_tuple = ( )
my_tuple = tuple()
counties_tuple = ("Arapahoe","Denver","Jefferson")
print (counties_tuple)
len(counties_tuple)
counties_tuple[1]
#{key:value}
my_dictionary = {}
my_dictionary = dict()
counties_dict = {}
counties_dict = {}
counties_dict["Arapahoe"] = 422829
counties_dict["Denver"] = 463353
counties_dict["Jefferson"] = 432438
counties_dict
len(counties_dict)
#If we add the items() method to the end of counties_dict, we'll get this output:
#>>> counties_dict.items()
#dict_items([('Arapahoe', 422829), ('Denver', 463353), ('Jefferson', 432438)])
#>>>
#To get only the keys from a dictionary, add the keys() method at the end of the dictionary
counties_dict.items()
print(counties_dict.keys())
#To retrieve only the values from a dictionary, add the values() method to the end of the dictionary
counties_dict.values()
print(counties_dict.get("Denver"))
counties_dict["Arapahoe"]
#To get only the keys from a dictionary, add the keys() method at the end of the dictionary
counties_dict.keys()
counties_dict = {}
counties_dict["Arapahoe"] = 422829
counties_dict["Denver"] = 463353
counties_dict["Jefferson"] = 432438
print (len(counties_dict))
print(counties_dict.items())
print(counties_dict.keys())
print(counties_dict.values())
print(counties_dict.get("Denver"))
counties_dict["Arapahoe"]
voting_data = []
voting_data.append({"county":"Arapahoe", "registered_voters": 422829})
voting_data.append({"county":"Denver", "registered_voters": 463353})
voting_data.append({"county":"Jefferson", "registered_voters": 432438})
voting_data
# NO OUTPUT PRINTED
print(len(voting_data))
voting_data
# The next line is a bare list literal — a no-op expression (REPL residue).
[{'county': 'Arapahoe', 'registered_voters': 422829}, {'county': 'Denver', 'registered_voters': 463353}, {'county': 'Jefferson', 'registered_voters': 432438}]
# --- Practice transcript: input(), arithmetic, and conditionals. ---
# How many votes did you get?
my_votes = int(input("How many votes did you get in the election? "))
print (my_votes)
# Total votes in the election
total_votes = int(input("What is the total votes in the election? "))
# Calculate the percentage of votes you received.
percentage_votes = (my_votes / total_votes) * 100
print("I received " + str(percentage_votes)+"% of the total votes.")
counties = ["Arapahoe","Denver","Jefferson"]
if counties[0] == 'Arapahoe':
    print(counties[0])
counties = ["Arapahoe","Denver","Jefferson"]
if counties[1] == 'Denver':
    print(counties[1])
temperature = int(input("What is the temperature outside? "))
if temperature > 80:
    print("Turn on the AC.")
else:
    print("Open the windows.")
#What is the score?
score = int(input("What is your test score? "))
# Determine the grade (nested if/else version).
if score >= 90:
    print('Your grade is an A.')
else:
    if score >= 80:
        print('Your grade is a B.')
    else:
        if score >= 70:
            print('Your grade is a C.')
        else:
            if score >= 60:
                print('Your grade is a D.')
            else:
                print('Your grade is an F.')
# What is the score?
score = int(input("What is your test score? "))
# Determine the grade (flat elif version of the same ladder).
if score >= 90:
    print('Your grade is an A.')
elif score >= 80:
    print('Your grade is a B.')
elif score >= 70:
    print('Your grade is a C.')
elif score >= 60:
    print('Your grade is a D.')
else:
    print('Your grade is an F.')
# Membership tests with `in`, `and`, `or`.
counties = ["Arapahoe","Denver","Jefferson"]
if "El Paso" in counties:
    print("El Paso is in the list of counties.")
else:
    print("El Paso is not the list of counties.")
if "Arapahoe" in counties and "El Paso" in counties:
    print("Arapahoe and El Paso are in the list of counties.")
else:
    print("Arapahoe or El Paso is not in the list of counties.")
if "Arapahoe" in counties or "El Paso" in counties:
    print("Arapahoe or El Paso is in the list of counties.")
else:
    print("Arapahoe and El Paso are not in the list of counties.")
x = 0
while x <= 5:
print(x)
x = x + 1
for county in counties:
print(county)
numbers = [0, 1, 2, 3, 4]
for num in numbers:
print(num)
for num in range(5):
print(num)
for i in range(len(counties)):
print(counties[i])
counties_dict = {"Arapahoe": 422829, "Denver": 463353, "Jefferson": 432438}
for county in counties_dict:
print(county)
for county in counties_dict.keys():
print(county)
for voters in counties_dict.values():
print(voters)
for county, voters in counties_dict.items():
print(county, voters)
voting_data = [{"county":"Arapahoe", "registered_voters": 422829},
{"county":"Denver", "registered_voters": 463353},
{"county":"Jefferson", "registered_voters": 432438}]
for county_dict in voting_data:
print(county_dict)
for county_dict in voting_data:
for value in county_dict.values():
print(value)
for county_dict in voting_data:
print(county_dict['county'])
#And here's how you would edit the code to use f-strings.
my_votes = int(input("How many votes did you get in the election? "))
total_votes = int(input("What is the total votes in the election? "))
print(f"I received {my_votes / total_votes * 100}% of the total votes.")
counties_dict = {"Arapahoe": 369237, "Denver":413229, "Jefferson": 390222}
# The same report, first with string concatenation, then with an f-string.
for county, voters in counties_dict.items():
    print(county + " county has " + str(voters) + " registered voters.")
for county, voters in counties_dict.items():
    print(f"{county} county has {voters} registered voters.")
candidate_votes = int(input("How many votes did the candidate get in the election? "))
total_votes = int(input("What is the total number of votes in the election? "))
# Adjacent f-string literals inside parentheses concatenate into one message.
message_to_candidate = (
    f"You received {candidate_votes} number of votes. "
    f"The total number of votes in the election was {total_votes}. "
    f"You received {candidate_votes / total_votes * 100}% of the total votes.")
print(message_to_candidate)
#expected response: You received 3345 number of votes. The total number of votes in the election was 23123. You received 14.466115988409808% of the total votes.
# Format specs: {:,} adds thousands separators; {:.2f} rounds to two decimals.
# (This formatted version is built but never printed in the original lesson.)
message_to_candidate = (
    f"You received {candidate_votes:,} number of votes. "
    f"The total number of votes in the election was {total_votes:,}. "
    f"You received {candidate_votes / total_votes * 100:.2f}% of the total votes.")
# Redefine the counties lookup used by the examples above.
counties_dict = {"Arapahoe": 422829, "Denver": 463353, "Jefferson": 432438}
# Three variables of different types: str, int, and bool.
title = "Frankfurter"
years = 80
expert_status = True
# String concatenation requires every piece to already be a string,
# so the int and bool must go through str() first.
print("Nick is a professional " + title)
print("He has been coding for " + str(years) + " years")
print("Expert status: " + str(expert_status))
# An f-string converts any value automatically.
print(f"Expert status: {expert_status}")
# Create a variable called 'name' that holds a string
name = "<NAME>"
print(name)
# Calculate the daily wage for the user (8-hour day).
hourly_wage = 15
daily_wage = hourly_wage * 8
# Bug fix: the original printed the literal strings "daily_wage" and
# "hourly_wage" (quoted variable names) instead of the computed values.
print(daily_wage)
print(hourly_wage)
print(daily_wage)
# Build a grocery list and show each edit as it happens.
grocery_list = ["Milk", "Bread", "Eggs", "Peanut Butter", "Jelly"]
print(grocery_list)
# Swap "Peanut Butter" for "Almond Butter" (locate it by value).
grocery_list[grocery_list.index("Peanut Butter")] = "Almond Butter"
print(grocery_list)
# Drop "Jelly".
del grocery_list[grocery_list.index("Jelly")]
print(grocery_list)
# Add "Coffee" at the end.
grocery_list += ["Coffee"]
print(grocery_list)
# A list can mix data types freely.
myList = ["Jacob", 25, "Ahmed", 80]
print(myList)
# append() adds a single element at the end.
myList.append("Matt")
print(myList)
# index() reports the position of the first matching value.
print(myList.index("Matt"))
# Elements can be reassigned in place by index.
myList[3] = 85
print(myList)
# len() reports how many elements the list holds.
print(len(myList))
# Delete the first element whose value matches.
del myList[myList.index("Matt")]
print(myList)
# Dropping the first slice removes the first two remaining elements.
del myList[:2]
print(myList)
# Tuples are immutable sequences: their contents cannot be changed.
myTuple = ('Python', 100, 'VBA', False)
print(myTuple)
# One dictionary can describe a whole entity, with mixed value types.
my_info = {
    "name": "Rex",
    "occupation": "dog",
    "age": 21,
    "hobbies": ["barking", "eating", "sleeping", "loving my owner"],
    "wake-up": {"Mon": 5, "Friday": 5, "Saturday": 10, "Sunday": 9},
}
# Pull values back out by key (nested keys chain naturally).
print(f'Hello I am {my_info["name"]} and I am a {my_info["occupation"]}')
print(f'I have {len(my_info["hobbies"])} hobbies!')
print(f'On the weekend I get up at {my_info["wake-up"]["Saturday"]}')
# Unlike lists, dictionaries store information in key/value pairs.
# ---------------------------------------------------------------
# A plain list of actors...
actors = ["<NAME>",
          "<NAME>",
          "<NAME>",
          "<NAME>"]
# ...versus a dictionary describing a single actor.
actor = {"name": "<NAME>"}
print(f'{actor["name"]}')
# ---------------------------------------------------------------
# A dictionary can hold several pairs of information.
actress = {
    "name": "<NAME>",
    "genre": "Action",
    "nationality": "United States",
}
# ---------------------------------------------------------------
# Values can be of any type, including lists.
another_actor = {
    "name": "<NAME>",
    "age": 62,
    "married": True,
    "best movies": [
        "Rocky",
        "Rocky 2",
        "Rocky 3",
    ],
}
print(f'{another_actor["name"]} was in {another_actor["best movies"][0]}')
# ---------------------------------------------------------------
# A dictionary can even nest another dictionary.
film = {
    "title": "Interstellar",
    "revenues": {
        "United States": 360,
        "China": 250,
        "United Kingdom": 73,
    },
}
print(f'{film["title"]} made {film["revenues"]["United States"]} in the US.')
# ---------------------------------------------------------------
# Practice questions: each prints a success or a failure message.
# 1.
x = 5
y = 10
doubled_exceeds_ten = 2 * x > 10
if doubled_exceeds_ten:
    print("Question 1 works!")
else:
    print("oooo needs some work")
# 2.
x = 5
y = 10
short_word = len("Dog") < x
if short_word:
    print("Question 2 works!")
else:
    print("Still missing out")
# 3.
x = 2
y = 5
cube_big_enough = x ** 3 >= y
square_small_enough = y ** 2 < 26
if cube_big_enough and square_small_enough:
    print("GOT QUESTION 3!")
else:
    print("Oh good you can count")
# 4.
name = "Dan"
group_one = ["Greg", "Tony", "Susan"]
group_two = ["Gerald", "Paul", "Ryder"]
group_three = ["Carla", "Dan", "Jefferson"]
if name in group_one:
    print(name + " is in the first group")
elif name in group_two:
    print(name + " is in group two")
elif name in group_three:
    print(name + " is in group three")
else:
    print(name + " does not have a group")
# 5.
height | |
# simulation object contains the current state of the simulation.
# It is analogous to the "bundle" object in the original FMS code.
import os
import shutil
import time
import copy
import types
import numpy as np
import h5py
import numpy.linalg as la
from pyspawn.fmsobj import fmsobj
from pyspawn.traj import traj
import pyspawn.general as gen
import pyspawn.complexgaussian as cg
class Simulation(fmsobj):
"""This is the main simulation module"""
    def __init__(self, numstates):
        """Set up an empty simulation with numstates electronic states."""
        # traj is a dictionary of trajectory basis functions (TBFs)
        self.traj = dict()
        # queue is a list of tasks to be run
        self.queue = ["END"]
        # tasktimes is a list of the simulation times associated with each task
        self.tasktimes = [1e10]
        # olapmax is the maximum overlap allowed for a spawn. Above this,
        # the spawn is cancelled
        self.olapmax = 0.5
        # Number of electronic states (this needs to be fixed since this
        # is already in traj object)
        self.num_el_states = numstates
        # thresholds used by the cloning logic -- exact semantics live in the
        # cloning routines (clone_to_a_state etc.); TODO confirm meanings
        self.pop_threshold = 0.1
        self.e_gap_thresh = 0.0
        self.nuc_pop_thresh = 0.05
        # Type of cloning: either 'toastate' or 'pairwise'
        # 'pairwise' version seems to be not a good thing to do
        self.cloning_type = "toastate"
        # quantum time is the current time of the quantum amplitudes
        self.quantum_time = 0.0
        # timestep for quantum propagation
        self.timestep = 0.0
        self.clone_again = False
        # quantum propagator
        # self.qm_propagator = "RK2"
        # quantum hamiltonian
        # self.qm_hamiltonian = "adiabatic"
        # maps trajectories to matrix element indices
        # (the order of trajectories in the dictionary
        # is not the same as amplitudes)
        self.traj_map = dict()
        # quantum amplitudes (complex, one per TBF; grows as TBFs are added)
        self.qm_amplitudes = np.zeros(0, dtype=np.complex128)
        # Total electronic population on each electronic states
        # takes into account all nuclear basis functions
        self.el_pop = np.zeros(self.num_el_states)
        # energy shift for quantum propagation (better accuracy
        # if energy is close to 0)
        self.qm_energy_shift = 0.0
        # variables to be output to hdf5 mapped to the size of each data point
        self.h5_datasets = dict()
        self.h5_types = dict()
        # maximum quantum propagation time (the original comment wrongly
        # said "walltime"); negative means not yet set
        self.max_quantum_time = -1.0
        # maximum walltime in seconds; values <= 0 disable the walltime check
        self.max_walltime = -1.0
    def set_maxtime_all(self, maxtime):
        """Sets the maximum time for all trajectories.

        Each trajectory is allowed one extra timestep (h) past the quantum
        maxtime -- presumably so amplitude propagation has trajectory data
        through maxtime itself (TODO confirm).
        """
        self.max_quantum_time = maxtime
        h = self.timestep
        for key in self.traj:
            self.traj[key].maxtime = maxtime + h
    def add_traj(self, traj):
        """Add a trajectory to the simulation.

        traj_map assigns each trajectory a matrix index ordered by mintime:
        the new trajectory takes the index of the earliest trajectory that
        starts after it, and every later trajectory's index shifts up by one.
        """
        key = traj.label
        print "Trajectory added:", key
        mintime = traj.mintime
        index = -1
        for key2 in self.traj:
            if mintime < self.traj[key2].mintime:
                # remember the first (lowest) displaced index ...
                if index < 0:
                    index = self.traj_map[key2]
                # ... and shift every later-starting trajectory up by one
                self.traj_map[key2] += 1
        if index < 0:
            # nothing starts later: append at the end
            index = len(self.traj)
        self.traj[key] = traj
        self.traj_map[key] = index
    def propagate(self):
        """This is the main propagation loop for the simulation.

        Repeats: refresh the task queue, check the two stop conditions
        (quantum end time, walltime), run one queued task, propagate the
        quantum amplitudes as far as trajectory data allows, sanity-check
        the overlap matrix, and write restart output.
        """
        gen.print_splash()
        # NOTE(review): time.clock() is deprecated/removed in Python >= 3.8;
        # fine here only because this module is Python 2.
        t0 = time.clock()
        while True:
            # update the queue (list of tasks to be computed)
            print "\nUpdating task queue"
            self.update_queue()
            # if the queue is empty, we're done!
            print "Time =", self.quantum_time
            print "Checking if we are at the end of the simulation"
            # if (self.queue[0] == "END"):
            if self.quantum_time + 1.0e-6 > self.max_quantum_time:
                print "Propagate DONE, simulation ended gracefully!"
                return
            # end simulation if walltime has expired
            print "Checking if maximum wall time is reached"
            if self.max_walltime < time.time() and self.max_walltime > 0:
                print "Wall time expired, simulation ended gracefully!"
                return
            # it is possible for the queue to run empty but
            # for the job not to be done
            if self.queue[0] != "END":
                # Right now we just run a single task per cycle,
                # but we could parallelize here and send multiple tasks
                # out for simultaneous processing.
                current = self.pop_task()
                print "\nStarting " + current
                # NOTE(review): tasks are executed with eval(); queue
                # contents must be trusted strings only.
                eval(current)
                print "Done with " + current
            else:
                print "Task queue is empty"
            # propagate quantum variables if possible
            print "\nPropagating quantum amplitudes if we have enough",\
                "information to do so"
            self.propagate_quantum_as_necessary()
            # abort on a numerically unusable overlap matrix
            # (threshold 1000 here vs 500 warn-only in invert_S)
            cond_num = np.linalg.cond(self.S)
            if cond_num > 1000:
                print "BAD S matrix: condition number =", cond_num, "\nExiting"
                return
            # print restart output - this must be the last line in this loop!
            print "Updating restart output"
            self.restart_output()
        # NOTE(review): unreachable -- the while True above only exits via
        # return, so this elapsed-time report is never printed.
        print "Elapsed wall time: %6.1f" % (time.clock() - t0)
    def propagate_quantum_as_necessary(self):
        """here we will propagate the quantum amplitudes if we have
        the necessary information to do so.
        we have to determine what the maximum time is for which
        we have all the necessary information to propagate the amplitudes"""
        max_info_time = 1.0e10
        # first check trajectories: amplitudes can only be propagated up to
        # the earliest time any trajectory has reached
        for key in self.traj:
            # NOTE(review): this local deliberately shadows the imported
            # `time` module within this method
            time = self.traj[key].time
            if time < max_info_time:
                max_info_time = time
        print "We have enough information to propagate to time ", max_info_time
        # now, if we have the necessary info, we propagate
        while max_info_time > (self.quantum_time + 1.0e-6):
            if self.quantum_time > 1.0e-6:
                print "Propagating quantum amplitudes at time",\
                    self.quantum_time
                self.qm_propagate_step()
            else:
                # very first step: also write the t=0 state to hdf5
                print "Propagating quantum amplitudes at time",\
                    self.quantum_time
                self.qm_propagate_step(zoutput_first_step=True)
            print "\nOutputing quantum information to hdf5"
            self.h5_output()
            self.calc_approx_el_populations()
            print "\nNow we will clone new trajectories if necessary:"
            if self.cloning_type == "toastate":
                self.clone_to_a_state()
                # a second cloning pass with refreshed energy-gap thresholds
                if self.clone_again:
                    for key in self.traj:
                        self.traj[key].compute_cloning_e_gap_thresh()
                    self.clone_to_a_state()
    def qm_propagate_step(self, zoutput_first_step=False):
        """Advance the quantum amplitudes by one timestep.

        Split-step exponential integrator: Heff is rebuilt at each half
        step and exp(-0.5*dt*i*Heff) is applied twice via an
        eigendecomposition.  (***Needs reference***)
        """
        c1i = (complex(0.0, 1.0))
        self.compute_num_traj_qm()
        qm_t = self.quantum_time
        dt = self.timestep
        qm_tpdt = qm_t + dt
        amps_t = self.qm_amplitudes
        print "Building effective Hamiltonian for the first half step"
        self.build_Heff_half_timestep()
        self.calc_approx_el_populations()
        # norm = np.dot(np.conjugate(np.transpose(amps_t)),
        #               np.dot(self.S, amps_t))
        # print "Norm first half =", norm
        # output the first step before propagating
        if zoutput_first_step:
            self.h5_output()
        # first half step: amps <- R exp(W) R^-1 amps, i.e.
        # exp(-0.5*dt*i*Heff) applied through the eigenbasis of iHdt.
        # NOTE(review): la.eig (not eigh) -- Heff is not assumed Hermitian.
        iHdt = (-0.5 * dt * c1i) * self.Heff
        W, R = la.eig(iHdt)
        X = np.exp(W)
        amps = amps_t
        tmp1 = la.solve(R, amps)
        tmp2 = X * tmp1  # element-wise multiplication
        amps = np.matmul(R, tmp2)
        self.quantum_time = qm_tpdt
        print "Building effective Hamiltonian for the second half step"
        self.build_Heff_half_timestep()
        print "Effective Hamiltonian built"
        # second half step, with Heff rebuilt at the advanced time
        iHdt = (-0.5 * dt * c1i) * self.Heff
        W, R = la.eig(iHdt)
        X = np.exp(W)
        tmp1 = la.solve(R, amps)
        tmp2 = X * tmp1  # element-wise multiplication
        amps = np.matmul(R, tmp2)
        self.qm_amplitudes = amps
        # nuclear norm <amps|S|amps> should remain ~1 under exact propagation
        norm = np.dot(np.conjugate(np.transpose(amps)),
                      np.dot(self.S, amps))
        # print "Norm second half =", norm
        if abs(norm - 1.0) > 0.01:
            print "Warning: nuclear norm deviated from 1: norm =", norm
        print "Done with quantum propagation"
def init_amplitudes_one(self):
"""Sets the first amplitude to 1.0 and all others to zero"""
self.compute_num_traj_qm()
self.qm_amplitudes = np.zeros_like(self.qm_amplitudes,
dtype=np.complex128)
self.qm_amplitudes[0] = 1.0
    def compute_num_traj_qm(self):
        """Get number of trajectories. Note that the order of trajectories in
        the dictionary is not the same as in Hamiltonian!
        The new_amp variable is non-zero only for the child and parent cloned
        TBFs. We assign amplitudes during the timestep after cloning happened,
        new_amp variable is zeroed out.

        NOTE(review): if self.traj is empty, ntraj is never bound and the
        final line raises NameError -- callers apparently guarantee at least
        one trajectory.
        """
        for ntraj, key in enumerate(self.traj):
            # a trajectory whose index is beyond the amplitude array is new
            if self.traj_map[key] + 1 > len(self.qm_amplitudes):
                print "Adding trajectory ", key, "to the nuclear propagation"
                # Adding the quantum amplitude for a new trajectory
                self.qm_amplitudes = np.append(self.qm_amplitudes,
                                               self.traj[key].new_amp)
                self.traj[key].new_amp = 0.0
                for key2 in self.traj:
                    if np.abs(self.traj[key2].new_amp) > 1e-6\
                            and key2 != key:
                        # when new trajectory added we need
                        # to update the amplitude of parent
                        self.qm_amplitudes[self.traj_map[key2]]\
                            = self.traj[key2].new_amp
                        # zeroing out new_amp variable
                        self.traj[key2].new_amp = 0.0
        self.num_traj_qm = ntraj + 1
def invert_S(self):
"""compute Sinv from S"""
cond_num = np.linalg.cond(self.S)
if cond_num > 500:
print "BAD S matrix: condition number =", cond_num
else:
pass
self.Sinv = np.linalg.inv(self.S)
def build_Heff(self):
"""built Heff form H, Sinv, and Sdot"""
c1i = (complex(0.0, 1.0))
self.Heff = np.matmul(self.Sinv, (self.H - c1i * self.Sdot))
    def build_S_elec(self):
        """Build matrix of electronic overlaps.

        S_elec[i, j] = <psi_i | psi_j> for the time-dependent electronic
        wavefunctions of TBFs i and j.  Diagonal elements are set to 1
        (each wavefunction presumed normalized -- TODO confirm).
        """
        ntraj = self.num_traj_qm
        self.S_elec = np.zeros((ntraj, ntraj), dtype=np.complex128)
        for keyi in self.traj:
            i = self.traj_map[keyi]
            # skip trajectories beyond the current amplitude count
            if i < ntraj:
                for keyj in self.traj:
                    j = self.traj_map[keyj]
                    if j < ntraj:
                        if i == j:
                            self.S_elec[i, j] = 1.0
                        else:
                            # <psi_i|psi_j> = conj(psi_i)^T . psi_j
                            wf_i_T = np.transpose(
                                np.conjugate(self.traj[keyi].td_wf_full_ts_qm))
                            wf_j = self.traj[keyj].td_wf_full_ts_qm
                            self.S_elec[i, j] = np.dot(wf_i_T, wf_j)
    def build_S(self):
        """Build the overlap matrix, S.

        S[i, j] is the product of the nuclear (Gaussian) overlap and the
        electronic overlap (from build_S_elec) between TBFs i and j.
        """
        # keep the previous S -- presumably for finite-difference Sdot
        # elsewhere (TODO confirm)
        if self.quantum_time > 0.0:
            self.S_prev = self.S
        ntraj = self.num_traj_qm
        self.S = np.zeros((ntraj, ntraj), dtype=np.complex128)
        self.S_nuc = np.zeros((ntraj, ntraj), dtype=np.complex128)
        for keyi in self.traj:
            i = self.traj_map[keyi]
            if i < ntraj:
                for keyj in self.traj:
                    j = self.traj_map[keyj]
                    if j < ntraj:
                        # nuclear overlap of the two Gaussian basis functions
                        self.S_nuc[i, j] = cg.overlap_nuc(
                            self.traj[keyi].positions_qm,
                            self.traj[keyj].positions_qm,
                            self.traj[keyi].momenta_qm,
                            self.traj[keyj].momenta_qm,
                            self.traj[keyi].widths,
                            self.traj[keyj].widths,
                            self.traj[keyi].numdims)
                        # total overlap = nuclear x electronic
                        self.S[i, j] = self.S_nuc[i, j] * self.S_elec[i, j]
def build_Sdot(self):
"""build the right-acting time derivative operator"""
ntraj = self.num_traj_qm
self.Sdot = | |
from numpy import random, mean
#note: Project completed with the use of lecture notes, notes from the TA
#discussion section, and work on past assignments.
#note: The provided code shows +2 results over the number of iterations shown,
#and if I run world.run() without running the rest of the code the output is
#incorrect. These errors are deliberately maintained.
#---no, those make sense!
#NOTE: an earlier bug made every agent look happy even though am_i_happy and
#build_agents were untouched; that has since been fixed.
#TODO: the assignment also asks for an integration report.
#TODO: out_path is not working correctly yet.
# Model configuration. same_pref_* is the minimum fraction of like-kind
# neighbors a red/blue agent needs to be happy (see Agent.am_i_happy);
# proportion_r is the red share of the population.
params = {'world_size':(20,20),
          'num_agents':380,
          'same_pref_r': 0.5,
          'same_pref_b': 0.3,
          'proportion_r': 0.6,
          'max_iter' :100,
          'look_before_move': True, #toggle this T/F for Question 2
          'print_to_screen': True, #toggle this T/F for Question 1
          'to_file': False, #toggle this T/F for Question 1
          'out_path':r'c:\\users\\sarah\\desktop\\output.csv'}
# NOTE(review): out_path is a raw string, so the doubled backslashes are kept
# literally ("c:\\users\\..."); this may be why the output path misbehaves --
# verify where the CSV actually lands.
class Agent():
    """One Schelling-model agent: a red or blue resident who is happy only
    when at least same_pref of its occupied neighboring patches hold agents
    of its own kind."""

    def __init__(self, world, kind, same_pref):
        self.world = world
        self.kind = kind            # 'red' or 'blue'
        self.same_pref = same_pref  # minimum tolerated like-neighbor fraction
        self.location = None        # (x, y) patch, assigned by World.init_world

    def move(self, params):
        """Handle this agent's turn in a model iteration.

        Return codes (the original author's scheme, preserved):
          0 / 6 -- red / blue already happy
          1 / 7 -- red / blue unhappy, moved (look_before_move=True)
          2 / 8 -- red / blue unhappy, could not find an acceptable patch
          4 / 5 -- red / blue unhappy, moved blindly (look_before_move=False)
        Returns None when there are no vacancies to consider.
        """
        happy = self.am_i_happy()
        if not happy and not params['look_before_move']:
            # Blind move: relocate without checking happiness at the target.
            # NOTE(review): the loop returns on its first iteration, so the
            # agent always takes the first vacancy in grid order; the author
            # states these quirks are deliberately maintained.
            vacancies1 = self.world.find_vacant(return_all=True)
            for patch in vacancies1:
                self.world.grid[self.location] = None  # vacate current patch
                self.location = patch                  # claim the new patch
                self.world.grid[patch] = self          # update the grid
                if self.kind == 'red':
                    return 4  # red moved
                else:
                    return 5  # blue moved
        elif not happy and params['look_before_move']:
            # Checked move: only accept a patch the agent would be happy in.
            # NOTE(review): the "couldn't move" test sits inside the loop, so
            # only the first vacancy is ever examined; maintained as-is.
            vacancies = self.world.find_vacant(return_all=True)
            for patch in vacancies:
                i_moved = False
                will_i_like_it = self.am_i_happy(loc=patch)
                if will_i_like_it is True:
                    self.world.grid[self.location] = None  # vacate current patch
                    self.location = patch                  # claim the new patch
                    self.world.grid[patch] = self          # update the grid
                    i_moved = True
                    if self.kind == 'red':
                        return 1  # red moved
                    else:
                        return 7  # blue moved
                if i_moved is False:
                    if self.kind == 'red':
                        return 2  # red failed to move
                    else:
                        return 8  # blue failed to move
        elif happy:
            if self.kind == 'red':
                return 0  # red happy
            else:
                return 6  # blue happy

    def am_i_happy(self, loc=False, neighbor_check=False):
        """Return True when the like-kind share of occupied neighbor patches
        meets same_pref.

        When loc is given, evaluate that patch instead of the agent's current
        location.  When neighbor_check=True, instead return the per-neighbor
        list of booleans ("is this neighbor my kind?") for reporting.
        """
        if not loc:
            starting_loc = self.location
        else:
            starting_loc = loc
        neighbor_patches = self.world.locate_neighbors(starting_loc)
        neighbor_agents = [self.world.grid[patch] for patch in neighbor_patches]
        neighbor_kinds = [agent.kind for agent in neighbor_agents if agent is not None]
        num_like_me = sum([kind == self.kind for kind in neighbor_kinds])
        # for reporting purposes, allow checking of the similar neighbors
        if neighbor_check:
            return [kind == self.kind for kind in neighbor_kinds]
        # an agent with no occupied neighboring patches counts as unhappy
        if len(neighbor_kinds) == 0:
            return False
        perc_like_me = num_like_me / len(neighbor_kinds)
        return perc_like_me >= self.same_pref

    def start_happy_r_b(self):
        """Report pre-move happiness by kind: 'a' for a happy red agent,
        'b' for a happy blue agent, None otherwise."""
        # Bug fix: the original tested `self.am_i_happy` -- the bound method
        # object, which is always truthy -- instead of calling it, so every
        # agent was counted as happy at startup.
        if self.am_i_happy() and self.kind == 'red':
            return 'a'
        elif self.am_i_happy() and self.kind == 'blue':
            return 'b'
        else:
            pass
class World():
    def __init__(self, params):
        """Build the grid, create the agents, and place them (see init_world)."""
        # the grid must have more patches than agents, or placement can't finish
        assert(params['world_size'][0] * params['world_size'][1] > params['num_agents']), 'Grid too small for number of agents.'
        self.params = params
        # per-iteration statistics, filled by report_integration and run
        self.reports = {}
        self.grid = self.build_grid(params['world_size'])
        self.agents = self.build_agents(params['num_agents'], params['same_pref_r'], params['same_pref_b'])
        self.init_world()
def build_grid(self, world_size):
#create the world that the agents can move around on
locations = [(i,j) for i in range(world_size[0]) for j in range(world_size[1])]
return {l:None for l in locations}
def build_agents(self, num_agents, same_pref_r, same_pref_b):
#generate a list of Agents that can be iterated over
def _kind_picker(i):
if i < round(num_agents * params['proportion_r']):
return 'red'
else:
return 'blue'
def _pref_picker(i):
if i < round(num_agents * params['proportion_r']):
return params['same_pref_r']
else:
return params['same_pref_b']
agents = [Agent(self, _kind_picker(i), _pref_picker(i)) for i in range(num_agents)]
random.shuffle(agents)
return agents
#print('I built an agent') #why won't this work? Error: "unrechable code"
    def init_world(self):
        """Place every agent on a random vacant patch and set up reporting."""
        #a method for all the steps necessary to create the starting point of the model
        for agent in self.agents:
            loc = self.find_vacant()
            self.grid[loc] = agent
            agent.location = loc
        # sanity checks: every agent is housed and the occupancy count matches
        assert(all([agent.location is not None for agent in self.agents])), "Some agents don't have homes!"
        assert(sum([occupant is not None for occupant in self.grid.values()]) == self.params['num_agents']), 'Mismatch between number of agents and number of locations with agents.'
        #set up some reporting dictionaries
        self.reports['integration'] = []
        self.reports['red_integration'] =[]
        self.reports['blue_integration'] = []
def find_vacant(self, return_all=False):
#finds all empty patches on the grid and returns a random one, unless kwarg return_all==True,
#then it returns a list of all empty patches
empties = [loc for loc, occupant in self.grid.items() if occupant is None]
if return_all:
return empties
else:
choice_index = random.choice(range(len(empties)))
return empties[choice_index]
def locate_neighbors(self, loc):
#given a location, return a list of all the patches that count as neighbors
include_corners = True
x, y = loc
cardinal_four = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]
if include_corners:
corner_four = [(x+1, y+1), (x+1, y-1), (x-1, y+1), (x-1, y-1)]
neighbors = cardinal_four + corner_four
else:
neighbors = cardinal_four
#handle patches that are at the edges, assuming a "torus" shape
x_max = self.params['world_size'][0] - 1
y_max = self.params['world_size'][1] - 1
def _edge_fixer(loc):
x, y = loc
if x < 0:
x = x_max
elif x > x_max:
x = 0
if y < 0:
y = y_max
elif y > y_max:
y = 0
return (x, y)
neighbors = [_edge_fixer(loc) for loc in neighbors]
return neighbors
def report_integration(self):
diff_neighbors = []
diff_neighbours_r = []
diff_neighbours_b = []
for agent in self.agents:
diff_neighbors.append(sum(
[not a for a in agent.am_i_happy(neighbor_check=True)]
))
for agent in self.agents:
if agent.kind == 'red':
diff_neighbours_r.append(sum(
[not a for a in agent.am_i_happy(neighbor_check=True)]
))
for agent in self.agents:
if agent.kind == 'blue':
diff_neighbours_b.append(sum(
[not a for a in agent.am_i_happy(neighbor_check=True)]
))
self.reports['integration'].append(round(mean(diff_neighbors), 2))
self.reports['red_integration'].append(round(mean(diff_neighbours_r), 2))
self.reports['blue_integration'].append(round(mean(diff_neighbours_b), 2))
def run(self):
#handle the iterations of the model
log_of_happy = []
log_of_rand = []
log_of_rand_r = []
log_of_rand_b = []
log_of_happy_r = []
log_of_happy_b = []
log_of_moved_r = []
log_of_moved_b = []
log_of_moved = []
log_of_stay = []
log_of_stay_r = []
log_of_stay_b = []
self.report_integration()
log_of_happy.append(sum([a.am_i_happy() for a in self.agents])) #starting happiness
happy_results = [agent.start_happy_r_b() for agent in self.agents]
log_of_happy_r.append(sum([r == 'a' for r in happy_results])) #starting happiness
log_of_happy_b.append(sum([r == 'b' for r in happy_results])) #starting happiness
log_of_rand_r.append(0) #no one moved at startup
log_of_rand_b.append(0) #no one moved at startup
log_of_moved_r.append(0) #no one moved at startup
log_of_moved_b.append(0) #no one moved at startup
log_of_stay_r.append(0) #no one stayed at startup
log_of_stay_b.append(0) #no one stayed at startup
for iteration in range(self.params['max_iter']):
random.shuffle(self.agents) #randomize agents before every iteration
move_results = [agent.move(params) for agent in self.agents]
self.report_integration()
num_happy_at_start =sum([r==0 for r in move_results]) + sum([r==6 for r in move_results])
num_happy_at_start_r = sum([r==0 for r in move_results])
num_happy_at_start_b = sum([r==6 for r in move_results])
num_moved = sum([r==1 for r in move_results]) + sum([r==7 for r in move_results])
num_moved_r = sum([r==1 for r in move_results])
num_moved_b = sum([r==7 for r in move_results])
num_stayed_unhappy = sum([r==2 for r in move_results]) + sum([r==8 for r in move_results])
num_stayed_unhappy_r = sum([r==2 for r in move_results])
num_stayed_unhappy_b = sum([r==8 for r in move_results])
#num_moved_random = sum([r==3 for r in move_results]) #outdated
num_moved_random_r = sum([r==4 for r in move_results])
num_moved_random_b = sum([r==5 for r in move_results])
log_of_happy.append(num_happy_at_start)
| |
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import errno
import filecmp
import glob
import os
import random
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
import traceback
from color import Coloring
from git_command import GitCommand, git_require
from git_config import GitConfig, IsId, GetSchemeFromUrl, GetUrlCookieFile, \
ID_RE
from error import GitError, HookError, UploadError, DownloadError
from error import ManifestInvalidRevisionError
from error import NoManifestException
import platform_utils
from trace import IsTrace, Trace
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from pyversion import is_python3
if is_python3():
import urllib.parse
else:
import imp
import urlparse
urllib = imp.new_module('urllib')
urllib.parse = urlparse
input = raw_input
def _lwrite(path, content):
  """Safely write content to path via a sidecar lock file.

  The content is first written to '<path>.lock' and then renamed over
  path, so readers never observe a partially written file.  The lock
  file is removed if the rename fails.
  """
  lock = '%s.lock' % path
  # idiom fix: `with` guarantees the handle is closed even if write() raises
  # (the original used a manual try/finally around open/close)
  with open(lock, 'w') as fd:
    fd.write(content)
  try:
    platform_utils.rename(lock, path)
  except OSError:
    # clean up the orphaned lock file before propagating the error
    platform_utils.remove(lock)
    raise
def _error(fmt, *args):
msg = fmt % args
print('error: %s' % msg, file=sys.stderr)
def _warn(fmt, *args):
msg = fmt % args
print('warn: %s' % msg, file=sys.stderr)
def not_rev(r):
  """Return the rev-list exclusion form of revision r ('^' prefix)."""
  return '^' + r
def sq(r):
  """Single-quote r for safe interpolation into a POSIX shell command.

  Embedded single quotes are closed, backslash-escaped, and reopened,
  the standard '\\'' idiom.
  """
  # Bug fix: the original replacement string "'\''" collapses to three
  # quote characters in Python source (the backslash merely escaped the
  # quote), which the shell parses as an empty string -- embedded single
  # quotes were silently dropped.  The backslash itself must be escaped.
  return "'" + r.replace("'", "'\\''") + "'"
# Cached result of _ProjectHooks(); computed at most once per process.
_project_hook_list = None
def _ProjectHooks():
  """List the hooks present in the 'hooks' directory.
  These hooks are project hooks and are copied to the '.git/hooks' directory
  of all subprojects.
  This function caches the list of hooks (based on the contents of the
  'repo/hooks' directory) on the first call.
  Returns:
    A list of absolute paths to all of the files in the hooks directory.
  """
  global _project_hook_list
  if _project_hook_list is None:
    # resolve the hooks directory relative to this module's real location
    d = platform_utils.realpath(os.path.abspath(os.path.dirname(__file__)))
    d = os.path.join(d, 'hooks')
    _project_hook_list = [os.path.join(d, x) for x in platform_utils.listdir(d)]
  return _project_hook_list
class DownloadedChange(object):
  """A change fetched for local inspection, with lazy access to its commits."""

  # cached rev-list output; populated on first access to .commits
  _commit_cache = None

  def __init__(self, project, base, change_id, ps_id, commit):
    self.project = project
    self.base = base
    self.change_id = change_id
    self.ps_id = ps_id
    self.commit = commit

  @property
  def commits(self):
    """One-line summaries of commits in this change but not in base,
    oldest first."""
    if self._commit_cache is None:
      rev_list_args = [
          '--abbrev=8',
          '--abbrev-commit',
          '--pretty=oneline',
          '--reverse',
          '--date-order',
          not_rev(self.base),
          self.commit,
          '--',
      ]
      self._commit_cache = self.project.bare_git.rev_list(*rev_list_args)
    return self._commit_cache
class ReviewableBranch(object):
  """A local branch with commits not yet merged into its upstream base."""

  # cached rev-list output; populated on first access to .commits
  _commit_cache = None

  def __init__(self, project, branch, base):
    self.project = project
    self.branch = branch
    self.base = base

  @property
  def name(self):
    """Short name of the underlying branch."""
    return self.branch.name

  @property
  def commits(self):
    """One-line summaries of this branch's unmerged commits, oldest first."""
    if self._commit_cache is None:
      rev_list_args = [
          '--abbrev=8',
          '--abbrev-commit',
          '--pretty=oneline',
          '--reverse',
          '--date-order',
          not_rev(self.base),
          R_HEADS + self.name,
          '--',
      ]
      self._commit_cache = self.project.bare_git.rev_list(*rev_list_args)
    return self._commit_cache

  @property
  def unabbrev_commits(self):
    """Map each abbreviated (8-char) commit id to its full id."""
    full_ids = dict()
    for commit in self.project.bare_git.rev_list(not_rev(self.base),
                                                 R_HEADS + self.name,
                                                 '--'):
      full_ids[commit[0:8]] = commit
    return full_ids

  @property
  def date(self):
    """Commit date of the branch tip."""
    return self.project.bare_git.log('--pretty=format:%cd',
                                     '-n', '1',
                                     R_HEADS + self.name,
                                     '--')

  def UploadForReview(self, people,
                      auto_topic=False,
                      draft=False,
                      private=False,
                      notify=None,
                      wip=False,
                      dest_branch=None,
                      validate_certs=True,
                      push_options=None):
    """Push this branch for review (delegates to the owning project)."""
    self.project.UploadForReview(self.name,
                                 people,
                                 auto_topic=auto_topic,
                                 draft=draft,
                                 private=private,
                                 notify=notify,
                                 wip=wip,
                                 dest_branch=dest_branch,
                                 validate_certs=validate_certs,
                                 push_options=push_options)

  def GetPublishedRefs(self):
    """Return {sha: ref} for every published change ref on the remote."""
    refs = {}
    output = self.project.bare_git.ls_remote(
        self.branch.remote.SshReviewUrl(self.project.UserEmail),
        'refs/changes/*')
    for line in output.split('\n'):
      # each well-formed line is "<sha>\t<ref>"; skip anything else
      fields = line.split()
      if len(fields) == 2:
        sha, ref = fields
        refs[sha] = ref
    return refs
class StatusColoring(Coloring):
  """Colored output helpers for status display."""
  def __init__(self, config):
    Coloring.__init__(self, config, 'status')
    # headers (project and branch names) are bold
    self.project = self.printer('header', attr='bold')
    self.branch = self.printer('header', attr='bold')
    # problem states and local modifications are red; additions are green
    self.nobranch = self.printer('nobranch', fg='red')
    self.important = self.printer('important', fg='red')
    self.added = self.printer('added', fg='green')
    self.changed = self.printer('changed', fg='red')
    self.untracked = self.printer('untracked', fg='red')
class DiffColoring(Coloring):
  """Colored output helpers for diff display (bold project headers)."""
  def __init__(self, config):
    Coloring.__init__(self, config, 'diff')
    self.project = self.printer('header', attr='bold')
class _Annotation(object):
  """A simple name/value annotation record."""
  def __init__(self, name, value, keep):
    self.name = name
    self.value = value
    # keep: presumably controls whether the annotation is retained in
    # generated output -- TODO confirm against callers
    self.keep = keep
class _CopyFile(object):
  """Copies one file into place and marks the copy read-only."""
  def __init__(self, src, dest, abssrc, absdest):
    # relative source/destination paths (as configured)
    self.src = src
    self.dest = dest
    # absolute counterparts used for the actual filesystem operations
    self.abs_src = abssrc
    self.abs_dest = absdest
  def _Copy(self):
    """Copy abs_src over abs_dest when the destination is missing or stale."""
    src = self.abs_src
    dest = self.abs_dest
    # copy file if it does not exist or is out of date
    if not os.path.exists(dest) or not filecmp.cmp(src, dest):
      try:
        # remove existing file first, since it might be read-only
        if os.path.exists(dest):
          platform_utils.remove(dest)
        else:
          # destination absent: make sure its parent directory exists
          dest_dir = os.path.dirname(dest)
          if not platform_utils.isdir(dest_dir):
            os.makedirs(dest_dir)
        shutil.copy(src, dest)
        # make the file read-only
        mode = os.stat(dest)[stat.ST_MODE]
        mode = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
        os.chmod(dest, mode)
      except IOError:
        # failure is reported but not fatal
        _error('Cannot copy file %s to %s', src, dest)
class _LinkFile(object):
  """Creates symlinks from the git worktree, with wildcard support in src."""
  def __init__(self, git_worktree, src, dest, relsrc, absdest):
    self.git_worktree = git_worktree
    self.src = src
    self.dest = dest
    # source path expressed relative to the destination directory
    self.src_rel_to_dest = relsrc
    self.abs_dest = absdest
  def __linkIt(self, relSrc, absDest):
    """Create symlink absDest -> relSrc, replacing any stale entry."""
    # link file if it does not exist or is out of date
    if not platform_utils.islink(absDest) or (platform_utils.readlink(absDest) != relSrc):
      try:
        # remove existing file first, since it might be read-only
        if os.path.lexists(absDest):
          platform_utils.remove(absDest)
        else:
          # destination absent: make sure its parent directory exists
          dest_dir = os.path.dirname(absDest)
          if not platform_utils.isdir(dest_dir):
            os.makedirs(dest_dir)
        platform_utils.symlink(relSrc, absDest)
      except IOError:
        # failure is reported but not fatal
        _error('Cannot link file %s to %s', relSrc, absDest)
  def _Link(self):
    """Link the self.rel_src_to_dest and self.abs_dest. Handles wild cards
    on the src linking all of the files in the source in to the destination
    directory.
    """
    # We use the absSrc to handle the situation where the current directory
    # is not the root of the repo
    absSrc = os.path.join(self.git_worktree, self.src)
    if os.path.exists(absSrc):
      # Entity exists so just a simple one to one link operation
      self.__linkIt(self.src_rel_to_dest, self.abs_dest)
    else:
      # Entity doesn't exist: assume src contains a wild card
      absDestDir = self.abs_dest
      if os.path.exists(absDestDir) and not platform_utils.isdir(absDestDir):
        _error('Link error: src with wildcard, %s must be a directory',
               absDestDir)
      else:
        absSrcFiles = glob.glob(absSrc)
        for absSrcFile in absSrcFiles:
          # Create a relative path from source dir to destination dir
          absSrcDir = os.path.dirname(absSrcFile)
          relSrcDir = os.path.relpath(absSrcDir, absDestDir)
          # Get the source file name
          srcFile = os.path.basename(absSrcFile)
          # Now form the final full paths to srcFile. They will be
          # absolute for the destination and relative for the source.
          absDest = os.path.join(absDestDir, srcFile)
          relSrc = os.path.join(relSrcDir, srcFile)
          self.__linkIt(relSrc, absDest)
class RemoteSpec(object):
  """Plain data holder describing a git remote: its name, fetch/push URLs,
  review server, pinned revision and original (pre-override) name."""

  def __init__(self,
               name,
               url=None,
               pushUrl=None,
               review=None,
               revision=None,
               orig_name=None,
               fetchUrl=None):
    self.name = name
    self.url = url
    self.pushUrl = pushUrl
    self.review = review
    self.revision = revision
    self.orig_name = orig_name
    self.fetchUrl = fetchUrl

  def __repr__(self):
    # added for debuggability; purely additive, no existing caller affected
    return '<RemoteSpec %s url=%s>' % (self.name, self.url)
class RepoHook(object):
"""A RepoHook contains information about a script to run as a hook.
Hooks are used to run a python script before running an upload (for instance,
to run presubmit checks). Eventually, we may have hooks for other actions.
This shouldn't be confused with files in the 'repo/hooks' directory. Those
files are copied into each '.git/hooks' folder for each project. Repo-level
hooks are associated instead with repo actions.
Hooks are always python. When a hook is run, we will load the hook into the
interpreter and execute its main() function.
"""
def __init__(self,
hook_type,
hooks_project,
topdir,
manifest_url,
abort_if_user_denies=False):
"""RepoHook constructor.
Params:
hook_type: A string representing the type of hook. This is also used
to figure out the name of the file containing the hook. For
example: 'pre-upload'.
hooks_project: The project containing the repo hooks. If you have a
manifest, this is manifest.repo_hooks_project. OK if this is None,
which will make the hook a no-op.
topdir: Repo's top directory (the one containing the .repo directory).
Scripts will run with CWD as this directory. If you have a manifest,
this is manifest.topdir
manifest_url: The URL to the manifest git repo.
abort_if_user_denies: If True, we'll throw a HookError() if the user
doesn't allow us to run the hook.
"""
self._hook_type = hook_type
self._hooks_project = hooks_project
self._manifest_url = manifest_url
self._topdir = topdir
self._abort_if_user_denies = abort_if_user_denies
# Store the full path to the script for convenience.
if self._hooks_project:
self._script_fullpath = os.path.join(self._hooks_project.worktree,
self._hook_type + '.py')
else:
self._script_fullpath = None
def _GetHash(self):
"""Return a hash of the contents of the hooks directory.
We'll just use git to do this. This hash has the property that if anything
changes in the directory we will return a different has.
SECURITY CONSIDERATION:
This hash only represents the contents of files in the hook directory, not
any other files imported or called by hooks. Changes to imported files
can change the script behavior without affecting the hash.
Returns:
A string representing the hash. This will always be ASCII so that it can
be printed to the user easily.
"""
assert self._hooks_project, "Must have hooks to calculate their hash."
# We will use the work_git object rather than just calling GetRevisionId().
# That gives us a hash of | |
#!/usr/bin/python3
'''
* Copyright (C) 2021 <NAME>
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE.txt file for details.
'''
##################
#Needed libraries#
##################
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import qiskit as q
import sys
from qiskit.visualization import plot_histogram
from qiskit.providers.ibmq import least_busy
from random import getrandbits
'''
Grover's algorithm. Intro
'''
#######################
#Functions definitions#
#######################
'''
Usage function
calling the program with "-h" or "--help" will display the help without returning an error (help was intended)
calling the program with no options or wrong ones, will display the same help but returning an error
Please bear in mind that some combination of options are simply ignored, see the text of this function itself
'''
def usage():
    """Print command-line help and exit.

    Exits with status 0 when help was explicitly requested via -h/--help,
    and with status 1 otherwise (missing or wrong arguments).
    """
    print("Usage: " + str(sys.argv[0]) + " i j k l")
    print("i: Number of qubits (2 or 3, will yield error if different)")
    print("j: Number of solutions (only taken into account if i=3, otherwise ignored). Can only be 1 or 2, will yield error otherwise")
    # typo fix: 'othwerise' -> 'otherwise' in the user-facing help text
    print("k: Number of iterations (only taken into account for i=3 and j=1, otherwise ignored). Can only be 1 or 2, will yield error otherwise")
    print("l: Perform computations in real quantum hardware, can only be 0 (no) or 1 (yes), will yield error otherwise")
    if len(sys.argv) == 2 and sys.argv[1] in ("-h", "--help"):
        sys.exit(0)
    else:
        sys.exit(1)
'''
Check whether parameter is an integer
'''
def is_intstring(s):
    """Return True when s can be parsed as an integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
'''
Initialization:
Simply apply an H gate to every qubit
'''
def initialize():
    """Validate command-line arguments and build the initial circuit.

    Creates a QuantumCircuit with the requested number of qubits and puts
    every qubit into uniform superposition with a Hadamard gate.

    Exits via usage() (or sys.exit) on any invalid argument combination.

    :return: the initialized qiskit.QuantumCircuit
    """
    if len(sys.argv) == 1:
        print ("No arguments given")
        usage()
    elif (len(sys.argv) > 5
          or str(sys.argv[1]) in ("-h", "--help")
          or not is_intstring(sys.argv[1])
          or int(sys.argv[1]) not in (2, 3)):
        usage()
    else:
        # every remaining argument must be an integer as well
        for arg in sys.argv[2:]:
            if not is_intstring(arg):
                sys.exit("All arguments must be integers. Exit.")
        # BUG FIX: QuantumCircuit expects an integer qubit count, but
        # sys.argv entries are strings -- convert explicitly.
        qc = q.QuantumCircuit(int(sys.argv[1]))
        # Apply a H-gate to all qubits in qc
        for i in range(qc.num_qubits):
            qc.h(i)
        qc.barrier()
        return qc
'''
Implement multi controlled Z-gate, easy to reutilize
'''
def mctz(qc):
    """Apply a multi-controlled Z gate across all qubits of the circuit.

    Implemented as H / multi-controlled-Toffoli / H on the last qubit, with
    every other qubit acting as a control. Generalized from the original
    hard-coded 3-qubit version: for a 3-qubit circuit the emitted gates are
    identical, but any circuit width >= 2 now works.
    """
    target = qc.num_qubits - 1
    qc.h(target)
    qc.mct(list(range(target)), target)
    qc.h(target)
'''
Oracle metaimplementation
This function will simply call one of the possibles oracles functions
'''
def oracle(qc):
    """Choose random target state(s) and apply the matching oracle to qc.

    Dispatch is driven by the command-line arguments:
      - 2 qubits: one random target state (oracle_2_qubits)
      - 3 qubits, 1 solution: runs the requested number of Grover
        iterations, each being oracle + diffusion
      - 3 qubits, 2 solutions: two distinct random target states
    Any unsupported combination exits through usage().
    """
    # Generate some random bits and implement the oracle accordingly with the result
    bits = getrandbits(qc.num_qubits)
    # 2 qubits
    if int(sys.argv[1]) == 2:
        print("Random bits to search for are (decimal representation): " + str(bits))
        oracle_2_qubits(qc, bits)
    # 3 qubits
    elif int(sys.argv[1]) == 3:
        # Single solution
        if int(sys.argv[2]) == 1:
            '''
            Explanation:
            less than sqrt(N) iterations will be needed (so will need to "floor" (truncate) the result)
            As 2 < sqrt(8) < 3 --> n=2 for 100% prob. With n=1, p=0.78125=78,125%
            In the classical case, p=1/4=25% (single query followed by a random guess: 1/8 + 7/8 · 1/7 = 1/4 = 25%)
            Classical results with two runs, p=1/8+7/8·1/7+6/8·1/6= 1/4 + 1/8 = 3/8 = 0.375 = 37,5%
            '''
            print("Random bits to search for are (decimal representation): " + str(bits))
            # Check whether 1 or 2 iterations were requested
            if int(sys.argv[3]) in (1, 2):
                iterations = int(sys.argv[3])
                for i in range(iterations):
                    oracle_3_qubits_single_solution(qc, bits)
                    # BUG FIX: operate on the circuit handed to this function,
                    # not on the module-level 'grover_circuit' global.
                    diffusion(qc)
            # For any other case, wrong arguments were used, exit
            else:
                usage()
        # 2 possible solutions
        elif int(sys.argv[2]) == 2:
            '''
            Explanation:
            less than sqrt(N/M) times (M=2 different results to look for) will be needed (so will need to "floor" (truncate) the result)
            As sqrt(8/2) = 2 --> n=1 for a theoretical 100% prob. In the classical case, 13/28 = 46,4%
            '''
            # A list instead of a single element will be used, initialize it with the previous value as first element
            bits = [bits]
            # Generate the second element, also randomly
            bits.append(getrandbits(qc.num_qubits))
            # Elements have to be different, regenerate as many times as needed till different
            while bits[0] == bits[1]:
                bits[1] = getrandbits(3)
            # When done, sort the list of random bits. Order does not matter for our upcoming permutations
            bits.sort()
            print("Random bits to search for are (decimal representation): " + str(bits[0]) + " and " + str(bits[1]))
            oracle_3_qubits_2_solutions(qc, bits)
        # Algorithm only implemented for 1 or 2 possible solution(s), exit if something different requested
        else:
            usage()
    # Algorithm only implemented for 1 or 2 qubits, exit if something different requested
    else:
        usage()
'''
Oracle implementation for 2 qubits.
Simply a controlled-Z gate (cz in qiskit).
For qubits different to 1, an x-gate is needed before and after the cz-gate
'''
def oracle_2_qubits(qc, bits):
    """Phase-mark the 2-qubit basis state whose decimal value is 'bits'.

    Every qubit whose corresponding bit is 0 is sandwiched between X gates,
    mapping the searched state onto |11>, where the CZ gate flips its phase.
    Gate-for-gate equivalent to the original per-case chain for bits 0..3.
    """
    # qubit i needs an X flip exactly when bit i of 'bits' is 0
    zero_positions = [i in range(2) and i for i in range(2) if not (bits >> i) & 1]
    for i in zero_positions:
        qc.x(i)
    qc.cz(0, 1)
    for i in zero_positions:
        qc.x(i)
    qc.barrier()
'''
Oracle implementation for 3 qubits and single solution.
Reference for oracles: https://www.nature.com/articles/s41467-017-01904-7 (table 1)
'''
def oracle_3_qubits_single_solution(qc, bits):
    """Phase-mark the 3-qubit basis state whose decimal value is 'bits'.

    Every qubit whose corresponding bit is 0 is sandwiched between X gates,
    so the searched state is mapped onto |111>, where the multi-controlled Z
    (mctz) flips its phase. Gate-for-gate equivalent to the per-case table
    in https://www.nature.com/articles/s41467-017-01904-7 (table 1)
    for bits 0..7.
    """
    # qubit i needs an X flip exactly when bit i of 'bits' is 0
    zero_positions = [i for i in range(3) if not (bits >> i) & 1]
    for i in zero_positions:
        qc.x(i)
    mctz(qc)
    for i in zero_positions:
        qc.x(i)
    qc.barrier()
'''
Oracle implementation for 3 qubits and two possible solutions.
Reference for oracles: https://www.nature.com/articles/s41467-017-01904-7 (table 2)
'''
def oracle_3_qubits_2_solutions(qc,bits):
    """Phase-mark the two 3-qubit basis states given in 'bits'.

    Each sorted pair (bits[0], bits[1]) of distinct values in 0..7 maps to a
    fixed pattern of single-qubit Z and two-qubit CZ gates, following table 2
    of https://www.nature.com/articles/s41467-017-01904-7.

    :param qc: the quantum circuit to append gates to (3 qubits expected)
    :param bits: sorted list of two distinct ints in [0, 7]
    """
    # One branch per unordered pair; a trailing barrier is always applied.
    if (bits[0] == 0 and bits[1] == 1):
        for i in range(1,3):
            qc.z(i)
        qc.cz(1, 2)
    elif (bits[0] == 0 and bits[1] == 2):
        for i in range(0, 3, 2):
            qc.z(i)
        qc.cz(0, 2)
    elif (bits[0] == 0 and bits[1] == 3):
        for i in range(3):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 0 and bits[1] == 4):
        for i in range(2):
            qc.z(i)
        qc.cz(0, 1)
    elif (bits[0] == 0 and bits[1] == 5):
        for i in range(3):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 0 and bits[1] == 6):
        for i in range(3):
            qc.z(i)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 0 and bits[1] == 7):
        for i in range(3):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 2):
        for i in range(2):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 1 and bits[1] == 3):
        qc.z(0)
        qc.cz(0, 2)
    elif (bits[0] == 1 and bits[1] == 4):
        for i in range(0, 3, 2):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 5):
        qc.z(0)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 6):
        qc.z(0)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 7):
        qc.z(0)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 3):
        qc.z(1)
        qc.cz(1, 2)
    elif (bits[0] == 2 and bits[1] == 4):
        for i in range(1,3):
            qc.z(i)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 5):
        qc.z(1)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 6):
        qc.z(1)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 7):
        qc.z(1)
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 4):
        qc.z(2)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 5):
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 6):
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 7):
        qc.cz(0, 1)
    elif (bits[0] == 4 and bits[1] == 5):
        qc.z(2)
        qc.cz(1, 2)
    elif (bits[0] == 4 and bits[1] == 6):
        qc.z(2)
        qc.cz(0, 2)
    elif (bits[0] == 4 and bits[1] == 7):
        qc.z(2)
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 5 and bits[1] == 6):
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 5 and bits[1] == 7):
        qc.cz(0, 2)
    elif (bits[0] == 6 and bits[1] == 7):
        qc.cz(1, 2)
    qc.barrier()
'''
Diffusion operator: Flip sign and amplify
For 2 qubits, simply apply H and Z to each qubit, then cz, and then apply H again to each qubit:
'''
def diffusion(qc):
if qc.num_qubits == 2:
qc.h(0)
qc.h(1)
qc.z(0)
qc.z(1)
qc.cz(0,1)
qc.h(0)
qc.h(1)
elif qc.num_qubits == 3:
#Apply diffusion operator
for i in range(3):
qc.h(i)
qc.x(i)
# multi-controlled-toffoli
mctz(qc)
qc.barrier()
for i in range(3):
| |
matching criteria
matchLevel = self.searchMatchLevels[resolvedEntityMatchInfo['MATCH_LEVEL']]
matchKey = resolvedEntityMatchInfo['MATCH_KEY']
ruleCode = resolvedEntityMatchInfo['ERRULE_CODE']
#--scoring
bestScores = {}
bestScores['NAME'] = {}
bestScores['NAME']['score'] = 0
bestScores['NAME']['value'] = ''
for featureCode in resolvedEntityMatchInfo['FEATURE_SCORES']:
for scoreRecord in resolvedEntityMatchInfo['FEATURE_SCORES'][featureCode]:
if featureCode == 'NAME':
if 'BT_FN' in scoreRecord:
scoreCode = 'BT_FN'
else:
scoreCode = 'GNR_FN'
else:
scoreCode = 'FULL_SCORE'
matchingScore= scoreRecord[scoreCode]
matchingValue = scoreRecord['CANDIDATE_FEAT']
if featureCode not in bestScores:
bestScores[featureCode] = {}
bestScores[featureCode]['score'] = 0
bestScores[featureCode]['value'] = 'n/a'
if matchingScore > bestScores[featureCode]['score']:
bestScores[featureCode]['score'] = matchingScore
bestScores[featureCode]['value'] = matchingValue
#--perform scoring (use stored match_score if not overridden in the mapping document)
matchedScore = bestScores['NAME']['score']
matchedName = bestScores['NAME']['value']
if False:
matchScore = str(((5-resolvedEntityMatchInfo['MATCH_LEVEL']) * 100) + int(resolvedEntityMatchInfo['MATCH_SCORE'])) + '-' + str(1000+bestScores['NAME']['score'])[-3:]
else:
weightedScores = {}
for featureCode in bestScores:
weightedScores[featureCode] = {}
weightedScores[featureCode]['threshold'] = 0
weightedScores[featureCode]['+weight'] = 100
weightedScores[featureCode]['-weight'] = 0
#if scoredFeatureCount > 1:
matchScore = 0
for featureCode in bestScores:
if featureCode in weightedScores:
if bestScores[featureCode]['score'] >= weightedScores[featureCode]['threshold']:
matchScore += int(round(bestScores[featureCode]['score'] * (weightedScores[featureCode]['+weight'] / 100),0))
elif '-weight' in weightedScores[featureCode]:
matchScore += -weightedScores[featureCode]['-weight'] #--actual score does not matter if below the threshold
#--create the possible match entity one-line summary
row = []
row.append(str(searchIndex)) #--note this gets re-ordered below
row.append(str(resolvedEntity['ENTITY_ID']))
row.append(resolvedEntity['ENTITY_NAME'] + (('\n' + ' aka: ' + matchedName) if matchedName and matchedName != resolvedEntity['ENTITY_NAME'] else ''))
row.append('\n'.join(dataSourceList))
matchData = {}
matchData['matchKey'] = matchKey
matchData['ruleCode'] = self.getRuleDesc(ruleCode)
row.append(formatMatchData(matchData, self.colors))
row.append(matchScore)
matchList.append(row)
if len(matchList) == 0:
print()
if 'SEARCH_STATISTICS' in jsonResponse:
if jsonResponse['SEARCH_STATISTICS'][0]['CANDIDATE_KEYS']['SUMMARY']['FOUND'] > 0:
print('\tOne or more entities were found but did not score high enough to be returned')
print('\tPlease include additional or more complete attributes in your search')
elif jsonResponse['SEARCH_STATISTICS'][0]['CANDIDATE_KEYS']['SUMMARY']['GENERIC'] > 0:
print('\tToo many entities would be returned')
print('\tPlease include additional attributes to narrow the search results')
elif jsonResponse['SEARCH_STATISTICS'][0]['CANDIDATE_KEYS']['SUMMARY']['NOT_FOUND'] > 0:
print('\tNo entities at all were found')
print('\tPlease search by other attributes for this entity if you feel it should exist')
else:
print('\tNo search keys were even generated')
print('\tPlease search by other attributes')
else: #--older versions do not have statistics
print('\tNo matches found or there were simply too many to return')
print('\tPlease include additional search parameters if you feel this entity is in the database')
else:
#--sort the list by match score descending
matchList = sorted(matchList, key=lambda x: x[5], reverse=True)
#--store the last search result and colorize
self.lastSearchResult = []
for i in range(len(matchList)):
self.lastSearchResult.append(matchList[i][1])
matchList[i][0] = str(i+1)
matchList[i][1] = colorize(matchList[i][1], self.colors['entityid'])
matchList[i][2] = matchList[i][2]
self.renderTable(tblTitle, tblColumns, matchList)
print('')
# -----------------------------
    def do_get(self,arg):
        '\nDisplays a particular entity by entity_id or by data_source and record_id.' \
        '\n\nSyntax:' \
        '\n\tget <entity_id>' \
        '\n\tget <dataSource> <recordID>' \
        '\n\tget search <search index>' \
        '\n\tget detail <entity_id>' \
        '\n\tget detail <dataSource> <recordID>' \
        '\n\nNotes: ' \
        '\n\tget search is a shortcut to the entity ID at the search index provided. Must be valid for the last search performed' \
        '\n\tget detail displays every record for the entity while a get alone displays a summary of the entity by dataSource.\n'
        # The string above is the runtime help text (cmd shows do_get.__doc__),
        # so it must not be altered.
        if not argCheck('do_get', arg, self.do_get.__doc__):
            return
        #--no return code if called direct
        calledDirect = sys._getframe().f_back.f_code.co_name != 'onecmd'
        # Strip the optional 'detail' keyword and remember the report style.
        # NOTE(review): arg.upper() also uppercases any dataSource/recordID that
        # follows -- confirm record ids are treated case-insensitively.
        if 'DETAIL ' in arg.upper():
            showDetail = True
            arg = arg.upper().replace('DETAIL ','')
        else:
            showDetail = False
        # 'get search <n>' resolves index n against the prior search results
        if len(arg.split()) == 2 and arg.split()[0].upper() == 'SEARCH':
            lastToken = arg.split()[1]
            if not lastToken.isdigit() or lastToken == '0' or int(lastToken) > len(self.lastSearchResult):
                printWithNewLines('Select a valid index from the prior search results to use this command', 'B')
                return -1 if calledDirect else 0
            else:
                arg = str(self.lastSearchResult[int(lastToken)-1])
        # choose the engine flags based on the API major version
        getFlagList = []
        if apiVersion['VERSION'][0:1] > '1':
            getFlagList.append('G2_ENTITY_INCLUDE_ENTITY_NAME')
            getFlagList.append('G2_ENTITY_INCLUDE_RECORD_DATA')
            getFlagList.append('G2_ENTITY_INCLUDE_RECORD_MATCHING_INFO')
            getFlagList.append('G2_ENTITY_INCLUDE_RECORD_FORMATTED_DATA')
            getFlagList.append('G2_ENTITY_INCLUDE_ALL_RELATIONS')
            getFlagList.append('G2_ENTITY_INCLUDE_RELATED_ENTITY_NAME')
            getFlagList.append('G2_ENTITY_INCLUDE_RELATED_MATCHING_INFO')
            getFlagList.append('G2_ENTITY_INCLUDE_RELATED_RECORD_SUMMARY')
        else:
            getFlagList.append('G2_ENTITY_INCLUDE_ALL_FEATURES')
            getFlagList.append('G2_ENTITY_INCLUDE_ALL_RELATIONS')
        getFlagBits = self.computeApiFlags(getFlagList)
        # one argument -> lookup by entity id; two -> by data source + record id
        if len(arg.split()) == 1:
            apiCall = f'getEntityByEntityIDV2({arg}, {getFlagBits}, response)'
            try:
                response = bytearray()
                retcode = g2Engine.getEntityByEntityIDV2(int(arg), getFlagBits, response)
                response = response.decode() if response else ''
            except G2Exception as err:
                printWithNewLines(str(err), 'B')
                return -1 if calledDirect else 0
        elif len(arg.split()) == 2:
            apiCall = f'getEntityByRecordIDV2("{arg.split()[0]}", "{arg.split()[1]}", {getFlagBits}, response)'
            try:
                response = bytearray()
                retcode = g2Engine.getEntityByRecordIDV2(arg.split()[0], arg.split()[1], getFlagBits, response)
                response = response.decode() if response else ''
            except G2Exception as err:
                printWithNewLines(str(err), 'B')
                return -1 if calledDirect else 0
        else:
            argError(arg, 'incorrect number of parameters')
            return 0
        if debugOutput:
            showApiDebug('get', apiCall, getFlagList, json.loads(response) if response else '{}')
        if len(response) == 0:
            printWithNewLines('0 records found %s' % response, 'B')
            return -1 if calledDirect else 0
        # parse the engine response and build the report header
        resolvedJson = json.loads(str(response))
        relatedEntityCount = len(resolvedJson['RELATED_ENTITIES']) if 'RELATED_ENTITIES' in resolvedJson else 0
        entityID = str(resolvedJson['RESOLVED_ENTITY']['ENTITY_ID'])
        entityName = resolvedJson['RESOLVED_ENTITY']['ENTITY_NAME']
        reportType = 'Detail' if showDetail else 'Summary'
        tblTitle = f'Entity {reportType} for: {entityID} - {entityName}'
        tblColumns = []
        tblColumns.append({'name': 'Record ID', 'width': 50, 'align': 'left'})
        tblColumns.append({'name': 'Entity Data', 'width': 100, 'align': 'left'})
        tblColumns.append({'name': 'Additional Data', 'width': 100, 'align': 'left'})
        #--summarize by data source
        if reportType == 'Summary':
            dataSources = {}
            recordList = []
            for record in resolvedJson['RESOLVED_ENTITY']['RECORDS']:
                if record['DATA_SOURCE'] not in dataSources:
                    dataSources[record['DATA_SOURCE']] = []
                dataSources[record['DATA_SOURCE']].append(record)
            #--summarize by data source
            for dataSource in sorted(dataSources):
                recordData, entityData, otherData = self.formatRecords(dataSources[dataSource], reportType)
                row = [recordData, entityData, otherData]
                recordList.append(row)
        #--display each record
        else:
            recordList = []
            for record in sorted(resolvedJson['RESOLVED_ENTITY']['RECORDS'], key = lambda k: (k['DATA_SOURCE'], k['RECORD_ID'])):
                recordData, entityData, otherData = self.formatRecords(record, 'entityDetail')
                row = [recordData, entityData, otherData]
                recordList.append(row)
        #--display if no relationships
        if relatedEntityCount == 0:
            self.renderTable(tblTitle, tblColumns, recordList, titleColor=self.colors['entityTitle'])
            return 0
        #--otherwise begin the report and add the relationships
        self.renderTable(tblTitle, tblColumns, recordList, titleColor=self.colors['entityTitle'], displayFlag='begin')
        relationships = []
        for relatedEntity in resolvedJson['RELATED_ENTITIES']:
            relationship = {}
            relationship['MATCH_LEVEL'] = relatedEntity['MATCH_LEVEL']
            relationship['MATCH_SCORE'] = relatedEntity['MATCH_SCORE']
            relationship['MATCH_KEY'] = relatedEntity['MATCH_KEY']
            relationship['ERRULE_CODE'] = relatedEntity['ERRULE_CODE']
            relationship['ENTITY_ID'] = relatedEntity['ENTITY_ID']
            relationship['ENTITY_NAME'] = relatedEntity['ENTITY_NAME']
            relationship['DATA_SOURCES'] = []
            for dataSource in relatedEntity['RECORD_SUMMARY']:
                relationship['DATA_SOURCES'].append('%s (%s)' %(colorize(dataSource['DATA_SOURCE'], self.colors['datasource']), dataSource['RECORD_COUNT']))
            relationships.append(relationship)
        tblTitle = f'{relatedEntityCount} related entities'
        tblColumns = []
        tblColumns.append({'name': 'Entity ID', 'width': 15, 'align': 'left'})
        tblColumns.append({'name': 'Entity Name', 'width': 75, 'align': 'left'})
        tblColumns.append({'name': 'Data Sources', 'width': 75, 'align': 'left'})
        tblColumns.append({'name': 'Match Level', 'width': 25, 'align': 'left'})
        tblColumns.append({'name': 'Match Key', 'width': 50, 'align': 'left'})
        relatedRecordList = []
        # closest relationships (lowest match level) are listed first
        for relationship in sorted(relationships, key = lambda k: k['MATCH_LEVEL']):
            row = []
            row.append(colorize(str(relationship['ENTITY_ID']), self.colors['entityid']))
            row.append(relationship['ENTITY_NAME'])
            row.append('\n'.join(sorted(relationship['DATA_SOURCES'])))
            row.append(self.relatedMatchLevels[relationship['MATCH_LEVEL']])
            matchData = {}
            matchData['matchKey'] = relationship['MATCH_KEY']
            matchData['ruleCode'] = self.getRuleDesc(relationship['ERRULE_CODE'])
            row.append(formatMatchData(matchData, self.colors))
            relatedRecordList.append(row)
        self.renderTable(tblTitle, tblColumns, relatedRecordList, titleColor=self.colors['entityTitle'], titleJustify='l', displayFlag='end')
        return 0
# -----------------------------
def formatRecords(self, recordList, reportType):
dataSource = 'unknown'
recordIdList = []
primaryNameList = []
otherNameList = []
attributeList = []
identifierList = []
addressList = []
phoneList = []
otherList = []
for record in [recordList] if type(recordList) != list else recordList:
#--should only ever be one data source in the list
dataSource = colorize(record['DATA_SOURCE'], self.colors['datasource'])
recordIdData = record['RECORD_ID']
if reportType == 'Detail':
if record['MATCH_KEY']:
matchData = {}
matchData['matchKey'] = record['MATCH_KEY']
matchData['ruleCode'] = self.getRuleDesc(record['ERRULE_CODE'])
recordIdData += '\n' + formatMatchData(matchData, self.colors)
#if record['ERRULE_CODE']:
# recordIdData += '\n ' + colorize(self.getRuleDesc(record['ERRULE_CODE']), 'dim')
recordIdList.append(recordIdData)
for item in record['NAME_DATA']:
if item.upper().startswith('PRIMARY'):
primaryNameList.append(colorizeAttribute(item, self.colors['highlight1']))
else:
otherNameList.append(colorizeAttribute('NAME: ' + item if ':' not in item else item, self.colors['highlight1']))
for item in record['ADDRESS_DATA']:
addressList.append(colorizeAttribute('ADDRESS: ' + item if ':' not in item else item, self.colors['highlight1']))
for item in record['PHONE_DATA']:
phoneList.append(colorizeAttribute('PHONE: ' + item if ':' not in item else item, self.colors['highlight1']))
for item in record['ATTRIBUTE_DATA']:
attributeList.append(colorizeAttribute(item, self.colors['highlight1']))
for item in record['IDENTIFIER_DATA']:
identifierList.append(colorizeAttribute(item, self.colors['highlight1']))
for item in sorted(record['OTHER_DATA']):
if not self.isInternalAttribute(item) or reportType == 'Detail':
otherList.append(colorizeAttribute(item, self.colors['highlight1']))
recordDataList = [dataSource] + sorted(recordIdList)
entityDataList = list(set(primaryNameList)) + list(set(otherNameList)) + sorted(set(attributeList)) + sorted(set(identifierList)) + list(set(addressList)) + list(set(phoneList))
otherDataList = sorted(set(otherList))
if reportType == 'Detail':
columnHeightLimit = 1000
else:
columnHeightLimit = 50
recordData = '\n'.join(recordDataList[:columnHeightLimit])
if len(recordDataList) > columnHeightLimit:
recordData += '\n+%s more ' % str(len(recordDataList) - columnHeightLimit)
entityData = '\n'.join(entityDataList[:columnHeightLimit])
if len(entityDataList) > columnHeightLimit:
entityData += '\n+%s more ' % str(len(entityDataList) - columnHeightLimit)
otherData = '\n'.join(otherDataList[:columnHeightLimit])
if len(otherDataList) > columnHeightLimit:
otherData += '\n+%s more ' % str(len(otherDataList) - columnHeightLimit)
return recordData, entityData, otherData
# -----------------------------
def getAmbiguousEntitySet(self, entityId):
#--get other ambiguous relationships if this is the ambiguous entity
getFlagList = []
if apiVersion['VERSION'][0:1] > '1':
getFlagList.append('G2_ENTITY_INCLUDE_ALL_FEATURES')
getFlagList.append('G2_ENTITY_OPTION_INCLUDE_INTERNAL_FEATURES')
getFlagList.append('G2_ENTITY_INCLUDE_ALL_RELATIONS')
getFlagList.append('G2_ENTITY_INCLUDE_RELATED_MATCHING_INFO')
else:
getFlagList.append('G2_ENTITY_INCLUDE_ALL_FEATURES')
getFlagList.append('G2_ENTITY_SHOW_FEATURES_EXPRESSED')
getFlagList.append('G2_ENTITY_SHOW_FEATURES_STATS')
getFlagList.append('G2_ENTITY_INCLUDE_ALL_RELATIONS')
getFlagBits = self.computeApiFlags(getFlagList)
try:
response = bytearray()
retcode = g2Engine.getEntityByEntityIDV2(int(entityId), getFlagBits, response)
response = | |
"""Neighbor models for regression."""
from .base import (NeighborsBase, KNeighborsMixin, RadiusNeighborsMixin,
NeighborsRegressorMixin)
class KNeighborsRegressor(NeighborsBase, NeighborsRegressorMixin,
                          KNeighborsMixin):
    """Regression based on k-nearest neighbors.
    Regression with scalar, multivariate or functional response.
    The target is predicted by local interpolation of the targets associated of
    the nearest neighbors in the training set.
    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
    weights : str or callable, optional (default = 'uniform')
        weight function used in prediction.  Possible values:
        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.
    regressor : callable, optional ((default =
        :func:`mean <skfda.exploratory.stats.mean>`))
        Function to perform the local regression in the functional response
        case. By default the mean is used. It is called with the neighbors of
        a test sample as first argument and, if weights != 'uniform', an
        array of weights as second parameter.
    algorithm : {'auto', 'ball_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`sklearn.neighbors.BallTree`.
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm based on
          the values passed to :meth:`fit` method.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.
    metric : string or callable, (default
        :func:`lp_distance <skfda.misc.metrics.lp_distance>`)
        the distance metric to use for the tree.  The default metric is
        the L2 distance. See the documentation of the metrics module
        for a list of available metrics.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors.
        Doesn't affect :meth:`fit` method.
    multivariate_metric : boolean, optional (default = False)
        Indicates if the metric used is a sklearn distance between vectors (see
        :class:`sklearn.neighbors.DistanceMetric`) or a functional metric of
        the module :mod:`skfda.misc.metrics`.
    Examples
    --------
    Firstly, we will create a toy dataset with gaussian-like samples shifted.
    >>> from skfda.ml.regression import KNeighborsRegressor
    >>> from skfda.datasets import make_multimodal_samples
    >>> from skfda.datasets import make_multimodal_landmarks
    >>> y = make_multimodal_landmarks(n_samples=30, std=.5, random_state=0)
    >>> y_train = y.flatten()
    >>> X_train = make_multimodal_samples(n_samples=30, std=.5, random_state=0)
    >>> X_test = make_multimodal_samples(n_samples=5, std=.05, random_state=0)
    We will fit a K-Nearest Neighbors regressor to regress a scalar response.
    >>> neigh = KNeighborsRegressor()
    >>> neigh.fit(X_train, y_train)
    KNeighborsRegressor(algorithm='auto', leaf_size=30,...)
    We can predict the modes of new samples
    >>> neigh.predict(X_test).round(2) # Predict test data
    array([ 0.38, 0.14, 0.27, 0.52, 0.38])
    Now we will create a functional response to train the model
    >>> y_train = 5 * X_train + 1
    >>> y_train
    FDataGrid(...)
    We train the estimator with the functional response
    >>> neigh.fit(X_train, y_train)
    KNeighborsRegressor(algorithm='auto', leaf_size=30,...)
    And predict the responses as in the first case.
    >>> neigh.predict(X_test)
    FDataGrid(...)
    See also
    --------
    :class:`~skfda.ml.classification.KNeighborsClassifier`
    :class:`~skfda.ml.classification.RadiusNeighborsClassifier`
    :class:`~skfda.ml.classification.NearestCentroids`
    :class:`~skfda.ml.regression.RadiusNeighborsRegressor`
    :class:`~skfda.ml.clustering.NearestNeighbors`
    Notes
    -----
    See Nearest Neighbors in the sklearn online documentation for a discussion
    of the choice of ``algorithm`` and ``leaf_size``.
    This class wraps the sklearn regressor
    `sklearn.neighbors.KNeighborsRegressor`.
    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    .. warning::
        Regarding the Nearest Neighbors algorithms, if it is found that two
        neighbors, neighbor `k+1` and `k`, have identical distances
        but different labels, the results will depend on the ordering of the
        training data.
    """
    def __init__(self, n_neighbors=5, weights='uniform', regressor='mean',
                 algorithm='auto', leaf_size=30, metric='l2',
                 metric_params=None, n_jobs=1, multivariate_metric=False):
        """Initialize the regressor."""
        # all common neighbor options are handled by the shared base class;
        # only the local-regression function is specific to this estimator
        super().__init__(n_neighbors=n_neighbors,
                         weights=weights, algorithm=algorithm,
                         leaf_size=leaf_size, metric=metric,
                         metric_params=metric_params, n_jobs=n_jobs,
                         multivariate_metric=multivariate_metric)
        self.regressor = regressor
    def _init_multivariate_estimator(self, sklearn_metric):
        """Initialize the sklearn K neighbors estimator.
        Args:
            sklearn_metric: (pyfunc or 'precomputed'): Metric compatible with
                sklearn API or matrix (n_samples, n_samples) with precomputed
                distances.
        Returns:
            Sklearn K Neighbors estimator initialized.
        """
        # imported lazily (and aliased) to avoid clashing with this class name
        from sklearn.neighbors import (KNeighborsRegressor as
                                       _KNeighborsRegressor)
        return _KNeighborsRegressor(
            n_neighbors=self.n_neighbors, weights=self.weights,
            algorithm=self.algorithm, leaf_size=self.leaf_size,
            metric=sklearn_metric, metric_params=self.metric_params,
            n_jobs=self.n_jobs)
    def _query(self, X):
        """Return distances and neighbors of given sample."""
        return self.estimator_.kneighbors(X)
class RadiusNeighborsRegressor(NeighborsBase, NeighborsRegressorMixin,
RadiusNeighborsMixin):
"""Regression based on neighbors within a fixed radius.
Regression with scalar, multivariate or functional response.
The target is predicted by local interpolation of the targets associated of
the nearest neighbors in the training set.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
regressor : callable, optional ((default =
:func:`mean <skfda.exploratory.stats.mean>`))
Function to perform the local regression in the functional response
case. By default the mean is used. It is passed the neighbors of a test sample,
and if weights != 'uniform' an array of weights as second parameter.
algorithm : {'auto', 'ball_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`sklearn.neighbors.BallTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, (default
:func:`lp_distance <skfda.metrics.lp_distance>`)
the distance metric to use for the tree. The default metric is
the L2 distance. See the documentation of the metrics module
for a list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
outlier_response : :class:`FData`, optional (default = None)
Default response in the functional response case for test samples
without neighbors.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
multivariate_metric : boolean, optional (default = False)
Indicates if the metric used is a sklearn distance between vectors (see
:class:`sklearn.neighbors.DistanceMetric`) or a functional metric of
the module :mod:`skfda.misc.metrics`.
Examples
--------
Firstly, we will create a toy dataset with gaussian-like samples shifted.
>>> from skfda.ml.regression import RadiusNeighborsRegressor
>>> from skfda.datasets import make_multimodal_samples
>>> from skfda.datasets import make_multimodal_landmarks
>>> y = make_multimodal_landmarks(n_samples=30, std=.5, random_state=0)
>>> y_train = y.flatten()
>>> X_train = make_multimodal_samples(n_samples=30, std=.5, random_state=0)
>>> X_test = make_multimodal_samples(n_samples=5, std=.05, random_state=0)
We will fit a Radius-Nearest Neighbors regressor to regress a scalar
response.
>>> neigh = RadiusNeighborsRegressor(radius=0.2)
>>> neigh.fit(X_train, y_train)
RadiusNeighborsRegressor(algorithm='auto', leaf_size=30,...)
We can predict the modes of new samples
>>> neigh.predict(X_test).round(2) # Predict test data
array([ 0.39, 0.07, 0.26, 0.5 , 0.46])
Now we will create a functional response to train the model
>>> y_train = 5 * X_train + 1
>>> y_train
FDataGrid(...)
We train the estimator with the functional response
>>> neigh.fit(X_train, y_train)
RadiusNeighborsRegressor(algorithm='auto', leaf_size=30,...)
And predict the responses as in the first case.
>>> neigh.predict(X_test)
FDataGrid(...)
See also
--------
:class:`~skfda.ml.classification.KNeighborsClassifier`
:class:`~skfda.ml.classification.RadiusNeighborsClassifier`
:class:`~skfda.ml.classification.NearestCentroids`
:class:`~skfda.ml.regression.KNeighborsRegressor`
:class:`~skfda.ml.clustering.NearestNeighbors`
Notes
-----
See Nearest Neighbors in the sklearn online documentation for a discussion
of the choice of ``algorithm`` and ``leaf_size``.
This class wraps the sklearn regressor
`sklearn.neighbors.RadiusNeighborsRegressor`.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform', regressor='mean',
algorithm='auto', leaf_size=30, metric='l2',
metric_params=None, outlier_response=None, n_jobs=1,
multivariate_metric=False):
"""Initialize the classifier."""
super().__init__(radius=radius, weights=weights, algorithm=algorithm,
leaf_size=leaf_size, metric=metric,
metric_params=metric_params, n_jobs=n_jobs,
multivariate_metric=multivariate_metric)
self.regressor | |
import time
import math
from dronekit import *
from pymavlink import mavutil
"""
Most functions come from examples from dronekit, specifically the guided example
http://python.dronekit.io/examples/guided-set-speed-yaw-demo.html#guided-example-source-code
"""
def arm_and_takeoff(vehicle, aTargetAltitude):
    """
    Arms vehicle and fly to aTargetAltitude (meters).

    Blocks until the autopilot reports it is armable, switches to GUIDED
    mode, arms, commands a takeoff, then polls altitude once per second
    and returns when 95% of the target altitude is reached.

    NOTE(review): this file uses Python 2 print-statement syntax.

    :param vehicle: connected dronekit Vehicle to command
    :param aTargetAltitude: target altitude in metres (relative frame)
    """
    print "Basic pre-arm checks"
    # Don't try to arm until autopilot is ready
    while not vehicle.is_armable:
        print " Waiting for vehicle to initialise..."
        time.sleep(1)
    print "Arming motors"
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True
    # Confirm vehicle armed before attempting to take off
    while not vehicle.armed:
        print " Waiting for arming..."
        time.sleep(1)
    print "Taking off!"
    vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
    # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
    # after Vehicle.simple_takeoff will execute immediately).
    while True:
        print " Altitude: ", vehicle.location.global_relative_frame.alt
        # Break and return from function just below target altitude.
        if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:
            print "Reached target altitude"
            break
        time.sleep(1)
"""
Convenience functions for sending immediate/guided mode commands to control the Copter.
The set of commands demonstrated here include:
* MAV_CMD_CONDITION_YAW - set direction of the front of the Copter (latitude, longitude)
* MAV_CMD_DO_SET_ROI - set direction where the camera gimbal is aimed (latitude, longitude, altitude)
* MAV_CMD_DO_CHANGE_SPEED - set target speed in metres/second.
The full set of available commands are listed here:
http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/
"""
def condition_yaw(vehicle, heading, relative=False):
    """
    Send MAV_CMD_CONDITION_YAW message to point vehicle at a specified heading (in degrees).

    This method sets an absolute heading by default, but you can set the `relative` parameter
    to `True` to set yaw relative to the current yaw heading.

    By default the yaw of the vehicle will follow the direction of travel. After setting
    the yaw using this function there is no way to return to the default yaw "follow direction
    of travel" behaviour (https://github.com/diydrones/ardupilot/issues/2427)

    For more information see:
    http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw

    :param vehicle: connected dronekit Vehicle to command
    :param heading: target yaw angle in degrees
    :param relative: if True, `heading` is an offset from the current yaw
    """
    if relative:
        is_relative = 1 #yaw relative to direction of travel
    else:
        is_relative = 0 #yaw is an absolute angle
    # create the CONDITION_YAW command using command_long_encode()
    # (argument order is fixed by the MAVLink COMMAND_LONG definition)
    msg = vehicle.message_factory.command_long_encode(
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_CMD_CONDITION_YAW, #command
        0, #confirmation
        heading,    # param 1, yaw in degrees
        0,          # param 2, yaw speed deg/s
        1,          # param 3, direction -1 ccw, 1 cw
        is_relative, # param 4, relative offset 1, absolute angle 0
        0, 0, 0)    # param 5 ~ 7 not used
    # send command to vehicle
    vehicle.send_mavlink(msg)
def set_roi(vehicle, location):
    """
    Send MAV_CMD_DO_SET_ROI message to point camera gimbal at a
    specified region of interest (LocationGlobal).
    The vehicle may also turn to face the ROI.

    For more information see:
    http://copter.ardupilot.com/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_do_set_roi

    :param vehicle: connected dronekit Vehicle to command
    :param location: object exposing .lat/.lon/.alt for the ROI
    """
    # create the MAV_CMD_DO_SET_ROI command
    msg = vehicle.message_factory.command_long_encode(
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_CMD_DO_SET_ROI, #command
        0, #confirmation
        0, 0, 0, 0, #params 1-4 (unused for this command)
        location.lat,
        location.lon,
        location.alt
        )
    # send command to vehicle
    vehicle.send_mavlink(msg)
"""
Functions to make it easy to convert between the different frames-of-reference. In particular these
make it easy to navigate in terms of "metres from the current position" when using commands that take
absolute positions in decimal degrees.
The methods are approximations only, and may be less accurate over longer distances, and when close
to the Earth's poles.
Specifically, it provides:
* get_location_metres - Get LocationGlobal (decimal degrees) at distance (m) North & East of a given LocationGlobal.
* get_distance_metres - Get the distance between two LocationGlobal objects in metres
* get_bearing - Get the bearing in degrees to a LocationGlobal
"""
def get_location_metres(original_location, dNorth, dEast):
    """
    Return a location `dNorth` metres North and `dEast` metres East of
    `original_location`, with the same `alt` value.

    The returned object has the same class as `original_location`
    (LocationGlobal or LocationGlobalRelative); any other type raises.
    Uses a spherical-earth approximation, relatively accurate over small
    distances (10m within 1km) except close to the poles. For more
    information see:
    http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
    """
    earth_radius = 6378137.0 #Radius of "spherical" earth
    # Offsets expressed in radians on the sphere
    lat_offset_rad = dNorth/earth_radius
    lon_offset_rad = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))
    # Back to decimal degrees, applied to the starting point
    new_lat = original_location.lat + (lat_offset_rad * 180/math.pi)
    new_lon = original_location.lon + (lon_offset_rad * 180/math.pi)
    loc_type = type(original_location)
    if loc_type is LocationGlobal:
        return LocationGlobal(new_lat, new_lon, original_location.alt)
    if loc_type is LocationGlobalRelative:
        return LocationGlobalRelative(new_lat, new_lon, original_location.alt)
    raise Exception("Invalid Location object passed")
def get_distance_metres(aLocation1, aLocation2):
    """
    Return the approximate ground distance in metres between two
    LocationGlobal objects.

    Equirectangular approximation: not accurate over large distances or
    close to the earth's poles. It comes from the ArduPilot test code:
    https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
    """
    delta_lat = aLocation2.lat - aLocation1.lat
    delta_lon = aLocation2.lon - aLocation1.lon
    # 1.113195e5 ~= metres per degree of latitude
    return math.sqrt(delta_lat*delta_lat + delta_lon*delta_lon) * 1.113195e5
def get_bearing(aLocation1, aLocation2):
    """
    Return the approximate bearing in degrees (0-360, clockwise from
    North) from aLocation1 to aLocation2.

    This method may not be accurate over large distances or close to the
    earth's poles. It comes from the ArduPilot test code:
    https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
    """
    east_offset = aLocation2.lon - aLocation1.lon
    north_offset = aLocation2.lat - aLocation1.lat
    # atan2 measures from East; negate/rotate so 0 deg = North, clockwise
    bearing = 90.00 + math.atan2(-north_offset, east_offset) * 57.2957795
    return bearing + 360.00 if bearing < 0 else bearing
"""
Functions to move the vehicle to a specified position (as opposed to controlling movement by setting velocity components).
The methods include:
* goto_position_target_global_int - Sets position using SET_POSITION_TARGET_GLOBAL_INT command in
MAV_FRAME_GLOBAL_RELATIVE_ALT_INT frame
* goto_position_target_local_ned - Sets position using SET_POSITION_TARGET_LOCAL_NED command in
MAV_FRAME_BODY_NED frame
* goto - A convenience function that can use Vehicle.simple_goto (default) or
goto_position_target_global_int to travel to a specific position in metres
North and East from the current location.
This method reports distance to the destination.
"""
def goto_position_target_global_int(vehicle, aLocation):
    """
    Send SET_POSITION_TARGET_GLOBAL_INT command to request the vehicle fly to a specified LocationGlobal.

    For more information see: https://pixhawk.ethz.ch/mavlink/#SET_POSITION_TARGET_GLOBAL_INT
    See the above link for information on the type_mask (0=enable, 1=ignore).
    At time of writing, acceleration and yaw bits are ignored.

    :param vehicle: connected dronekit Vehicle to command
    :param aLocation: object exposing .lat/.lon (degrees) and .alt (metres)
    """
    msg = vehicle.message_factory.set_position_target_global_int_encode(
        0,       # time_boot_ms (not used)
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame
        0b0000111111111000, # type_mask (only position bits enabled; velocity/accel/yaw ignored)
        aLocation.lat*1e7, # lat_int - X Position in WGS84 frame in 1e7 * meters
        aLocation.lon*1e7, # lon_int - Y Position in WGS84 frame in 1e7 * meters
        aLocation.alt, # alt - Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT
        0, # X velocity in NED frame in m/s
        0, # Y velocity in NED frame in m/s
        0, # Z velocity in NED frame in m/s
        0, 0, 0, # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0)    # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
    # send command to vehicle
    vehicle.send_mavlink(msg)
def goto_position_target_local_ned(vehicle, north, east, down):
    """
    Send SET_POSITION_TARGET_LOCAL_NED command to request the vehicle fly to a specified
    location in the North, East, Down frame.

    It is important to remember that in this frame, positive altitudes are entered as negative
    "Down" values. So if down is "10", this will be 10 metres below the home altitude.

    NOTE(review): the code uses MAV_FRAME_LOCAL_NED (offsets relative to the
    home/EKF origin), not MAV_FRAME_BODY_NED — confirm which frame callers expect.

    Starting from AC3.3 the method respects the frame setting. Prior to that the frame was
    ignored. For more information see:
    http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/#set_position_target_local_ned
    See the above link for information on the type_mask (0=enable, 1=ignore).
    At time of writing, acceleration and yaw bits are ignored.
    """
    msg = vehicle.message_factory.set_position_target_local_ned_encode(
        0,       # time_boot_ms (not used)
        0, 0,    # target system, target component
        mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
        0b0000111111111000, # type_mask (only positions enabled)
        north, east, down, # x, y, z positions (or North, East, Down in the MAV_FRAME_BODY_NED frame
        0, 0, 0, # x, y, z velocity in m/s (not used)
        0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
        0, 0)    # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
    # send command to vehicle
    vehicle.send_mavlink(msg)
def goto(vehicle, dNorth, dEast, gotoFunction=None):
"""
Moves the vehicle to a position dNorth metres North and dEast metres East of the current position.
The method takes a function pointer argument with a single `dronekit.lib.LocationGlobal` parameter for
the target position. This allows it to be called with different position-setting commands.
By default it uses the standard method: dronekit.lib.Vehicle.simple_goto().
The method reports the distance to target every two seconds.
"""
| |
# Repository: h4yn0nnym0u5e/OSCAudio (1-10 GitHub stars)
import re
import fileinput
import os
import json
##############################################################################################
# User settings
dynamic = True  # True: generate the dynamic (runtime-instantiable) flavour of the library
ftrl = ['play_wav' # files to reject
       ]
limit = 1000  # processing cap — presumably a safety limit on items scanned; TODO confirm exact role
rp = '../../Audio'  # root path of the Audio library working copy
idxf = '../../Audio/gui/index.html'  # GUI index file to process
# USB audio is in cores, not the Audio library. At time of writing
# Teensy 3.x and 4.x have the same class structure.
ar = 'c:/Program Files (x86)/Arduino/hardware/teensy/avr/cores/teensy4/'
afl = ['usb_audio.h']  # additional header files taken from the cores directory
##############################################################################################
def printFn(base, fnT):
    """Emit one CSV-style diagnostic line: the class name followed by the
    three fields of the function tuple, each comma-terminated."""
    fields = (base,) + fnT
    print("%s,%s,%s,%s," % fields)
def paramToType(p):
    """Map one C++ parameter declaration to an OSC type-tag character.

    Returns 'f' for float, 'i' for integer types, ('i', enumName) for the
    known enum types (integer on the wire, cast to the enum), 'v' for
    void, ';' for bool, 'b' (blob) for pointer/reference types, 's' for
    char*, or None when nothing matches.  Later rules deliberately
    override earlier ones, so e.g. ``char *`` ends up 's' even though it
    also matches the generic pointer ('b') rule.
    """
    p = re.sub('\s*=.*$','',p)           # remove default value, if present
    bare = re.sub('\w+$','',p).strip()   # remove trailing parameter name
    if '' != bare:                       # keep type-only declarations intact
        p = bare
    result = None
    for pattern, tag in (('float', 'f'),
                         ('(short|int|signed)', 'i')):
        if re.search(pattern, p):
            result = tag
    enum_m = re.search('(AudioFilterLadderInterpolation|behaviour_e|AudioEffectDelayMemoryType_t)', p)
    if enum_m:
        result = ('i', enum_m.group(1))  # integer, but cast to enum
    # pointer/reference rules come last: char* is string, any other
    # pointer/reference type is a blob
    for pattern, tag in (('void', 'v'),
                         ('bool', ';'),
                         ('[*&]', 'b'),
                         ('char\s+[*]', 's')):
        if re.search(pattern, p):
            result = tag
    return result
##############################################################################################
# Regexes (matched against the raw declaration line) identifying member
# functions that are declared in the headers but never defined anywhere,
# so no OSC route may be generated for them.
undefinedFns = { # member functions declared but not actually defined anywhere
     'AudioAnalyzePrint': ['.*trigger.*float.*int']
    ,'AudioPlayQueue': ['.*stop.*void']
    ,'AudioOutputSPDIF3': ['.*static.*pll_locked.*void']
    }
# the static library additionally lacks AudioAmplifier::slew()
if not dynamic:
    undefinedFns['AudioAmplifier'] = ['.*void.*slew.*bool']
def alterPtypeFBQ(li, sf):
    """Special-case OSC pattern for AudioFilterBiquad::setCoefficients.

    The coefficients are five doubles unless the declaration takes an
    int*, in which case all six arguments are integers.  Returns the
    (OSC type-tag string, shortened name) pair; the short name is
    always 'setC*'.
    """
    if re.search(r'int\s*[*]', li):
        return ('iiiiii', 'setC*')
    return ('iddddd', 'setC*')
# Member functions needing hand-written "helper" routing.  Key is either
# 'ClassName~method' (entry: [OSC type-tag string, OSC return tag, C++
# return type, optional options dict]) or a bare 'ClassName' (entry: one
# options dict applying to the whole class).  Option keys seen below:
# 'doc' docs text, 'dtor'/'init'/'vars' extra destructor body /
# initializer-list fragment / member variables, 'ptype' callable that
# rewrites the type-tag, 'zapCon' suppress constructor parameters.
helperFns = { # member functions which need special "helpers"
    'AsyncAudioInputSPDIF3': [{'zapCon': 'y'}],
    'AudioAnalyzeFFT1024~windowFunction': ['s',';','bool',{'doc': 'string selecting one of the built-in window types, e.g. "Hanning"'}],
    'AudioAnalyzeFFT256~windowFunction': ['s',';','bool',{'doc': 'string selecting one of the built-in window types, e.g. "BlackmanHarris"'}],
    'AudioAnalyzePrint~name': ['s',';','bool',{'dtor': 'free(namePtr);','vars': ['char* namePtr']}],
    'AudioAnalyzePrint': [{'init': ', namePtr(0)'}],
    'AudioControlSGTL5000~eqFilter': ['ib','void','void',{'doc': 'filter number; blob holding 7 int32 parameters'}],
    'AudioControlTLV320AIC3206~setIIRCoeffOnADC': ['ib','void','void',{'doc': 'filter channel; blob holding 3 uint32 coefficients'}],
    'AudioEffectGranular~begin': ['bi','void','void',{'dtor': 'free(sample_bank);', 'vars': ['int16_t* sample_bank'],'doc': 'blob holding 16-bit grain data; number of samples in data'}],
    'AudioEffectGranular': [{'init': ', sample_bank(0)'}],
    'AudioFilterBiquad~setCoefficients': ['iddddd','void','void',{'ptype': alterPtypeFBQ,'doc': 'filter stage number, followed by 5 coefficient values'}],
    'AudioPlayMemory~play': ['i','void','void',{'doc': '32-bit integer pointing to valid data built in to sketch'}],
    'AudioPlaySdRaw~play': ['s','void','void'],
    'AudioPlaySdWav~play': ['s','void','void'],
    'AudioPlaySerialflashRaw~play': ['s','void','void'],
    'AudioSynthWaveform~arbitraryWaveform': ['bf',';','bool',{'dtor': 'free(arbdata);', 'init': ', arbdata(0)', 'vars': ['int16_t* arbdata'],'doc': 'blob containing 256 samples; float is currently unused'}],
    'AudioSynthWaveform': [{'init': ', arbdata(0)'}],
    'AudioSynthWaveformModulated~arbitraryWaveform': ['bf',';','bool',{'dtor': 'free(arbdata);', 'init': ', arbdata(0)','vars': ['int16_t* arbdata'],'doc': 'blob containing 256 samples; float is currently unused'}],
    'AudioSynthWaveformModulated': [{'init': ', arbdata(0)'}],
    }
# OSC type-tag -> OSCMessage accessor call template (%d = argument index)
getd = {
    'i': 'getInt(%d)',
    'f': 'getFloat(%d)',
    's': 'getString(%d)',
    'b': 'getBlob(%d)',
    ';': 'getBoolean(%d)',
    }
# return value cast dict: C++ declared return type -> cast applied before
# passing the result to addReplyResult()
rtcd = {
    'boolean': 'bool',
    'envelopeStateEnum': 'uint8_t',
    'AudioEffectDelayMemoryType_t': 'int',
    'double': 'float',
    'int': 'int32_t',
    'int16_t *': 'uint32_t',
    'unsigned': 'uint32_t',
    'unsigned char': 'uint8_t',
    'unsigned int': 'uint32_t',
    'unsigned short': 'uint16_t'
    }
notD = ['AudioAnalyzeEvent', 'AudioEffectExpEnvelope', 'AudioMixer', 'AudioMixerStereo'] # not in static library
notEver = ['AudioMixerBase'] # not in either library
# dict used as a set: every C++ return type encountered during generation
returnTypes={}
##############################################################################################
# Convert dict entry to resource settings enums
def rsrcEnum(d):
    """Convert a resource dict entry to C++ resource-setting enum names.

    'resource' is mandatory and becomes ``rsrc_<name>``; an optional
    'setting' (shareable resource) becomes ``setg_<name>``, otherwise
    ``setgUnshareable``.  Spaces are replaced with underscores.
    """
    dr = {'resource': 'rsrc_' + d['resource'].replace(' ', '_')}
    if 'setting' in d:  # shareable
        dr['setting'] = 'setg_' + d['setting'].replace(' ', '_')
    else:
        dr['setting'] = 'setgUnshareable'
    return dr
##############################################################################################
def mkClass(cn,fd):
    """Generate the C++ ``OSCAudio...`` wrapper class for Audio class ``cn``.

    ``fd`` is the parsed description of the class: keys containing '~'
    describe one public member function each (value dict with raw line
    'l', name 'f', short name 's', return type 'r', parameter list 'p');
    keys without '~' hold class-global data such as constructor
    parameters ('cp') and exclusive-resource usage ('excl').  Entries are
    annotated in place ('o' OSC pattern, 'doc', 'exclude').

    Returns the complete C++ class definition text, including the
    route() OSC dispatcher and any private helper declarations.
    """
    cno = cn.replace('Audio','OSCAudio')  # name of the generated wrapper class
    init = ''
    if cn in helperFns and 'init' in helperFns[cn][0]:
        init = helperFns[cn][0]['init']
    s = ''
    tst = 'if' # first test for address match is "if" statement: rest will be "else if"
    pfd={}
    varl = []
    ctorBody = ''
    dtorBody = ''
    for fi in sorted(fd): # go through public functions
        if '~' not in fi: # class-global stuff, not a function
            continue
        fn = fd[fi]['f'] # function name
        sf = fd[fi]['s'] # shortened function name
        li = fd[fi]['l'] # raw line from original declaration
        rt = fd[fi]['r'] # return type
        rt = rt.replace('friend','').replace('static','').strip()
        rtc= ''
        if rt in rtcd:
            rtc = '(' + rtcd[rt] + ')'
        pt = ''
        pl = ''
        cmnt = ''
        for i in range(len(fd[fi]['p'])): # go through parameter list
            cast = ''
            p = fd[fi]['p'][i]
            t = paramToType(p)
            if isinstance(t,tuple):
                # enum parameter: integer on the wire, cast on extraction
                cast = '(' + t[1] + ') '
                t = t[0]
            if t and t != 'v':
                pt += t
                pl += F',{cast}msg.' + (getd[t] % i)
            if t and (t == 'b' or t == 's'):
                cmnt = '// ' # comment out functions needing blobs and strings, for now
        # Special case rejection of functions declared
        # but not defined etc.
        if cn in undefinedFns:
            for rex in undefinedFns[cn]:
                if re.search(rex,li):
                    cmnt = '// NOT DEFINED: '
        # Special cases for methods that need a "helper function"
        # to work properly
        hfi = cn+'~'+fn
        if hfi in helperFns:
            pt,rt = helperFns[hfi][:2]
            pl = '~msg'
            cmnt = ''
            pfd[fn] = helperFns[hfi]
            if len(helperFns[hfi]) > 3:
                hfd = helperFns[hfi][3]
                if 'dtor' in hfd:
                    dtorBody = ' '.join((dtorBody,hfd['dtor']))
                if 'vars' in hfd:
                    varl += hfd['vars']
                if 'ptype' in hfd:
                    pt,sf = hfd['ptype'](li,sf)
                if 'doc' in hfd:
                    fd[fi]['doc'] = hfd['doc']
        if len(pt) > 0:
            pt = '"'+pt+'"'
        else:
            pt = 'NULL'
        returnTypes[rt] = 'y'
        fd[fi]['o'] = pt
        if '' != cmnt:
            fd[fi]['exclude'] = 'y'
        # emit one dispatch line; void methods reply "executed", others
        # reply with the (possibly cast) return value
        if 'void' == rt:
            s += f'      {cmnt}{tst} (isTarget(msg,addrOff,"/{sf}",{pt})) {{{fn}({pl[1:]}); addReplyExecuted(msg,addrOff,reply,nameOfTarget);}} // {li}\n'
        else:
            s += f'      {cmnt}{tst} (isTarget(msg,addrOff,"/{sf}",{pt})) {{addReplyResult(msg,addrOff,reply,{rtc}{fn}({pl[1:]}),nameOfTarget); }} // {li}\n'
        if '' == cmnt:
            tst = 'else if'
    # ours, but no method matched: say so
    final = 'else '
    if 'if' == tst:
        final = ''
    s += f'      {final}addReplyResult(msg,addrOff,reply,false,nameOfTarget,INVALID_METHOD);\n'
    # now go though any private helper functions we may need
    s += '''    }\n\t\t}\n'''
    if len(pfd) > 0:
        s += '\tprivate:\n'
        for fn in pfd:
            s += f'\t\t{pfd[fn][2]} {fn}(OSCMessage& msg);\n'
    if len(varl) > 0:
        for vdec in varl:
            s += f'\t\t{vdec};\n'
    # deal with constructor parameters
    if cn in helperFns and 'zapCon' in helperFns[cn][0]: # insane constructors!
        fd['cp'] = ''
    cp = ''
    cinit = ''
    oscpt = ''
    if 'cp' in fd and '' != fd['cp']:
        spl=fd['cp'].split(',')
        cinit = F'{cn}('
        for sp in spl:
            sp = re.sub('=[^,]*','',sp) # remove parameter defaults
            oscpt += paramToType(sp)[0]
            sp = sp.split(' ')
            cinit = cinit + F'{sp[-1]}, '
        cinit = cinit[:-2] + '), ' + F'/* {oscpt} */ '
        fd['oscpt'] = oscpt
        cp = ', '+fd['cp'] # append constructor parameters to derived class
    # resource checking
    if dynamic and 'excl' in fd:
        rsrcCheck = '\n    '
        rcnt = len(fd['excl'])
        rsrcCheck += F'const static OSCAudioResourceCheck_t resources[{rcnt}];\n'
        rsrcCheck += F'    static rsrcState_e rsrcState;\n'
        ctorBody += ' rsrcState = rsrcThisActive;'
        dtorBody += ' rsrcState = rsrcThisDormant;'
        rsrcArrayValue = '\n#if defined(OSC_RSRC_ENABLE_DEFINE_ARRAYS)'
        rsrcArrayValue += F'\nconst OSCAudioResourceCheck_t {cno}::resources[] = {{\n'
        for rsrc in fd['excl']:
            rd = rsrcEnum(rsrc)
            rsrcArrayValue += F"  {{{rd['resource']},{rd['setting']}}},\n"
        rsrcArrayValue += '};\n'
        rsrcArrayValue += F'rsrcState_e {cno}::rsrcState;\n'
        rsrcArrayValue += '#endif // defined(OSC_RSRC_ENABLE_DEFINE_ARRAYS)\n'
    else:
        rsrcCheck = ''
        rsrcArrayValue = ''
    # start of class definition
    dtorLine = ''
    if '' != dtorBody:
        dtorLine = F'''    ~{cno}() {{{dtorBody}}} \n'''
    if dynamic: # need destructor and resource check for dynamic library
        s = F'''class {cno} : public {cn}, public OSCAudioBase
{{
  public:
    {cno}(const char* _name{cp}) : {cinit} OSCAudioBase(_name, (AudioStream*) this){init} {{{ctorBody}}}
    {cno}(const char* _name, OSCAudioGroup& grp{cp}) : {cinit} OSCAudioBase(_name, grp, (AudioStream*) this){init} {{{ctorBody}}}
{dtorLine}{rsrcCheck}
    void route(OSCMessage& msg, int addrOff, OSCBundle& reply)
    {{
      int nameOff;
      if ((nameOff = isMine(msg,addrOff)) > 0)
      {{
        addrOff += nameOff;
        char* nameOfTarget;
        nameOfTarget = alloca(getPathNameLength(this)+1);
        if (NULL != nameOfTarget)
          getPathNameTo(this,nameOfTarget);
''' + s
    else: # no destructor, resource check etc.
        s = F'''class {cno} : public {cn}, public OSCAudioBase
{{
  public:
    {cno}(const char* _name{cp}) : {cinit} OSCAudioBase(_name, (AudioStream*) this){init} {{}}
    {cno}(const char* _name, OSCAudioGroup& grp{cp}) : {cinit} OSCAudioBase(_name, grp, (AudioStream*) this){init} {{{ctorBody}}}
    void route(OSCMessage& msg, int addrOff, OSCBundle& reply)
    {{
      int nameOff;
      if ((nameOff = isMine(msg,addrOff)) > 0)
      {{
        addrOff += nameOff;
        char* nameOfTarget;
        nameOfTarget = alloca(getPathNameLength(this)+1);
        if (NULL != nameOfTarget)
          getPathNameTo(this,nameOfTarget);
''' + s
    # end of class definition
    s += F'''}};{rsrcArrayValue}
'''
    return s
##############################################################################################
def mkShort(d):
ac = 1
while True:
# make a list of shortened function names
# of length al, for every function which doesn't
# already have a shortened name
shrtd={}
fnd = {}
for fn in d:
if '~' not in fn:
continue
if 's' not in d[fn]:
shrt = d[fn]['f'][:ac]
kt = d[fn]['f']
if shrt not in shrtd:
shrtd[shrt] = [kt]
fnd[shrt] = [fn]
else:
fnd[shrt] += [fn]
if kt not in shrtd[shrt]:
shrtd[shrt] += [kt]
# find all shortened names | |
"0",
# "http": { "_datetime": " May 14 10:46:59", "_ident": "1829102019201192", "_type": "http", "method": "GET",
# "usergaent": "Mozilla Firefix 1.0", "cookie": "1029100929101834810380101308084745785723978",
# "fullurl": "https://www.adisa.com/index.html"} } } } }
#
def insertIPTCPHTTP(self, jdata, digest):
datetime = jdata['_datetime']
method = jdata['method']
usergaent = jdata['usergaent']
cookie = jdata['cookie']
fullurl = jdata['fullurl']
#
self.DBInsert('tcphttp', digest, TCPHTTPParticle('tcphttp', digest, datetime, method, usergaent, cookie, fullurl))
#
self._vectorSpace.addParticleList(0,0,0,('tcphttp', digest ))
#
if (DEBUG):
print('Identifier :-> TCP/IP HTTP/', digest)
print('Datatime :->', datetime)
print('HTTP Method :->', method)
print('HTTP User Agent:->', usergaent)
print('HTTP Cookie :->', cookie)
print('The Full URL :->', fullurl)
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> SYSLOG JSON Event Data Structure is as follows:
# {"event": { "syslog": { "_datetime": "Apr 29 15:00:14", "_ident": "3520479944", "_type": "syslog",
# "system": { "_type": "name","__text": "server-192.178.67.1" },"process": { "_type": "proc","__text": "systemd:" },
# "message": { "_type": "ascii","__text": "Started Vsftpd ftp daemon." } }, "_datetime": "Apr 29 15:00:14",
# "_ident": "2298700593","_type": "syslog" } }
#
def insertSyslog(self, jdata, digest):
datetime = jdata['syslog']['_datetime']
system = jdata['syslog']['system']['__text']
processname = jdata['syslog']['process']['__text'].split('[')[0]
if ("[" in jdata['syslog']['process']['__text']):
processid = jdata['syslog']['process']['__text'].split('[')[1].split(']')[0]
else:
processid = 'NULL'
#
self.DBInsert('syslog', digest, SyslogParticle('syslog', digest, datetime,system, processname, processid, jdata['syslog']))
#
self._vectorSpace.addParticleList(0,0,0,('syslog', digest ))
#
if (DEBUG):
print('Identifier :-> SYSLOG/', digest)
print('Datatime :->', datetime)
print('System :->', system)
print('Process Name :->', processname)
print('Process ID :->', processid)
print('Message :->', jdata['syslog'])
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> FTPD JSON Event Data Structure is as follows:
# {"event": { "ftpd": { "_datetime": "Mar 28 19:59:31", "_ident": "136661321", "_type": "ftpd",
# "transfertime": "5057", "remotehost": { "_type": "ipv4","__text": "192.168.20.10" }, "filesize": "8196242",
# "filename": "/home/ajcblyth/datalog2.tar.gz", "transfertype": "b", "actionflag": "_", "direction": "o",
# "accessmode": "r", "username": "ajcblyth", "servicename": "ftp", "authmethod": "0", "authuserid": "*",
# "status": "c" }, "_datetime": "Mar 28 19:59:31", "_ident": "1290283528", "_type": "ftpd" } }
#
def insertFTPD(self, jdata, digest):
datetime = jdata['_datetime']
transfertime = jdata['transfertime']
remotehost= jdata['remotehost']['__text']
filesize = jdata['filesize']
filename = jdata['filename']
transfertype = jdata['transfertype']
actionflag = jdata['actionflag']
direction = jdata['direction']
accessmode = jdata['accessmode']
username = jdata['username']
servicename = jdata['servicename']
authmethod = jdata['authmethod']
authuserid = jdata['authuserid']
status = jdata['status']
#
self.DBInsert('ftpd', digest, FTPDParticle('ftpd', digest, datetime, transfertime, remotehost, remotehost,
filename, transfertype, actionflag, direction, accessmode,
username, servicename, authmethod, authuserid, status ))
#
self._vectorSpace.addParticleList(0,0,0,('ftpd', digest ))
#
if (DEBUG):
print('Identifier :-> FTPD/', digest)
print('Datatime :->', datetime)
print('Transfer Time :->', transfertime)
print('Remote Host :->', remotehost)
print('File Size :->', filesize)
print('File Name :->', filename)
print('Transfer Type :->', transfertype)
print('Action Flag :->', actionflag)
print('Direction :->', direction)
print('Access Mode :->', accessmode)
print('USername :->', username)
print('Service Name :->', servicename)
print('authmethod :->', authmethod)
print('authuserid :->', authuserid)
print('Status :->', status )
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> TCPD JSON Event Data Structure is as follows:
# {"event": { "syslog": { "_datetime": "Mar 28 19:23:11", "_ident": "1571927734", "_type": "tcpd",
# "system": { "_type": "name","__text": "server-03" },"process": { "_type": "papid","__text": "in.telnetd[1140]:" },
# "message": { "_type": "ascii","__text": "Connect from 10.63.148.185" }, "tcpd": { "datetime": { "_type": "std",
# "__text": "Mar 28 19:23:11" }, "system": { "_type": "ascii","__text": "server-03" },
# "process": { "_type": "papid","__text": "in.telnetd[1140]:" },
# "message": { "_type": "ascii","__text": "10.63.148.185" }, "_datetime": "Mar 28 19:23:11",
# "_ident": "95084021", "_type": "tcpd" } } ,"_datetime": "Mar 28 19:23:11", "_ident": "1577131332",
# "_type": "syslog" } }
#
def insertTCPD(self, jdata, digest):
datetime = jdata['datetime']['__text']
system = jdata['system']['__text']
process = jdata['process']['__text']
connection = jdata['message']['__text']
#
self.DBInsert('tcpd', digest, TCPDParticle('tcpd', digest, datetime, system, process, connection))
#
self._vectorSpace.addParticleList(0,0,0,('tcpd', digest ))
#
if (DEBUG):
print('Identifier :-> TCPD/', digest)
print('Datatime :->', datetime)
print('System :->', system)
print('Process Name :->', process)
print('Connection :->', connection)
print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
#
return digest
#
# -> SNORT JSON Event Data Structure is as follows:
# {"event": { "syslog": { "_datetime": "Apr 29 14:24:14", "_ident": "1121724743", "_type": "snort",
# "system": { "_type": "name","__text": "localhost" },"process": { "_type": "proc","__text": "snort:" },
# "message": { "_type": "ascii","__text": "[1:1000006:1] Nmap XMAS Tree Scan {TCP} 192.168.1.100:64174 ->
# 192.168.1.252:6001" }, "snort": { "_datetime": "Apr 29 14:24:14", "_ident": "1739781287",
# "_type": "snort","datatime": { "_type": "std","__text": "Apr 29 14:24:14" },
# "system": { "_type": "name","__text": "localhost" },"process": { "_type": "proc","__text": "snort:" },
# "version": "[1:1000006:1]", "class": "NULL","priority": "NULL",
# "message": { "_type": "ascii","__text": "Nmap XMAS Tree" },
# "protocol": "TCP","sourceip": { "_type": "ipv4","__text": "192.168.1.100" },
# "sourceport": "64174","destinationip": { "_type": "ipv4","__text": "192.168.1.252" },
# "destinationport": "6001" } },"_datetime": "Apr 29 14:24:14", "_ident": "1011893180", "_type": "syslog" } }
#
def insertSnort(self, jdata, digest):
    """Persist a parsed snort alert event and index it in the vector space.

    The syslog process field comes in two shapes: "name[pid]:" (pid between
    the brackets) or "name:" (no pid available).

    :param jdata: decoded snort event dictionary (see the sample JSON layout
        in the comment above this method)
    :param digest: unique identifier of the event
    :return: the event digest, unchanged
    """
    event_time = jdata['_datetime']
    host = jdata['system']['__text']
    version = jdata['version']
    classification = jdata['class']
    priority = jdata['priority']
    protocol = jdata['protocol']
    srcport = jdata['sourceport']
    dstport = jdata['destinationport']
    proc_text = jdata['process']['__text']
    if ("[" in proc_text):
        # "name[pid]:" form - take the text before '[' and between the brackets
        processname, _, remainder = proc_text.partition('[')
        processid = remainder.partition(']')[0]
    else:
        # "name:" form - strip the trailing colon, no pid available
        processname = proc_text.partition(':')[0]
        processid = 'NULL'
    message = jdata['message']['__text']
    src = jdata['sourceip']['__text']
    dst = jdata['destinationip']['__text']
    #
    self.DBInsert('snort', digest, SnortParticle('snort', digest, event_time,
                                                 host, processname, processid,
                                                 version, classification, priority,
                                                 protocol, message, src, srcport,
                                                 dst, dstport, jdata))
    #
    self._vectorSpace.addParticleList(0, 0, 0, ('snort', digest))
    #
    if (DEBUG):
        for label, value in (('Identifier :-> SNORT/', digest),
                             ('Datatime :->', event_time),
                             ('System :->', host),
                             ('Process Name :->', processname),
                             ('Process ID :->', processid),
                             ('Version :->', version),
                             ('Classification :->', classification),
                             ('Priority :->', priority),
                             ('Protocol :->', protocol),
                             ('Message :->', message),
                             ('Src IP Address :->', src),
                             ('Src Port :->', srcport),
                             ('DST IP Address :->', dst),
                             ('Dst Port :->', dstport)):
            print(label, value)
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
    #
    return digest
#
# -> SSHD JSON Event Data Structure is as follows:
# {"event": { "syslog": { "_datetime": "Apr 29 13:13:35", "_ident": "2289628760", "_type": "sshd",
# "system": { "_type": "name","__text": "localhost" },"process": { "_type": "papid","__text": "sshd[11690]:" },
# ] "message": { "_type": "ascii","__text": "pam_unix(sshd:session): session closed for user ajcblyth" },
# "sshd" : {"_datetime": "Apr 29 13:13:35","_ident": "5449253743","_type": "sshd",
# "system": { "_type": "name","__text": "localhost" },"process": { "_type": "papid","__text": "sshd[11690]:" },
# "sourceaddr": { "_type": "ipv4","__text": "NULL" },"sourceport": "NULL","username": "NULL",
# "message": { "_type": "ascii","__text": "pam_unix(sshd:session): session closed for user ajcblyth" } } },
# "_datetime": "Apr 29 13:13:35","_ident": "3159624982","_type": "syslog" } }
#
def insertSSHD(self, jdata, digest):
    """Persist a parsed sshd syslog event and index it in the vector space.

    :param jdata: decoded sshd event dictionary (see the sample JSON layout
        in the comment above this method)
    :param digest: unique identifier of the event
    :return: the event digest. (Previously returned the bare constant True;
        changed to match insertTCPD/insertSnort/insertVSFTPD. The digest is
        still truthy, so callers that only test the result are unaffected.)
    """
    datetime = jdata['_datetime']
    system = jdata['system']['__text']
    process = jdata['process']['__text']
    sourceaddr = jdata['sourceaddr']['__text']
    sourceport = jdata['sourceport']
    username = jdata['username']
    message = jdata['message']['__text']
    #
    self.DBInsert('sshd', digest, SSHDParticle('sshd', digest, datetime, system, process,
                                               sourceaddr, sourceport, username, message))
    #
    self._vectorSpace.addParticleList(0, 0, 0, ('sshd', digest))
    #
    if (DEBUG):
        print('Identifier :-> SSHD/', digest)
        print('Datatime :->', datetime)
        print('System :->', system)
        print('Process[ID] :->', process)
        print('Source IP Addr :->', sourceaddr)
        print('Source Port :->', sourceport)
        print('Username :->', username)
        print('Message :->', message)
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
    # Consistency fix: every sibling insert method returns the digest.
    return digest
#
# -> VSFTPD JSON Event Data Structure is as follows:
# {"event": { "syslog": { "_datetime": "Apr 29 15:03:19", "_ident": "3176596008", "_type": "vsftpd",
# "system": { "_type": "name","__text": "localhost" },"process": { "_type": "papid","__text": "vsftpd[17468]" },
# "message": { "_type": "ascii","__text": "[ftp] FTP response: Client fdf8:f53e:61e4::18:192.168.1.100, 221 Goodbye." },
# "vsftpd": { "_datetime": "Apr 29 15:03:19", "_ident": "4565185013", "_type": "vsftpd","username": "ftp",
# "commandtype": "FTP response","sourceip": "192.168.1.100", "message": "221 Goodbye." } },
# "_datetime": "Apr 29 15:03:19", "_ident": "1388589004", "_type": "syslog" } }
#
def insertVSFTPD(self, jdata, digest):
    """Persist a parsed vsftpd syslog event and index it in the vector space.

    :param jdata: decoded vsftpd event dictionary (see the sample JSON layout
        in the comment above this method)
    :param digest: unique identifier of the event
    :return: the event digest, unchanged
    """
    datetime = jdata['_datetime']
    username = jdata['username']
    commandtype = jdata['commandtype']
    sourceip = jdata['sourceip']
    message = jdata['message']
    #
    self.DBInsert('vsftpd', digest, VSFTPDParticle('vsftpd', digest, datetime, sourceip, username,
                                                   commandtype, message))
    #
    self._vectorSpace.addParticleList(0, 0, 0, ('vsftpd', digest))
    #
    if (DEBUG):
        print('Identifier :-> VSFTPD/', digest)
        print('Datatime :->', datetime)
        print('Source IP Addr :->', sourceip)
        print('Username :->', username)
        # Fixed debug-label typo: was 'Command Typpe'.
        print('Command Type :->', commandtype)
        print('Message :->', message)
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
    return digest
#
# -> CEF JSON Event Data Structure is as follows:
# "event": { "syslog": { "_datetime": "Mar 28 19:23:07", "_ident": "4029200141", "_type": "cef",
# "system": { "type": "name", "__text": "server-01" }, "process": { "type": "proc", "__text": "CEF:0" },
# "message": { "type": "proc", "__text": "CEF:0|security|threatmanager|1.0|100|poison ivy trojan infection
# successfully stopped|10|src=10.0.0.1 dst=2.1.2.2 " }, "cef": { "_datetime": "Mar 28 19:23:07",
# "_ident": "7563964276", "_type": "cef", "version": "CEF:0",
# "deviceinfo": "security threatmanager 1.0", "signature": "100",
# "name": { "type": "ascii", "__text": "poison ivy trojan infection successfully stopped" }, "severity": "10",
# | |
# This file is part of the sphinx-cmake_domain Sphinx extension.
# See <https://github.com/marcokoch/sphinx-cmake_domain> for recent information.
#
# Copyright 2020 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from collections import defaultdict
from importlib.metadata import version, PackageNotFoundError
from os import path
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.addnodes import (
desc_annotation, desc_name, desc_optional, desc_parameter,
desc_parameterlist, desc_signature)
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index, ObjType
from sphinx.locale import get_translation
from sphinx.roles import XRefRole
from sphinx.util.docfields import Field, GroupedField
from sphinx.util.logging import getLogger
from sphinx.util.nodes import (
get_node_line, get_node_source, make_id, make_refnode, traverse_parent)
# Resolve the package version: prefer installed distribution metadata and
# fall back to setuptools_scm when running from a source checkout.
try:
    __version__ = version("sphinx-cmake_domain")
except PackageNotFoundError:
    # The package is not installed
    from setuptools_scm import get_version
    __version__ = get_version(root = "..", relative_to = __file__)

# Optional extension for module names
_module_ext = ".cmake"

# gettext catalogs for user-visible strings: `_` uses the default catalog,
# `__` the "console" catalog (used for log/console messages below).
message_catalog = "sphinx-cmake_domain"
_ = get_translation(message_catalog)
__ = get_translation(message_catalog, "console")

# Module-level logger for this extension.
_logger = getLogger(__name__)
# Helper functions
# -------------------------------------------------------------
def _get_index_sort_str(env, name):
"""
Returns a string by which an object with the given name shall be sorted in
indices.
"""
ignored_prefixes = env.config.cmake_index_common_prefix
for prefix in ignored_prefixes:
if name.startswith(prefix) and name != prefix:
return name[len(prefix):]
return name
def _register_node(app, node_type):
"""Helper function for registering our custom node types."""
formats = ["html", "latex", "text", "man", "texinfo"]
tuples = {}
for output_format in formats:
tuples[output_format] = (
lambda tr, node, fmt = output_format: visit_node(tr, node, fmt),
lambda tr, node: depart_node(tr, node))
app.add_node(node_type, **tuples)
# Doctree nodes
# -------------------------------------------------------------
# <-- Insert rant about Sphinx' horrible translator API here --->
# For macro/function descriptions we use some custom nodes instead of the
# builtin ones (desc_parameter etc.), since the builtin translaters produce
# output that is not well suited for the syntax common in CMake world. Using
#custom nodes allows us to provide our own translation logic instead of fiddling
# with internal details of Sphinx' builtin translators. This makes us more less
# dependent on the Sphinx version and is more friendly to users who use their
# own translators.
class desc_cmake_parameterlist(desc_parameterlist):
    """
    Doctree node similar to :cls:`sphinx.addnodes.desc_parameterlist` that
    generates output which is better suited for CMake macro/function parameter
    lists.
    Nodes of this class can't be nested.
    """
    # CMake parameters are separated by spaces, not by the comma default
    # used for Python-style signatures.
    child_text_separator = " "
class desc_cmake_parameter(desc_parameter):
    """
    Doctree node similar to :cls:`sphinx.addnodes.desc_parameter` that
    generates output which is better suited for CMake macro/function parameter
    lists.
    Rendering is handled by the custom visitor functions in
    :cls:`TranslatorState`; the node itself carries no extra state.
    """
    pass
class desc_cmake_keyword(desc_cmake_parameter):
    """
    Doctree node for a single keyword in a CMake macro/function parameter list.
    Nodes of this class can only appear in a :cls:`desc_cmake_parameterlist`.
    Rendered like a parameter, with an extra CSS class in HTML output.
    """
    pass
class desc_cmake_optional(desc_optional):
    """
    Doctree node similar to :cls:`sphinx.addnodes.desc_optional` that generates
    output better suited for CMake macro/function parameter lists.
    Nodes of this class can only appear in a :cls:`desc_cmake_parameterlist`.
    """
    # Children are space-separated, matching CMake call syntax.
    child_text_separator = " "

    def astext(self):
        # Optional groups render wrapped in square brackets.
        inner = super().astext()
        return "[" + inner + "]"
class desc_cmake_group(nodes.Part, nodes.Inline, nodes.FixedTextElement):
    """
    Doctree node for a group of CMake macro/function parameters.
    This is used for complex parameter descriptions such as
    ``OPTION|(KEYWORD <values>...)``.
    Nodes of this class can only appear in a :cls:`desc_cmake_parameterlist`.
    """
    # Grouped children are space-separated, matching CMake call syntax.
    child_text_separator = " "

    def astext(self):
        # Groups render wrapped in parentheses.
        inner = super().astext()
        return "(" + inner + ")"
class desc_cmake_choice(nodes.Part, nodes.Inline, nodes.FixedTextElement):
    """
    Doctree node for a choice of multiple CMake macro/function parameters
    such as ``OPTION_A|OPTION_B``.
    Nodes of this class can only appear in a :cls:`desc_cmake_parameterlist`.
    """
    # Alternatives are rendered separated by a pipe.
    child_text_separator = "|"

    def astext(self):
        parts = [child.astext() for child in self.children]
        return "|".join(parts)
class TranslatorState:
"""
Implements translation logic for our custom nodes.
An instance of this class gets injected into the Sphinx translator.
We do this instead of providing an entirely own set of translators since
the latter would be incompatible with other extensions using custom
translators. Furthermore, this solution should provide some out-of-the-box
support for user-defined translators and non-standard output formats.
For simplicity, this same class is used for all output formats.
"""
@property
def translator(self):
    """The Sphinx translator instance this state was injected into (read-only)."""
    return self._translator
@property
def output_format(self):
    """The output format name (e.g. "html", "latex") of :attr:`translator` (read-only)."""
    return self._output_format
@property
def _param_separator(self):
    """
    The separator string to emit before the next parameter.

    Returns an empty string when the next parameter is the first of its
    group (no separation needed); otherwise the separator currently on top
    of the stack.
    """
    return "" if self._first_param else self._param_seperator_stack[-1]
def __init__(self, translator, output_format):
    """Initialize translation state for *translator* emitting *output_format*."""
    self._translator = translator
    self._output_format = output_format
    # A stack of separators for use between parameter-style nodes.
    # NOTE(review): the code only ever pushes plain separator strings here
    # (the initial " " and node.child_text_separator in
    # visit_desc_cmake_optional), not per-format dictionaries.
    self._param_seperator_stack = [" "]
    # True if the next parameter is the first in a group
    self._first_param = False
    # Current desc_cmake_parameterlist node. None if not inside such node.
    self._paramlist_node = None
def _output(self, output):
    """Forward *output* to :attr:`translator` via its format-specific API.

    Works around the fact that Sphinx' builtin plain-text translator exposes
    ``add_text`` while all other builtin translators append to ``body``.
    """
    if self.output_format == "text":
        self.translator.add_text(output)
        return
    self.translator.body.append(output)
def _check_in_parameterlist(self, current_node):
    """
    Logs an error and raises :cls:`docutils.nodes.SkipNode` if called
    outside a :cls:`desc_cmake_parameterlist` node.
    """
    if not self._paramlist_node:
        _logger.error(
            _("A node of type {node_type} was encountered outside a "
              "{paramlist_node_type}.").format(
                # BUG FIX: was `type(node)` -- `node` is undefined in this
                # scope (the parameter is `current_node`), so the error path
                # itself raised NameError instead of logging the problem.
                node_type = type(current_node),
                paramlist_node_type = desc_cmake_parameterlist),
            location = current_node)
        raise nodes.SkipNode()
def _handle_basic_parameter_visit(self, node):
    """
    Implements some basic logic shared by the visitor functions for
    parameter-style nodes: validates that we are inside a parameter list,
    emits the pending separator, and clears the first-parameter flag.
    """
    self._check_in_parameterlist(node)
    # _param_separator is "" for the first parameter of a group, so nothing
    # is emitted in that case.
    if self._param_separator:
        self._output(self._param_separator)
    self._first_param = False
def visit_desc_cmake_parameterlist(self, node):
    """Visitor function for :cls:`desc_cmake_parameterlist`."""
    # Parameter lists must not nest; report where the outer one came from.
    if self._paramlist_node is not None:
        _logger.error(
            __("Encountered nested {paramlist_node_type} nodes. "
               "Outer node defined here: {source_file}, {line}").format(
                paramlist_node_type = desc_cmake_parameterlist,
                source_file = get_node_source(self._paramlist_node),
                line = get_node_line(self._paramlist_node)),
            location = node)
        raise nodes.SkipNode()
    self._paramlist_node = node
    self._first_param = True
    # Open the list with format-appropriate markup.
    if self.output_format == "html":
        self._output('<span class="sig-paren">(</span>')
    elif self.output_format == "latex":
        # NOTE(review): "}{" presumably closes/opens argument groups of the
        # enclosing LaTeX signature macro -- confirm against Sphinx' LaTeX writer.
        self._output('}{')
    else:
        self._output("(")
def depart_desc_cmake_parameterlist(self, node):
    """Depart function for :cls:`desc_cmake_parameterlist`."""
    self._paramlist_node = None
    # Close the list with format-appropriate markup; plain ")" for every
    # format without special requirements.
    closing = {
        "html": '<span class="sig-paren">)</span>',
        "latex": "}{",
    }.get(self.output_format, ")")
    self._output(closing)
def visit_desc_cmake_parameter(self, node):
    """Visitor function for :cls:`desc_cmake_parameter`."""
    self._handle_basic_parameter_visit(node)
    # Emphasize the parameter unless the node opts out via "noemph";
    # formats other than HTML/LaTeX get no emphasis markup at all.
    if not node.hasattr("noemph"):
        if self.output_format == "html":
            self._output('<em class="sig-param sig-cmake-param">')
        elif self.output_format == "latex":
            self._output(r"\emph{")
def depart_desc_cmake_parameter(self, node):
    """Depart function for :cls:`desc_cmake_parameter`."""
    # Close whatever emphasis markup the visit function opened.
    if not node.hasattr("noemph"):
        if self.output_format == "html":
            self._output("</em>")
        elif self.output_format == "latex":
            self._output("}")
def visit_desc_cmake_keyword(self, node):
    """Visitor function for :cls:`desc_cmake_keyword`."""
    # HTML gets an extra "sig-cmake-keyword" CSS class; every other format
    # renders keywords exactly like plain parameters.
    if self.output_format == "html":
        self._handle_basic_parameter_visit(node)
        if not node.hasattr("noemph"):
            self._output(
                '<em class="sig-param sig-cmake-param sig-cmake-keyword">')
    else:
        self.visit_desc_cmake_parameter(node)
def depart_desc_cmake_keyword(self, node):
    """Depart function for :cls:`desc_cmake_keyword`."""
    # The markup opened on visit is closed exactly like a plain parameter's.
    self.depart_desc_cmake_parameter(node)
def visit_desc_cmake_optional(self, node):
"""Visitor function for :cls:`desc_cmake_optional`."""
self._handle_basic_parameter_visit(node)
self._param_seperator_stack.append(node.child_text_separator)
self._first_param = True
if self.output_format == "html":
self._output('<span class="optional | |
<reponame>orbitalturtle/python-teos
import pytest
from multiprocessing import Event
from shutil import rmtree
from teos.api import API
import common.errors as errors
from teos.watcher import Watcher
from teos.inspector import Inspector
from teos.gatekeeper import UserInfo
from teos.internal_api import InternalAPI
from common.appointment import Appointment, AppointmentStatus
from teos.appointments_dbm import AppointmentsDBM
from teos.responder import Responder
from test.teos.conftest import config, create_txs
from test.teos.unit.conftest import get_random_value_hex, generate_keypair, compute_locator
import common.receipts as receipts
from common.cryptographer import Cryptographer, hash_160
from common.constants import (
HTTP_OK,
HTTP_NOT_FOUND,
HTTP_BAD_REQUEST,
HTTP_SERVICE_UNAVAILABLE,
LOCATOR_LEN_BYTES,
ENCRYPTED_BLOB_MAX_SIZE_HEX,
)
# host:port endpoint of the tower's internal (RPC) API and the public HTTP API URLs.
internal_api_endpoint = "{}:{}".format(config.get("INTERNAL_API_HOST"), config.get("INTERNAL_API_PORT"))
TEOS_API = "http://{}:{}".format(config.get("API_BIND"), config.get("API_PORT"))
register_endpoint = "{}/register".format(TEOS_API)
add_appointment_endpoint = "{}/add_appointment".format(TEOS_API)
get_appointment_endpoint = "{}/get_appointment".format(TEOS_API)
get_all_appointment_endpoint = "{}/get_all_appointments".format(TEOS_API)
get_subscription_info_endpoint = "{}/get_subscription_info".format(TEOS_API)

# Reduce the maximum number of appointments to something we can test faster
MAX_APPOINTMENTS = 100
MULTIPLE_APPOINTMENTS = 10

# A blob two hex chars over the single-slot maximum, so accepting it costs two slots.
TWO_SLOTS_BLOTS = "A" * ENCRYPTED_BLOB_MAX_SIZE_HEX + "AA"

# Module-level registries shared by the tests below.
appointments = {}  # uuid -> appointment data accepted by the tower
locator_dispute_tx_map = {}  # locator -> dispute transaction

# Static keys used throughout the module: one regular user and the tower itself.
user_sk, user_pk = generate_keypair()
user_id = Cryptographer.get_compressed_pk(user_pk)
teos_sk, teos_pk = generate_keypair()
teos_id = Cryptographer.get_compressed_pk(teos_sk.public_key)
# A function that ignores the arguments and returns user_id; used in some tests to mock the result of authenticate_user
def mock_authenticate_user(*args, **kwargs):
    """Stand-in for authenticate_user: ignores all arguments and always returns the module-level user_id."""
    return user_id
@pytest.fixture()
def get_all_db_manager():
    """Function-scoped AppointmentsDBM backed by a temporary directory, removed on teardown."""
    manager = AppointmentsDBM("get_all_tmp_db")
    # NOTE(review): the comment below announces an insert that is never
    # performed -- confirm whether seeding the Responder's last known block
    # was intentionally dropped.
    # Add last known block for the Responder in the db
    yield manager
    manager.db.close()
    rmtree("get_all_tmp_db")
@pytest.fixture(scope="module")
def internal_api(run_bitcoind, db_manager, gatekeeper, carrier, block_processor):
    """Module-scoped InternalAPI wired to a real Watcher/Responder pair.

    Starts the RPC server for the whole module and stops it on teardown.
    """
    responder = Responder(db_manager, gatekeeper, carrier, block_processor)
    watcher = Watcher(
        db_manager, gatekeeper, block_processor, responder, teos_sk, MAX_APPOINTMENTS, config.get("LOCATOR_CACHE_SIZE")
    )
    watcher.last_known_block = block_processor.get_best_block_hash()
    i_api = InternalAPI(watcher, internal_api_endpoint, config.get("INTERNAL_API_WORKERS"), Event())
    i_api.rpc_server.start()
    yield i_api
    i_api.rpc_server.stop(None)
@pytest.fixture(scope="module", autouse=True)
def api():
    """Module-scoped public API (Flask app) talking to the internal API endpoint."""
    inspector = Inspector(config.get("MIN_TO_SELF_DELAY"))
    api = API(inspector, internal_api_endpoint)
    return api
@pytest.fixture()
def app(api):
    """Yields the Flask app inside an active application context."""
    with api.app.app_context():
        yield api.app
@pytest.fixture
def client(app):
    """Flask test client for issuing HTTP requests against the public API."""
    return app.test_client()
@pytest.fixture
def appointment(generate_dummy_appointment):
    """Fresh dummy appointment; its dispute tx is recorded in locator_dispute_tx_map."""
    appointment, dispute_tx = generate_dummy_appointment()
    locator_dispute_tx_map[appointment.locator] = dispute_tx
    return appointment
def add_appointment(client, appointment_data, user_id):
    """POST *appointment_data* to the tower on behalf of *user_id*.

    On HTTP_OK the accepted appointment is recorded in the module-level
    ``appointments`` registry under its uuid (hash of locator + user id).
    Returns the HTTP response either way.
    """
    response = client.post(add_appointment_endpoint, json=appointment_data)
    if response.status_code == HTTP_OK:
        locator = appointment_data.get("appointment").get("locator")
        uuid = hash_160("{}{}".format(locator, user_id))
        appointments[uuid] = appointment_data["appointment"]
    return response
def test_register(internal_api, client):
    """Registering a new user returns slots, an expiry, and a valid tower signature."""
    current_height = internal_api.watcher.block_processor.get_block_count()
    data = {"public_key": user_id}
    r = client.post(register_endpoint, json=data)
    assert r.status_code == HTTP_OK
    assert r.json.get("public_key") == user_id
    assert r.json.get("available_slots") == config.get("SUBSCRIPTION_SLOTS")
    assert r.json.get("subscription_expiry") == current_height + config.get("SUBSCRIPTION_DURATION")
    # The registration receipt must have been signed with the tower's key.
    slots = r.json.get("available_slots")
    expiry = r.json.get("subscription_expiry")
    subscription_receipt = receipts.create_registration_receipt(user_id, slots, expiry)
    rpk = Cryptographer.recover_pk(subscription_receipt, r.json.get("subscription_signature"))
    assert Cryptographer.get_compressed_pk(rpk) == teos_id
def test_register_top_up(internal_api, client):
    """Repeated registration tops up slots and refreshes the expiry each time."""
    # Calling register more than once will give us SUBSCRIPTION_SLOTS * number_of_calls slots.
    # It will also refresh the expiry.
    temp_sk, tmp_pk = generate_keypair()
    tmp_user_id = Cryptographer.get_compressed_pk(tmp_pk)
    current_height = internal_api.watcher.block_processor.get_block_count()
    data = {"public_key": tmp_user_id}
    for i in range(10):
        r = client.post(register_endpoint, json=data)
        slots = r.json.get("available_slots")
        expiry = r.json.get("subscription_expiry")
        assert r.status_code == HTTP_OK
        assert r.json.get("public_key") == tmp_user_id
        # Slots accumulate linearly; the expiry is re-anchored to the current height.
        assert slots == config.get("SUBSCRIPTION_SLOTS") * (i + 1)
        assert expiry == current_height + config.get("SUBSCRIPTION_DURATION")
        # Each top-up must come with a valid tower-signed receipt.
        subscription_receipt = receipts.create_registration_receipt(tmp_user_id, slots, expiry)
        rpk = Cryptographer.recover_pk(subscription_receipt, r.json.get("subscription_signature"))
        assert Cryptographer.get_compressed_pk(rpk) == teos_id
def test_register_no_client_pk(client):
    """Registering without a public key in the request body must fail."""
    response = client.post(register_endpoint, json={})
    assert response.status_code == HTTP_BAD_REQUEST
def test_register_wrong_client_pk(client):
    """Registering with a malformed (doubled-length) public key must fail."""
    response = client.post(register_endpoint, json={"public_key": user_id + user_id})
    assert response.status_code == HTTP_BAD_REQUEST
def test_register_no_json(client):
    """A non-JSON request body must be rejected with INVALID_REQUEST_FORMAT."""
    response = client.post(register_endpoint, data="random_message")
    assert response.status_code == HTTP_BAD_REQUEST
    assert response.json.get("error_code") == errors.INVALID_REQUEST_FORMAT
def test_register_json_no_inner_dict(client):
    """JSON that is not an object (e.g. a bare string) must be rejected."""
    response = client.post(register_endpoint, json="random_message")
    assert response.status_code == HTTP_BAD_REQUEST
    assert response.json.get("error_code") == errors.INVALID_REQUEST_FORMAT
def test_add_appointment(internal_api, client, appointment, block_processor):
    """A properly formatted, signed appointment from a registered user is accepted."""
    # Simulate the user registration (end time does not matter here as long as it is in the future)
    internal_api.watcher.gatekeeper.registered_users[user_id] = UserInfo(
        available_slots=1, subscription_expiry=block_processor.get_block_count() + 1
    )
    # Properly formatted appointment
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert r.status_code == HTTP_OK
    # The single available slot is consumed by the accepted appointment.
    assert r.json.get("available_slots") == 0
    assert r.json.get("start_block") == block_processor.get_block_count()
def test_add_appointment_no_json(client):
    """A non-JSON body sent to add_appointment must be rejected."""
    response = client.post(add_appointment_endpoint, data="random_message")
    assert response.status_code == HTTP_BAD_REQUEST
    assert "Request is not json encoded" in response.json.get("error")
    assert response.json.get("error_code") == errors.INVALID_REQUEST_FORMAT
def test_add_appointment_json_no_inner_dict(client):
    """JSON that is not an object sent to add_appointment must be rejected."""
    response = client.post(add_appointment_endpoint, json="random_message")
    assert response.status_code == HTTP_BAD_REQUEST
    assert "Invalid request content" in response.json.get("error")
    assert response.json.get("error_code") == errors.INVALID_REQUEST_FORMAT
# FIXME: 194 will do with dummy appointment
def test_add_appointment_wrong(internal_api, client, appointment):
    """A well-formed appointment with invalid field data is rejected."""
    # Simulate the user registration (end time does not matter here)
    internal_api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
    # Incorrect appointment (properly formatted, wrong data): a zero
    # to_self_delay is below the tower's minimum.
    appointment.to_self_delay = 0
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert r.status_code == HTTP_BAD_REQUEST
    assert errors.APPOINTMENT_FIELD_TOO_SMALL == r.json.get("error_code")
# FIXME: 194 will do with dummy appointment
def test_add_appointment_not_registered(internal_api, client, appointment):
    """A valid appointment signed by an unregistered user is rejected."""
    # Properly formatted appointment, user is not registered
    tmp_sk, tmp_pk = generate_keypair()
    tmp_user_id = Cryptographer.get_compressed_pk(tmp_pk)
    appointment_signature = Cryptographer.sign(appointment.serialize(), tmp_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, tmp_user_id)
    assert r.status_code == HTTP_BAD_REQUEST
    assert errors.APPOINTMENT_INVALID_SIGNATURE_OR_SUBSCRIPTION_ERROR == r.json.get("error_code")
# FIXME: 194 will do with dummy appointment
def test_add_appointment_registered_no_free_slots(internal_api, client, appointment):
    """A registered user with zero available slots cannot add an appointment."""
    # Empty the user slots (end time does not matter here)
    internal_api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=0, subscription_expiry=0)
    # Properly formatted appointment, user has no available slots
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert r.status_code == HTTP_BAD_REQUEST
    assert errors.APPOINTMENT_INVALID_SIGNATURE_OR_SUBSCRIPTION_ERROR == r.json.get("error_code")
# FIXME: 194 will do with dummy appointment
def test_add_appointment_registered_not_enough_free_slots(internal_api, client, appointment):
    """An appointment larger than the user's remaining slots is rejected."""
    # Give some slots to the user (end time does not matter here)
    internal_api.watcher.gatekeeper.registered_users[user_id] = UserInfo(available_slots=1, subscription_expiry=0)
    # Properly formatted appointment, user has not enough slots
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    # Let's create a big blob (consumes two slots; the user only has one)
    appointment.encrypted_blob = TWO_SLOTS_BLOTS
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert r.status_code == HTTP_BAD_REQUEST
    assert errors.APPOINTMENT_INVALID_SIGNATURE_OR_SUBSCRIPTION_ERROR == r.json.get("error_code")
# FIXME: 194 will do with dummy appointment and block_processor
def test_add_appointment_multiple_times_same_user(
    internal_api, client, appointment, block_processor, n=MULTIPLE_APPOINTMENTS
):
    """Re-sending the same locator from one user acts as an update, not a duplicate."""
    # Multiple appointments with the same locator should be valid and count as updates
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    # Simulate registering enough slots (end time does not matter here)
    internal_api.watcher.gatekeeper.registered_users[user_id] = UserInfo(
        available_slots=n, subscription_expiry=block_processor.get_block_count() + 1
    )
    for _ in range(n):
        r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
        assert r.status_code == HTTP_OK
        # Only one slot is ever consumed, no matter how many updates are sent.
        assert r.json.get("available_slots") == n - 1
        assert r.json.get("start_block") == block_processor.get_block_count()
    # Since all updates came from the same user, only the last one is stored
    assert len(internal_api.watcher.locator_uuid_map[appointment.locator]) == 1
# FIXME: 194 will do with dummy appointment and block_processor
def test_add_appointment_multiple_times_different_users(
    internal_api, client, appointment, block_processor, n=MULTIPLE_APPOINTMENTS
):
    """If the same appointment comes from different users, every copy is kept."""
    # Create user keys and appointment signatures
    user_keys = [generate_keypair() for _ in range(n)]
    signatures = [Cryptographer.sign(appointment.serialize(), sk) for sk, _ in user_keys]
    tmp_user_ids = [Cryptographer.get_compressed_pk(pk) for _, pk in user_keys]
    # Add one slot per public key. Use a local name distinct from the
    # module-level `user_id` so the global is not shadowed.
    for tmp_user_id in tmp_user_ids:
        internal_api.watcher.gatekeeper.registered_users[tmp_user_id] = UserInfo(
            available_slots=1, subscription_expiry=block_processor.get_block_count() + 1
        )
    # Send the appointments
    for compressed_pk, signature in zip(tmp_user_ids, signatures):
        r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": signature}, compressed_pk)
        assert r.status_code == HTTP_OK
        assert r.json.get("available_slots") == 0
        assert r.json.get("start_block") == block_processor.get_block_count()
    # Check that all the appointments have been added and that there are no duplicates
    assert len(set(internal_api.watcher.locator_uuid_map[appointment.locator])) == n
# FIXME: 194 will do with dummy appointment and block_processor
def test_add_appointment_update_same_size(internal_api, client, appointment, block_processor):
    """Updating an appointment with one of equal size consumes no extra slots."""
    # Update an appointment by one of the same size and check that no additional slots are filled
    internal_api.watcher.gatekeeper.registered_users[user_id] = UserInfo(
        available_slots=1, subscription_expiry=block_processor.get_block_count() + 1
    )
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert (
        r.status_code == HTTP_OK
        and r.json.get("available_slots") == 0
        and r.json.get("start_block") == block_processor.get_block_count()
    )
    # The user has no additional slots, but it should be able to update
    # Let's just reverse the encrypted blob for example
    appointment.encrypted_blob = appointment.encrypted_blob[::-1]
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert (
        r.status_code == HTTP_OK
        and r.json.get("available_slots") == 0
        and r.json.get("start_block") == block_processor.get_block_count()
    )
# FIXME: 194 will do with dummy appointment and block_processor
def test_add_appointment_update_bigger(internal_api, client, appointment, block_processor):
    """Updating an appointment with a bigger one consumes the additional slots."""
    # Update an appointment by one bigger, and check additional slots are filled
    internal_api.watcher.gatekeeper.registered_users[user_id] = UserInfo(
        available_slots=2, subscription_expiry=block_processor.get_block_count() + 1
    )
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert r.status_code == HTTP_OK and r.json.get("available_slots") == 1
    # The user has one slot, so it should be able to update as long as it only takes 1 additional slot
    appointment.encrypted_blob = TWO_SLOTS_BLOTS
    appointment_signature = Cryptographer.sign(appointment.serialize(), user_sk)
    r = add_appointment(client, {"appointment": appointment.to_dict(), "signature": appointment_signature}, user_id)
    assert (
        r.status_code == HTTP_OK
        and r.json.get("available_slots") == 0
        and r.json.get("start_block") == block_processor.get_block_count()
    )
# Check that it'll fail if no enough slots | |
<reponame>esoma/woosh<filename>test/python/sample/stdlib/test_operator.py<gh_stars>0
# this file was generated using test/python/sample/generate.py
# python
import io
import pathlib
# pytest
import pytest
# woosh
import woosh
def tokenize_file_like(source):
    """Tokenize *source* bytes presented to woosh through a file-like object."""
    stream = io.BytesIO(source)
    return list(woosh.tokenize(stream))
def tokenize_bytes(source):
    """Tokenize *source* passed to woosh directly as a bytes object."""
    return list(woosh.tokenize(source))
SAMPLE_DIR = pathlib.Path(__file__).parent.absolute() / '../../' / '../../' / 'sample'
@pytest.mark.parametrize('tokenize', [tokenize_file_like, tokenize_bytes])
def test(tokenize):
    """Tokenize the operator.py sample and compare every token against EXPECTED."""
    with open(SAMPLE_DIR / 'stdlib/operator.py', 'rb') as f:
        tokens = tokenize(f.read())
    # zip() silently truncates at the shorter sequence, which would hide
    # missing or extra tokens; fail loudly on a count mismatch first.
    assert len(tokens) == len(EXPECTED)
    for token, expected in zip(tokens, EXPECTED):
        assert token == expected
EXPECTED = [
woosh.Token(woosh.ENCODING, 'utf-8', 1, 0, 1, 0),
woosh.Token(woosh.STRING, '"""\r\nOperator Interface\r\n\r\nThis module exports a set of functions corresponding to the intrinsic\r\noperators of Python. For example, operator.add(x, y) is equivalent\r\nto the expression x+y. The function names are those used for special\r\nmethods; variants without leading and trailing \'__\' are also provided\r\nfor convenience.\r\n\r\nThis is the pure Python implementation of the module.\r\n"""', 1, 0, 11, 3),
woosh.Token(woosh.NEWLINE, '\r\n', 11, 3, 12, 0),
woosh.Token(woosh.NAME, '__all__', 13, 0, 13, 7),
woosh.Token(woosh.OP, '=', 13, 8, 13, 9),
woosh.Token(woosh.OP, '[', 13, 10, 13, 11),
woosh.Token(woosh.STRING, "'abs'", 13, 11, 13, 16),
woosh.Token(woosh.OP, ',', 13, 16, 13, 17),
woosh.Token(woosh.STRING, "'add'", 13, 18, 13, 23),
woosh.Token(woosh.OP, ',', 13, 23, 13, 24),
woosh.Token(woosh.STRING, "'and_'", 13, 25, 13, 31),
woosh.Token(woosh.OP, ',', 13, 31, 13, 32),
woosh.Token(woosh.STRING, "'attrgetter'", 13, 33, 13, 45),
woosh.Token(woosh.OP, ',', 13, 45, 13, 46),
woosh.Token(woosh.STRING, "'concat'", 13, 47, 13, 55),
woosh.Token(woosh.OP, ',', 13, 55, 13, 56),
woosh.Token(woosh.STRING, "'contains'", 13, 57, 13, 67),
woosh.Token(woosh.OP, ',', 13, 67, 13, 68),
woosh.Token(woosh.STRING, "'countOf'", 13, 69, 13, 78),
woosh.Token(woosh.OP, ',', 13, 78, 13, 79),
woosh.Token(woosh.STRING, "'delitem'", 14, 11, 14, 20),
woosh.Token(woosh.OP, ',', 14, 20, 14, 21),
woosh.Token(woosh.STRING, "'eq'", 14, 22, 14, 26),
woosh.Token(woosh.OP, ',', 14, 26, 14, 27),
woosh.Token(woosh.STRING, "'floordiv'", 14, 28, 14, 38),
woosh.Token(woosh.OP, ',', 14, 38, 14, 39),
woosh.Token(woosh.STRING, "'ge'", 14, 40, 14, 44),
woosh.Token(woosh.OP, ',', 14, 44, 14, 45),
woosh.Token(woosh.STRING, "'getitem'", 14, 46, 14, 55),
woosh.Token(woosh.OP, ',', 14, 55, 14, 56),
woosh.Token(woosh.STRING, "'gt'", 14, 57, 14, 61),
woosh.Token(woosh.OP, ',', 14, 61, 14, 62),
woosh.Token(woosh.STRING, "'iadd'", 14, 63, 14, 69),
woosh.Token(woosh.OP, ',', 14, 69, 14, 70),
woosh.Token(woosh.STRING, "'iand'", 14, 71, 14, 77),
woosh.Token(woosh.OP, ',', 14, 77, 14, 78),
woosh.Token(woosh.STRING, "'iconcat'", 15, 11, 15, 20),
woosh.Token(woosh.OP, ',', 15, 20, 15, 21),
woosh.Token(woosh.STRING, "'ifloordiv'", 15, 22, 15, 33),
woosh.Token(woosh.OP, ',', 15, 33, 15, 34),
woosh.Token(woosh.STRING, "'ilshift'", 15, 35, 15, 44),
woosh.Token(woosh.OP, ',', 15, 44, 15, 45),
woosh.Token(woosh.STRING, "'imatmul'", 15, 46, 15, 55),
woosh.Token(woosh.OP, ',', 15, 55, 15, 56),
woosh.Token(woosh.STRING, "'imod'", 15, 57, 15, 63),
woosh.Token(woosh.OP, ',', 15, 63, 15, 64),
woosh.Token(woosh.STRING, "'imul'", 15, 65, 15, 71),
woosh.Token(woosh.OP, ',', 15, 71, 15, 72),
woosh.Token(woosh.STRING, "'index'", 16, 11, 16, 18),
woosh.Token(woosh.OP, ',', 16, 18, 16, 19),
woosh.Token(woosh.STRING, "'indexOf'", 16, 20, 16, 29),
woosh.Token(woosh.OP, ',', 16, 29, 16, 30),
woosh.Token(woosh.STRING, "'inv'", 16, 31, 16, 36),
woosh.Token(woosh.OP, ',', 16, 36, 16, 37),
woosh.Token(woosh.STRING, "'invert'", 16, 38, 16, 46),
woosh.Token(woosh.OP, ',', 16, 46, 16, 47),
woosh.Token(woosh.STRING, "'ior'", 16, 48, 16, 53),
woosh.Token(woosh.OP, ',', 16, 53, 16, 54),
woosh.Token(woosh.STRING, "'ipow'", 16, 55, 16, 61),
woosh.Token(woosh.OP, ',', 16, 61, 16, 62),
woosh.Token(woosh.STRING, "'irshift'", 16, 63, 16, 72),
woosh.Token(woosh.OP, ',', 16, 72, 16, 73),
woosh.Token(woosh.STRING, "'is_'", 17, 11, 17, 16),
woosh.Token(woosh.OP, ',', 17, 16, 17, 17),
woosh.Token(woosh.STRING, "'is_not'", 17, 18, 17, 26),
woosh.Token(woosh.OP, ',', 17, 26, 17, 27),
woosh.Token(woosh.STRING, "'isub'", 17, 28, 17, 34),
woosh.Token(woosh.OP, ',', 17, 34, 17, 35),
woosh.Token(woosh.STRING, "'itemgetter'", 17, 36, 17, 48),
woosh.Token(woosh.OP, ',', 17, 48, 17, 49),
woosh.Token(woosh.STRING, "'itruediv'", 17, 50, 17, 60),
woosh.Token(woosh.OP, ',', 17, 60, 17, 61),
woosh.Token(woosh.STRING, "'ixor'", 17, 62, 17, 68),
woosh.Token(woosh.OP, ',', 17, 68, 17, 69),
woosh.Token(woosh.STRING, "'le'", 17, 70, 17, 74),
woosh.Token(woosh.OP, ',', 17, 74, 17, 75),
woosh.Token(woosh.STRING, "'length_hint'", 18, 11, 18, 24),
woosh.Token(woosh.OP, ',', 18, 24, 18, 25),
woosh.Token(woosh.STRING, "'lshift'", 18, 26, 18, 34),
woosh.Token(woosh.OP, ',', 18, 34, 18, 35),
woosh.Token(woosh.STRING, "'lt'", 18, 36, 18, 40),
woosh.Token(woosh.OP, ',', 18, 40, 18, 41),
woosh.Token(woosh.STRING, "'matmul'", 18, 42, 18, 50),
woosh.Token(woosh.OP, ',', 18, 50, 18, 51),
woosh.Token(woosh.STRING, "'methodcaller'", 18, 52, 18, 66),
woosh.Token(woosh.OP, ',', 18, 66, 18, 67),
woosh.Token(woosh.STRING, "'mod'", 18, 68, 18, 73),
woosh.Token(woosh.OP, ',', 18, 73, 18, 74),
woosh.Token(woosh.STRING, "'mul'", 19, 11, 19, 16),
woosh.Token(woosh.OP, ',', 19, 16, 19, 17),
woosh.Token(woosh.STRING, "'ne'", 19, 18, 19, 22),
woosh.Token(woosh.OP, ',', 19, 22, 19, 23),
woosh.Token(woosh.STRING, "'neg'", 19, 24, 19, 29),
woosh.Token(woosh.OP, ',', 19, 29, 19, 30),
woosh.Token(woosh.STRING, "'not_'", 19, 31, 19, 37),
woosh.Token(woosh.OP, ',', 19, 37, 19, 38),
woosh.Token(woosh.STRING, "'or_'", 19, 39, 19, 44),
woosh.Token(woosh.OP, ',', 19, 44, 19, 45),
woosh.Token(woosh.STRING, "'pos'", 19, 46, 19, 51),
woosh.Token(woosh.OP, ',', 19, 51, 19, 52),
woosh.Token(woosh.STRING, "'pow'", 19, 53, 19, 58),
woosh.Token(woosh.OP, ',', 19, 58, 19, 59),
woosh.Token(woosh.STRING, "'rshift'", 19, 60, 19, 68),
woosh.Token(woosh.OP, ',', 19, 68, 19, 69),
woosh.Token(woosh.STRING, "'setitem'", 20, 11, 20, 20),
woosh.Token(woosh.OP, ',', 20, 20, 20, 21),
woosh.Token(woosh.STRING, "'sub'", 20, 22, 20, 27),
woosh.Token(woosh.OP, ',', 20, 27, 20, 28),
woosh.Token(woosh.STRING, "'truediv'", 20, 29, 20, 38),
woosh.Token(woosh.OP, ',', 20, 38, 20, 39),
woosh.Token(woosh.STRING, "'truth'", 20, 40, 20, 47),
woosh.Token(woosh.OP, ',', 20, 47, 20, 48),
woosh.Token(woosh.STRING, "'xor'", 20, 49, 20, 54),
woosh.Token(woosh.OP, ']', 20, 54, 20, 55),
woosh.Token(woosh.NEWLINE, '\r\n', 20, 55, 21, 0),
woosh.Token(woosh.NAME, 'from', 22, 0, 22, 4),
woosh.Token(woosh.NAME, 'builtins', 22, 5, 22, 13),
woosh.Token(woosh.NAME, 'import', 22, 14, 22, 20),
woosh.Token(woosh.NAME, 'abs', 22, 21, 22, 24),
woosh.Token(woosh.NAME, 'as', 22, 25, 22, 27),
woosh.Token(woosh.NAME, '_abs', 22, 28, 22, 32),
woosh.Token(woosh.NEWLINE, '\r\n', 22, 32, 23, 0),
woosh.Token(woosh.COMMENT, '# Comparison Operations *******************************************************#', 25, 0, 25, 80),
woosh.Token(woosh.NAME, 'def', 27, 0, 27, 3),
woosh.Token(woosh.NAME, 'lt', 27, 4, 27, 6),
woosh.Token(woosh.OP, '(', 27, 6, 27, 7),
woosh.Token(woosh.NAME, 'a', 27, 7, 27, 8),
woosh.Token(woosh.OP, ',', 27, 8, 27, 9),
woosh.Token(woosh.NAME, 'b', 27, 10, 27, 11),
woosh.Token(woosh.OP, ')', 27, 11, 27, 12),
woosh.Token(woosh.OP, ':', 27, 12, 27, 13),
woosh.Token(woosh.NEWLINE, '\r\n', 27, 13, 28, 0),
woosh.Token(woosh.INDENT, ' ', 28, 0, 28, 4),
woosh.Token(woosh.STRING, '"Same as a < b."', 28, 4, 28, 20),
woosh.Token(woosh.NEWLINE, '\r\n', 28, 20, 29, 0),
woosh.Token(woosh.NAME, 'return', 29, 4, 29, 10),
woosh.Token(woosh.NAME, 'a', 29, 11, 29, 12),
woosh.Token(woosh.OP, '<', 29, 13, 29, 14),
woosh.Token(woosh.NAME, 'b', 29, 15, 29, 16),
woosh.Token(woosh.NEWLINE, '\r\n', 29, 16, 30, 0),
woosh.Token(woosh.DEDENT, '', 31, 0, 31, 0),
woosh.Token(woosh.NAME, 'def', 31, 0, 31, 3),
woosh.Token(woosh.NAME, 'le', 31, 4, 31, 6),
woosh.Token(woosh.OP, '(', 31, 6, 31, 7),
woosh.Token(woosh.NAME, 'a', 31, 7, 31, 8),
woosh.Token(woosh.OP, ',', 31, 8, 31, 9),
woosh.Token(woosh.NAME, 'b', 31, 10, 31, 11),
woosh.Token(woosh.OP, ')', 31, 11, 31, 12),
woosh.Token(woosh.OP, ':', 31, 12, 31, 13),
woosh.Token(woosh.NEWLINE, '\r\n', 31, 13, 32, 0),
woosh.Token(woosh.INDENT, ' ', 32, 0, 32, 4),
woosh.Token(woosh.STRING, '"Same as a <= b."', 32, 4, 32, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 32, 21, 33, 0),
woosh.Token(woosh.NAME, 'return', 33, 4, 33, 10),
woosh.Token(woosh.NAME, 'a', 33, 11, 33, 12),
woosh.Token(woosh.OP, '<=', 33, 13, 33, 15),
woosh.Token(woosh.NAME, 'b', 33, 16, 33, 17),
woosh.Token(woosh.NEWLINE, '\r\n', 33, 17, 34, 0),
woosh.Token(woosh.DEDENT, '', 35, 0, 35, 0),
woosh.Token(woosh.NAME, 'def', 35, 0, 35, 3),
woosh.Token(woosh.NAME, 'eq', 35, 4, 35, 6),
woosh.Token(woosh.OP, '(', 35, 6, 35, 7),
woosh.Token(woosh.NAME, 'a', 35, 7, 35, 8),
woosh.Token(woosh.OP, ',', 35, 8, 35, 9),
woosh.Token(woosh.NAME, 'b', 35, 10, 35, 11),
woosh.Token(woosh.OP, ')', 35, 11, 35, 12),
woosh.Token(woosh.OP, ':', 35, 12, 35, 13),
woosh.Token(woosh.NEWLINE, '\r\n', 35, 13, 36, 0),
woosh.Token(woosh.INDENT, ' ', 36, 0, 36, 4),
woosh.Token(woosh.STRING, '"Same as a == b."', 36, 4, 36, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 36, 21, 37, 0),
woosh.Token(woosh.NAME, 'return', 37, 4, 37, 10),
woosh.Token(woosh.NAME, 'a', 37, 11, 37, 12),
woosh.Token(woosh.OP, '==', 37, 13, 37, 15),
woosh.Token(woosh.NAME, 'b', 37, 16, 37, 17),
woosh.Token(woosh.NEWLINE, '\r\n', 37, 17, 38, 0),
woosh.Token(woosh.DEDENT, '', 39, 0, 39, 0),
woosh.Token(woosh.NAME, 'def', 39, 0, 39, 3),
woosh.Token(woosh.NAME, 'ne', 39, 4, 39, 6),
woosh.Token(woosh.OP, '(', 39, 6, 39, 7),
woosh.Token(woosh.NAME, 'a', 39, 7, 39, 8),
woosh.Token(woosh.OP, ',', 39, 8, 39, 9),
woosh.Token(woosh.NAME, 'b', 39, 10, 39, 11),
woosh.Token(woosh.OP, ')', 39, 11, 39, 12),
woosh.Token(woosh.OP, ':', 39, 12, 39, 13),
woosh.Token(woosh.NEWLINE, '\r\n', 39, 13, 40, 0),
woosh.Token(woosh.INDENT, ' ', 40, 0, 40, 4),
woosh.Token(woosh.STRING, '"Same as a != b."', 40, 4, 40, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 40, 21, 41, 0),
woosh.Token(woosh.NAME, 'return', 41, 4, 41, 10),
woosh.Token(woosh.NAME, 'a', 41, 11, 41, 12),
woosh.Token(woosh.OP, '!=', 41, 13, 41, 15),
woosh.Token(woosh.NAME, 'b', 41, 16, 41, 17),
woosh.Token(woosh.NEWLINE, '\r\n', 41, 17, 42, 0),
woosh.Token(woosh.DEDENT, '', 43, 0, 43, 0),
woosh.Token(woosh.NAME, 'def', 43, 0, 43, 3),
woosh.Token(woosh.NAME, 'ge', 43, 4, 43, 6),
woosh.Token(woosh.OP, '(', 43, 6, 43, 7),
woosh.Token(woosh.NAME, 'a', 43, 7, 43, 8),
woosh.Token(woosh.OP, ',', 43, 8, 43, 9),
woosh.Token(woosh.NAME, 'b', 43, 10, 43, 11),
woosh.Token(woosh.OP, ')', 43, 11, 43, 12),
woosh.Token(woosh.OP, ':', 43, 12, 43, 13),
woosh.Token(woosh.NEWLINE, '\r\n', 43, 13, 44, 0),
woosh.Token(woosh.INDENT, ' ', 44, 0, 44, 4),
woosh.Token(woosh.STRING, '"Same as a >= b."', 44, 4, 44, 21),
woosh.Token(woosh.NEWLINE, '\r\n', 44, 21, 45, 0),
woosh.Token(woosh.NAME, 'return', 45, 4, 45, 10),
woosh.Token(woosh.NAME, 'a', 45, 11, 45, 12),
woosh.Token(woosh.OP, '>=', 45, 13, 45, 15),
woosh.Token(woosh.NAME, 'b', 45, 16, 45, 17),
woosh.Token(woosh.NEWLINE, '\r\n', 45, 17, 46, 0),
woosh.Token(woosh.DEDENT, '', 47, 0, 47, 0),
woosh.Token(woosh.NAME, 'def', 47, 0, 47, 3),
woosh.Token(woosh.NAME, 'gt', 47, 4, 47, 6),
woosh.Token(woosh.OP, '(', 47, 6, 47, 7),
woosh.Token(woosh.NAME, 'a', 47, 7, 47, 8),
woosh.Token(woosh.OP, ',', 47, 8, 47, 9),
woosh.Token(woosh.NAME, 'b', 47, 10, 47, 11),
woosh.Token(woosh.OP, ')', 47, 11, 47, 12),
woosh.Token(woosh.OP, ':', 47, 12, 47, 13),
woosh.Token(woosh.NEWLINE, '\r\n', 47, 13, 48, 0),
woosh.Token(woosh.INDENT, ' ', 48, 0, 48, 4),
woosh.Token(woosh.STRING, '"Same as a > b."', 48, 4, 48, 20),
woosh.Token(woosh.NEWLINE, '\r\n', 48, 20, 49, 0),
woosh.Token(woosh.NAME, 'return', 49, 4, 49, 10),
woosh.Token(woosh.NAME, 'a', 49, 11, 49, 12),
woosh.Token(woosh.OP, '>', 49, 13, 49, 14),
woosh.Token(woosh.NAME, 'b', 49, 15, 49, 16),
woosh.Token(woosh.NEWLINE, '\r\n', | |
.. ... 'axis': 'Y'},
.. ... data=cf.Data([15, 45, 75]),
.. ... bounds=cf.Bounds(data=cf.Data([[0, 30], [30, 60], [60, 90]]))
.. ... ),
.. ... axes=f.set_construct(cf.DomainAxis(size=3))
.. ... )
.. >>> lon = f.set_construct(
.. ... cf.DimensionCoordinate(
.. ... properties={'standard_name': 'longitude',
.. ... 'units': 'degrees_east',
.. ... 'axis': 'X'},
.. ... data=cf.Data([30, 90, 150]),
.. ... bounds=cf.Bounds(data=cf.Data([[0, 60], [60, 120], [120, 180]]))
.. ... ),
.. ... axes=f.set_construct(cf.DomainAxis(size=3))
.. ... )
.. >>> alt = f.set_construct(
.. ... cf.DimensionCoordinate(
.. ... properties={'standard_name': 'altitude',
.. ... 'units': 'm',
.. ... 'axis': 'Z'},
.. ... data=cf.Data([10]),
.. ... bounds=cf.Bounds(data=cf.Data([[0, 20]]))
.. ... ),
.. ... axes=f.set_construct(cf.DomainAxis(size=1))
.. ... )
.. >>> sd = LatLonGrid.from_field(f)
.. >>> print(sd)
.. LatLonGrid(
.. shape {Z, Y, X}: (1, 3, 3)
.. Z, altitude (1,): [10] m
.. Y, latitude (3,): [15, 45, 75] degrees_north
.. X, longitude (3,): [30, 90, 150] degrees_east
.. Z_bounds (1, 2): [[0, 20]] m
.. Y_bounds (3, 2): [[0, ..., 90]] degrees_north
.. X_bounds (3, 2): [[0, ..., 180]] degrees_east
.. )
"""
return super(LatLonGrid, cls).from_field(field)
class RotatedLatLonGrid(Grid):
    """This class characterises the spatial dimension for a `Component`
    as a regular grid on a spherical domain whose coordinates are
    latitudes and longitudes, and whose rotation axis is not aligned
    with the North pole. Its ellipsoid and datum are those of WGS 84
    (see `EPSG:4326 <https://epsg.io/4326>`_).
    """
    # characteristics of the dimension coordinates
    # expected coordinate names for the Z/Y/X axes of the rotated grid
    _Z_name = 'altitude'
    _Y_name = 'grid_latitude'
    _X_name = 'grid_longitude'
    # accepted unit spellings per axis (first entry is the canonical one)
    _Z_units = ['m', 'metre', 'meter', 'metres', 'meters']
    _Y_units = ['degrees', 'degree']
    _X_units = ['degrees', 'degree']
    # valid coordinate ranges per axis (None means unbounded)
    _Z_limits = None
    _Y_limits = (-90, 90)
    _X_limits = (-180, 180)
    # contiguity of lower and upper limits
    # NOTE(review): presumably whether a domain touching both limits is
    # considered continuous across them — confirm against Grid._set_space
    _Z_limits_contiguous = False
    _Y_limits_contiguous = True
    _X_limits_contiguous = True
    # allow domain to wrap around limits
    _Z_wrap_around = False
    _Y_wrap_around = False
    _X_wrap_around = True
def __init__(
self,
grid_latitude, grid_longitude,
grid_latitude_bounds, grid_longitude_bounds,
grid_north_pole_latitude, grid_north_pole_longitude,
north_pole_grid_longitude=0.,
# altitude=None, altitude_bounds=None
):
"""**Instantiation**
:Parameters:
grid_latitude: one-dimensional array-like object
The array of latitude coordinates in degrees defining
a spatial dimension of the domain. May be any type that
can be cast to a `numpy.ndarray`. Must contain numerical
values.
*Parameter example:* ::
grid_latitude=[0.88, 0.44, 0., -0.44, -0.88]
grid_longitude: one-dimensional array-like object
The array of longitude coordinates in degrees defining
a spatial dimension of the domain. May be any type that
can be cast to a `numpy.ndarray`. Must contain numerical
values.
*Parameter example:* ::
grid_longitude=[-2.5, -2.06, -1.62, -1.18]
grid_latitude_bounds: two-dimensional array-like object
The array of latitude coordinate bounds in degrees
defining the extent of the grid cell around the
coordinate. May be any type that can be cast to a
`numpy.ndarray`. Must be two dimensional with the first
dimension equal to the size of *grid_latitude* and the
second dimension equal to 2. Must contain numerical
values.
*Parameter example:* ::
grid_latitude_bounds=[[1.1, 0.66], [0.66, 0.22],
[0.22, -0.22], [-0.22, -0.66],
[-0.66, -1.1]]
grid_longitude_bounds: two-dimensional array-like object
The array of longitude coordinate bounds in degrees
defining the extent of the grid cell around the
coordinate. May be any type that can be cast to a
`numpy.ndarray`. Must be two dimensional with the first
dimension equal to the size of *grid_longitude* and the
second dimension equal to 2. Must contain numerical
values.
*Parameter example:* ::
grid_longitude_bounds=[[-2.72, -2.28], [-2.28, -1.84],
[-1.84, -1.4], [-1.4, -0.96]]
grid_north_pole_latitude: `int` or `float`
The true latitude (i.e. in `EPSG:4326`_) of the north
pole of the rotated grid in degrees North. This parameter
is required to project the rotated grid into a true
latitude-longitude coordinate system.
grid_north_pole_longitude: `int` or `float`
The true longitude (i.e. in `EPSG:4326`_) of the north
pole of the rotated grid in degrees East. This parameter
is required to project the rotated grid into a true
latitude-longitude coordinate system.
north_pole_grid_longitude: `int` or `float`, optional
The longitude of the true north pole (i.e. in `EPSG:4326`_)
in the rotated grid in degrees. This parameter is
optional to project the rotated grid into a true
latitude-longitude coordinate system. If not provided,
set to default value 0.
.. altitude: one-dimensional array-like object, optional
The array of altitude coordinates in metres defining a
spatial dimension of the domain (with upwards as the
positive direction). May be any type that can be cast to
a `numpy.ndarray`. Must contain numerical values.
Ignored if *altitude_bounds* not also provided.
*Parameter example:* ::
altitude=[10]
.. altitude_bounds: two-dimensional array-like object, optional
The array of altitude coordinate bounds in metres
defining the extent of the grid cell around the
coordinate (with upwards as the positive direction).
May be any type that can be cast to a `numpy.ndarray`.
Must be two dimensional with the first dimension equal
to the size of *altitude* and the second dimension equal
to 2. Must contain numerical values. Ignored if
*altitude* not also provided.
*Parameter example:* ::
altitude_bounds=[[0, 20]]
.. _`EPSG:4326`: https://epsg.io/4326
**Examples**
Instantiate 2D grid using lists:
>>> sd = RotatedLatLonGrid(
... grid_latitude=[-0.88, -0.44, 0., 0.44, 0.88],
... grid_longitude=[-2.5, -2.06, -1.62, -1.18],
... grid_latitude_bounds=[[-1.1, -0.66], [-0.66, -0.22], [-0.22, 0.22],
... [0.22, 0.66], [0.66, 1.1]],
... grid_longitude_bounds=[[-2.72, -2.28], [-2.28, -1.84],
... [-1.84, -1.4], [-1.4, -0.96]],
... grid_north_pole_latitude=38.0,
... grid_north_pole_longitude=190.0,
... )
>>> print(sd)
RotatedLatLonGrid(
shape {Y, X}: (5, 4)
Y, grid_latitude (5,): [-0.88, ..., 0.88] degrees
X, grid_longitude (4,): [-2.5, ..., -1.18] degrees
Y_bounds (5, 2): [[-1.1, ..., 1.1]] degrees
X_bounds (4, 2): [[-2.72, ..., -0.96]] degrees
)
.. Instantiate 3D grid using lists:
..
.. >>> sd = RotatedLatLonGrid(
.. ... grid_latitude=[-0.88, -0.44, 0., 0.44, 0.88],
.. ... grid_longitude=[-2.5, -2.06, -1.62, -1.18],
.. ... grid_latitude_bounds=[[-1.1, -0.66], [-0.66, -0.22], [-0.22, 0.22],
.. ... [0.22, 0.66], [0.66, 1.1]],
.. ... grid_longitude_bounds=[[-2.72, -2.28], [-2.28, -1.84],
.. ... [-1.84, -1.4], [-1.4, -0.96]],
.. ... grid_north_pole_latitude=38.0,
.. ... grid_north_pole_longitude=190.0,
.. ... altitude=[10],
.. ... altitude_bounds=[[0, 20]]
.. ... )
.. >>> print(sd)
.. RotatedLatLonGrid(
.. shape {Y, X}: (1, 5, 4)
.. Y, grid_latitude (5,): [-0.88, ..., 0.88] degrees
.. X, grid_longitude (4,): [-2.5, ..., -1.18] degrees
.. Z_bounds (1, 2): [[0, 20]] m
.. Y_bounds (5, 2): [[-1.1, ..., 1.1]] degrees
.. X_bounds (4, 2): [[-2.72, ..., -0.96]] degrees
.. )
"""
super(RotatedLatLonGrid, self).__init__()
# TODO: reintroduce Z dimension when 3D components
# are effectively supported
# if altitude is not None and altitude_bounds is not None:
# self._set_space(altitude, altitude_bounds, name=self._Z_name,
# units=self._Z_units[0], axis='Z',
# limits=self._Z_limits, wrap_around=self._Z_wrap_around)
# self._f.dim('Z').set_property('positive', 'up')
self._set_space(grid_latitude, grid_latitude_bounds,
name=self._Y_name, units=self._Y_units[0], axis='Y',
limits=self._Y_limits, wrap_around=self._Y_wrap_around)
self._set_space(grid_longitude, grid_longitude_bounds,
name=self._X_name, units=self._X_units[0], axis='X',
limits=self._X_limits, wrap_around=self._X_wrap_around)
self._rotate_and_set_lat_lon(grid_north_pole_latitude,
grid_north_pole_longitude,
north_pole_grid_longitude)
self._set_crs_parameters(grid_north_pole_latitude,
grid_north_pole_longitude,
north_pole_grid_longitude)
# set dummy data needed for using inner field for remapping
self._set_dummy_data()
@classmethod
def from_extent_and_resolution(
cls,
grid_latitude_extent, grid_longitude_extent,
grid_latitude_resolution, grid_longitude_resolution,
grid_north_pole_latitude, grid_north_pole_longitude,
north_pole_grid_longitude=0.,
grid_latitude_grid_longitude_location='centre',
# altitude_extent=None,
# altitude_resolution=None,
# altitude_location='centre'
):
"""Instantiate a `RotatedLatLonGrid` from the extent and the
resolution of grid_latitude and grid_longitude coordinates (and
optional altitude coordinates).
:Parameters:
grid_latitude_extent: pair of `float` or `int`
The extent of grid_latitude coordinates in degrees for
the desired grid. The first element of the pair is the
location of the start of the extent along the
grid_latitude coordinate, the second element of the pair
is the location of the end of the extent along the
grid_latitude coordinate. Extent must be oriented
positively. May be any type that can be unpacked (e.g.
`tuple`, `list`, `numpy.ndarray`).
*Parameter example:* ::
grid_latitude_extent=(30, 70)
grid_longitude_extent: pair of `float` or `int`
The extent of grid_longitude coordinates in degrees for
the desired grid. The first element of the pair is the
location of the start of the extent along the
grid_longitude coordinate, the second element of the
pair is the location of the end of the extent along the
grid_latitude coordinate. Extent must be oriented
positively. May be any type that can be unpacked (e.g.
`tuple`, `list`, `numpy.ndarray`).
*Parameter example:* ::
grid_longitude_extent=(0, 90)
grid_latitude_resolution: `float` or `int`
The spacing between two consecutive grid_latitude
coordinates in degrees for the desired grid. Must be
positive.
*Parameter example:* ::
grid_latitude_resolution=10
grid_longitude_resolution: `float` or `int`
The spacing between two consecutive | |
'0'
else:
string += str(letter)
return string
def channels_preview_update(self):
if not (isinstance(self.gl.pyimage, list) and len(self.gl.pyimage) != 0):
return
pyimage = self.gl.pyimage[0]
if self.gl.input_format_24bit.get():
pyimage = swap_channels24_c(pyimage, self.channel_get_mode())
pyimage = pygame.transform.smoothscale(pyimage, (64, 64))
# CONVERT PYGAME SURFACE TO TKINTER PHOTOIMAGE
tkinter_image = pygame_to_tkinter(pyimage, 64, 64)
self.rgb_channel_preview.image = tkinter_image
self.rgb_channel_preview.config(image=tkinter_image)
self.rgb_channel_preview.update()
else:
pyimage = swap_channels32_c(pyimage, self.channel_get_mode())
pyimage = pygame.transform.smoothscale(pyimage, (64, 64))
# CONVERT PYGAME SURFACE TO TKINTER PHOTOIMAGE
tkinter_image = pygame_to_tkinter(pyimage, 64, 64)
self.rgb_channel_preview.image = tkinter_image
self.rgb_channel_preview.config(image=tkinter_image)
self.rgb_channel_preview.update()
def rgb_channel_effect(self):
rgb_channels_labelframe = LabelFrame(
self.effect_labelframe, text="RGB channels", bg=self.gl.bkcolor, width=475, height=110, bd=2)
rgb_channels_labelframe.place(x=0, y=600)
channels_checkbutton = Checkbutton(
rgb_channels_labelframe, text='Channels', bg=self.gl.bkcolor, variable=self.gl.channel_checkbox,
onvalue=1, offvalue=0, command=dummy)
channels_checkbutton.place(x=5, y=25)
channels_balloon = Pmw.Balloon(self.root)
channels_balloon.bind(channels_checkbutton, "Select RGB channels effect")
def validate_data_channel(w):
self.channels_preview_update()
return True
vcmd_channel = (rgb_channels_labelframe.register(validate_data_channel), '%V')
Label(rgb_channels_labelframe, text="conversion mode", bg=self.gl.bkcolor).place(x=90, y=0)
self.channel_conversion = ttk.Combobox(
rgb_channels_labelframe, textvariable=self.gl.widget_var,
values=["RGB", "RBG", "GRB", "BRG", "BGR", "GBR"], state='readonly', width=10,
justify='left', validate='all', validatecommand=vcmd_channel)
self.channel_conversion.current(0)
self.channel_conversion.place(x=90, y=25)
channels_conversion_balloon = Pmw.Balloon(self.root)
channels_conversion_balloon.bind(self.channel_conversion, "Select image format")
active_channels = LabelFrame(
rgb_channels_labelframe, text="Active channels", bg=self.gl.bkcolor,
width=130, height=60, bd=2)
active_channels.place(x=200, y=5)
red_channel = Checkbutton(
active_channels, text='R', bg=self.gl.bkcolor, variable=self.gl.red_channel, onvalue=1,
offvalue=0, command=self.channels_preview_update)
red_channel.place(x=5, y=5)
red_channel_balloon = Pmw.Balloon(self.root)
red_channel_balloon.bind(red_channel, "Select or unselect the red channel")
green_channel = Checkbutton(
active_channels, text='G', bg=self.gl.bkcolor, variable=self.gl.green_channel, onvalue=1,
offvalue=0, command=self.channels_preview_update)
green_channel.place(x=45, y=5)
green_channel_balloon = Pmw.Balloon(self.root)
green_channel_balloon.bind(green_channel, "Select or unselect the green channel")
blue_channel = Checkbutton(
active_channels, text='B', bg=self.gl.bkcolor, variable=self.gl.blue_channel,
onvalue=1, offvalue=0, command=self.channels_preview_update)
blue_channel.place(x=85, y=5)
blue_channel_balloon = Pmw.Balloon(self.root)
blue_channel_balloon.bind(blue_channel, "Select or unselect the blue channel")
self.rgb_channel_preview = Label(rgb_channels_labelframe, bg=self.gl.bkcolor,
relief=RAISED, image=self.gl.preview_image)
self.rgb_channel_preview.place(x=360, y=10)
    def rgb_split_preview_update(self):
        """Refresh the 64x64 RGB-split preview thumbnail.

        Splits the first loaded sprite into red/green/blue layers,
        re-composites the enabled layers with increasing x/y offsets
        (0, 1x, 2x) onto an enlarged surface, and pushes the downscaled
        result into the tkinter preview label. Does nothing when no
        sprite is loaded.
        """
        if not (isinstance(self.gl.pyimage, list) and len(self.gl.pyimage) != 0):
            return
        pyimage = self.gl.pyimage[0]
        # per-layer shift, read from the GUI entry fields
        x_offset = int(self.gl.rgbsplitxoffset.get())
        y_offset = int(self.gl.rgbsplityoffset.get())
        w, h = pyimage.get_size()
        if self.gl.input_format_24bit.get():
            # 24-bit path: opaque destination surface, enlarged so the
            # shifted layers fit
            new_surface = pygame.Surface((w + 2 * x_offset, h + 2 * y_offset))
            new_surface.fill((0, 0, 0, 0))
            surf = new_surface
            red_layer, green_layer, blue_layer = rgb_split_channels(pyimage)
            # additive blits so overlapping layers recombine to the original colour
            if self.gl.split_red_checkbox.get() == 1:
                surf.blit(red_layer, (0, 0), special_flags=pygame.BLEND_RGB_ADD)
            if self.gl.split_green_checkbox.get() == 1:
                surf.blit(green_layer, (x_offset, y_offset), special_flags=pygame.BLEND_RGB_ADD)
            if self.gl.split_blue_checkbox.get() == 1:
                surf.blit(blue_layer, (x_offset * 2, y_offset * 2),
                          special_flags=pygame.BLEND_RGB_ADD)
            pyimage = pygame.transform.smoothscale(surf, (64, 64))
            # CONVERT PYGAME SURFACE TO TKINTER PHOTOIMAGE
            tkinter_image = pygame_to_tkinter(pyimage, 64, 64)
            # keep a reference on the widget so the PhotoImage is not collected
            self.rgbsplit_preview.image = tkinter_image
            self.rgbsplit_preview.config(image=tkinter_image)
            self.rgbsplit_preview.update()
        else:
            # 32-bit path: per-pixel-alpha destination surface
            new_surface = pygame.Surface((w + 2 * x_offset, h + 2 * y_offset), pygame.SRCALPHA)
            new_surface.fill((0, 0, 0, 0))
            new_surface = new_surface.convert_alpha()
            red_layer, green_layer, blue_layer = rgb_split_channels_alpha(pyimage)
            # NOTE(review): the red layer is blitted without BLEND_RGB_ADD
            # while green/blue use it — confirm this asymmetry is intended
            if self.gl.split_red_checkbox.get() == 1:
                new_surface.blit(red_layer, (0, 0))
            if self.gl.split_green_checkbox.get() == 1:
                new_surface.blit(green_layer.convert_alpha(), (x_offset, y_offset), special_flags=pygame.BLEND_RGB_ADD)
            if self.gl.split_blue_checkbox.get() == 1:
                new_surface.blit(blue_layer.convert_alpha(),
                                 (x_offset * 2, y_offset * 2), special_flags=pygame.BLEND_RGB_ADD)
            #
            # NOTE(review): refresh() is invoked only in this 32-bit branch,
            # before the thumbnail update — verify this side effect is wanted
            self.refresh(new_surface)
            new_surface = new_surface.convert_alpha()
            pyimage = pygame.transform.smoothscale(new_surface, (64, 64))
            # CONVERT PYGAME SURFACE TO TKINTER PHOTOIMAGE
            tkinter_image = pygame_to_tkinter(pyimage, 64, 64)
            self.rgbsplit_preview.image = tkinter_image
            self.rgbsplit_preview.config(image=tkinter_image)
            self.rgbsplit_preview.update()
def rgb_split_effect(self):
rgpsplit_labelframe = LabelFrame(
self.effect_labelframe, text="RGB split", bg=self.gl.bkcolor,
width=475, height=110, bd=2)
rgpsplit_labelframe.place(x=480, y=5)
self.rgb_split = Checkbutton(
rgpsplit_labelframe, text='RGB split', bg=self.gl.bkcolor, variable=self.gl.rgbsplit_checkbox,
onvalue=1, offvalue=0, command=dummy)
self.rgb_split.place(x=5, y=25)
rgb_split_balloon = Pmw.Balloon(self.root)
rgb_split_balloon.bind(self.rgb_split, "Create RGB split effect")
Label(rgpsplit_labelframe, bg=self.gl.bkcolor, text="x offset").place(x=80, y=10)
x_offset = Entry(rgpsplit_labelframe, width=4, bd=2, textvariable=self.gl.rgbsplitxoffset)
x_offset.place(x=130, y=10)
x_offset.bind('<FocusOut>', lambda event: self.rgb_split_preview_update())
x_offset_balloon = Pmw.Balloon(self.root)
x_offset_balloon.bind(x_offset, "X offset for layer RGB")
Label(rgpsplit_labelframe, bg=self.gl.bkcolor, text="y offset").place(x=80, y=40)
y_offset = Entry(rgpsplit_labelframe, validate="key", width=4, bd=2, textvariable=self.gl.rgbsplityoffset)
y_offset.place(x=130, y=40)
y_offset.bind('<FocusOut>', lambda event: self.rgb_split_preview_update())
y_offset_balloon = Pmw.Balloon(self.root)
y_offset_balloon.bind(y_offset, "Y offset for layer RGB")
split_red = Checkbutton(
rgpsplit_labelframe, text='Red', bg=self.gl.bkcolor, variable=self.gl.split_red_checkbox,
onvalue=1, offvalue=0, command=self.rgb_split_preview_update)
split_red.place(x=180, y=5)
split_red_balloon = Pmw.Balloon(self.root)
split_red_balloon.bind(split_red, "Split red channel")
split_green = Checkbutton(
rgpsplit_labelframe, text='Green', bg=self.gl.bkcolor, variable=self.gl.split_green_checkbox,
onvalue=1, offvalue=0, command=self.rgb_split_preview_update)
split_green.place(x=180, y=25)
split_green_balloon = Pmw.Balloon(self.root)
split_green_balloon.bind(split_green, "Split green channel")
split_blue = Checkbutton(
rgpsplit_labelframe, text='Blue', bg=self.gl.bkcolor, variable=self.gl.split_blue_checkbox,
onvalue=1, offvalue=0, command=self.rgb_split_preview_update)
split_blue.place(x=180, y=45)
split_blue_balloon = Pmw.Balloon(self.root)
split_blue_balloon.bind(split_blue, "Split blue channel")
self.rgbsplit_preview = Label(
rgpsplit_labelframe, bg=self.gl.bkcolor, relief=RAISED, image=self.gl.preview_image)
self.rgbsplit_preview.place(x=360, y=10)
Label(rgpsplit_labelframe, text="Start frame", bg=self.gl.bkcolor).place(x=240, y=10)
Entry(rgpsplit_labelframe, validate="key", width=4, bd=2,
textvariable=self.gl.rgbsplit_start_frame).place(x=310, y=10)
Label(rgpsplit_labelframe, text="End frame", bg=self.gl.bkcolor).place(x=240, y=45)
Entry(rgpsplit_labelframe, validate="key", width=4, bd=2,
textvariable=self.gl.rgbsplit_end_frame).place(x=310, y=45)
def transition_effect(self):
transition_labelframe = LabelFrame(self.effect_labelframe, text="Transition", bg=self.gl.bkcolor,
width=475, height=110, bd=2)
transition_labelframe.place(x=480, y=125)
transition = Checkbutton(
transition_labelframe, text='Transition', bg=self.gl.bkcolor, variable=self.gl.transition_checkbox,
onvalue=1, offvalue=0, command=dummy)
transition.place(x=5, y=25)
transition_checkbutton_ballon = Pmw.Balloon(self.root)
transition_checkbutton_ballon.bind(transition, "Enable transition effect")
def load_transition_texture():
filename = filedialog.askopenfilename(
initialdir=self.gl.path, title="Select a shape",
filetypes=self.gl.file_format)
try:
self.gl.transition_texture = pygame.image.load(filename).convert_alpha()
except Exception as error:
messagebox.showerror("Error", "Cannot open file : %s\nError: %s " % (filename, error))
return
texture = Button(
transition_labelframe, text="Texture",
width=7, height=1, bg=self.gl.bkcolor, command=lambda: load_transition_texture())
texture.place(x=90, y=25)
texture_checkbutton_ballon = Pmw.Balloon(self.root)
texture_checkbutton_ballon.bind(texture, "Load an image for the transition effect.\n"
"The sprites will progressively blend toward\n"
"the loaded image.")
# Checkbutton(
# transition_labelframe, text='Alpha 1', bg=self.gl.bkcolor,
# variable=self.gl.transition_alpha1, onvalue=1, offvalue=0, command=dummy).place(x=160, y=5)
#
# Checkbutton(transition_labelframe, text='Alpha 2', bg=self.gl.bkcolor, variable=self.gl.transition_alpha2,
# onvalue=1, offvalue=0, command=dummy).place(x=160, y=45)
self.transition_preview = Label(
transition_labelframe, bg=self.gl.bkcolor, relief=RAISED, image=self.gl.preview_image)
self.transition_preview.place(x=360, y=10)
Label(transition_labelframe, text="Start frame", bg=self.gl.bkcolor).place(x=240, y=10)
Entry(transition_labelframe, validate="key", width=4, bd=2,
textvariable=self.gl.transition_start_frame).place(x=310, y=10)
Label(transition_labelframe, text="End frame", bg=self.gl.bkcolor).place(x=240, y=45)
Entry(transition_labelframe, validate="key", width=4,
bd=2, textvariable=self.gl.transition_end_frame).place(x=310, y=45)
def glitch_effect(self):
    """Build the "Glitch" effect panel.

    Creates a labelled frame holding an enable checkbox, horizontal and
    vertical direction checkboxes, a preview thumbnail, and start/end
    frame entries.  All widget state lives in Tk variables on self.gl.
    """
    glitch_labelframe = LabelFrame(self.effect_labelframe, text="Glitch", bg=self.gl.bkcolor,
                                   width=475, height=110, bd=2)
    glitch_labelframe.place(x=480, y=245)

    # Master on/off switch for the effect.
    glitch = Checkbutton(
        glitch_labelframe, text='Glitch', bg=self.gl.bkcolor, variable=self.gl.glitch_checkbox,
        onvalue=1, offvalue=0, command=dummy)
    glitch.place(x=5, y=25)
    glitch_checkbutton_balloon = Pmw.Balloon(self.root)
    glitch_checkbutton_balloon.bind(glitch, "Enable glitch effect")

    # Direction selectors.  Tooltip grammar fixed ("a horizontal"/"a vertical").
    horiz = Checkbutton(
        glitch_labelframe, text='Horizontal', bg=self.gl.bkcolor, variable=self.gl.glitch_horizontal,
        onvalue=1, offvalue=0, command=dummy)
    horiz.place(x=70, y=10)
    horiz_checkbutton_balloon = Pmw.Balloon(self.root)
    horiz_checkbutton_balloon.bind(horiz, "Create a horizontal glitch effect")

    vert = Checkbutton(
        glitch_labelframe, text='Vertical', bg=self.gl.bkcolor, variable=self.gl.glitch_vertical,
        onvalue=1, offvalue=0, command=dummy)
    vert.place(x=70, y=45)
    vert_checkbutton_balloon = Pmw.Balloon(self.root)
    vert_checkbutton_balloon.bind(vert, "Create a vertical glitch effect")

    # Preview thumbnail plus the frame range the effect is applied to.
    self.glitch_preview = Label(glitch_labelframe, bg=self.gl.bkcolor, relief=RAISED, image=self.gl.preview_image)
    self.glitch_preview.place(x=360, y=10)
    Label(glitch_labelframe, text="Start frame", bg=self.gl.bkcolor).place(x=240, y=10)
    Entry(glitch_labelframe, validate="key", width=4, bd=2,
          textvariable=self.gl.glitch_start_frame).place(x=310, y=10)
    Label(glitch_labelframe, text="End frame", bg=self.gl.bkcolor).place(x=240, y=45)
    Entry(glitch_labelframe, validate="key", width=4, bd=2,
          textvariable=self.gl.glitch_end_frame).place(x=310, y=45)
def miscelleanous_effect(self):
    """Build the "Miscellaneous" effect panel.

    Groups four small effects: dithering (with a strength entry),
    pixelation (with a block-size entry), sepia and greyscale toggles.
    """
    panel = LabelFrame(
        self.effect_labelframe, text="Miscellaneous", bg=self.gl.bkcolor,
        width=475, height=110, bd=2)
    panel.place(x=480, y=365)

    # --- Dithering -------------------------------------------------------
    dither_frame = LabelFrame(
        panel, text="dithering", bg=self.gl.bkcolor, width=110, height=85, bd=2)
    dither_frame.place(x=5, y=0)
    dither_box = Checkbutton(
        dither_frame, text='dithering', bg=self.gl.bkcolor, variable=self.gl.dithering,
        onvalue=1, offvalue=0, command=dummy)
    dither_box.place(x=0, y=5)
    dither_value_entry = Entry(dither_frame, validate="key", width=4, bd=2,
                               textvariable=self.gl.dithering_value)
    dither_value_entry.place(x=5, y=35)
    dither_help = Pmw.Balloon(self.root)
    dither_help.bind(
        dither_value_entry, "Dithered images, particularly those with relatively\n"
                            " few colors, can often be distinguished by a characteristic\n"
                            " graininess or speckled appearance. Choose a value in range [0 ... 16]")

    # --- Pixelation ------------------------------------------------------
    pixel_frame = LabelFrame(panel, text="Pixelated", bg=self.gl.bkcolor,
                             width=150, height=85, bd=2)
    pixel_frame.place(x=120, y=0)
    pixel_box = Checkbutton(
        pixel_frame, text='Pixelated', bg=self.gl.bkcolor, variable=self.gl.pixel,
        onvalue=1, offvalue=0, command=dummy)
    pixel_box.place(x=5, y=5)
    Label(pixel_frame, text="pixel block size", bg=self.gl.bkcolor).place(x=5, y=35)
    pixel_size_entry = Entry(pixel_frame, validate="key", width=4, bd=2,
                             textvariable=self.gl.pixel_size)
    pixel_size_entry.place(x=90, y=35)
    pixel_help = Pmw.Balloon(self.root)
    pixel_help.bind(
        pixel_size_entry, "Pixelate an images\nChoose one of the following values [4, 8, 16, 32, 64]")

    # --- Simple colour filters ------------------------------------------
    Checkbutton(
        panel, text='Sepia', bg=self.gl.bkcolor, variable=self.gl.sepia,
        onvalue=1, offvalue=0, command=dummy).place(x=300, y=15)
    Checkbutton(
        panel, text='Greyscale', bg=self.gl.bkcolor, variable=self.gl.greyscale,
        onvalue=1, offvalue=0, command=dummy).place(x=300, y=40)
def preview_options(self):
preview_label = LabelFrame(self.root, text="Preview options", bg=self.gl.bkcolor, width=475, height=160, bd=2)
preview_label.place(x=5, y=635)
def show_milliseconds(scale_value):
v = int(scale_value)
if not isinstance(v, int):
return
if v == 0:
return
tick = int(float(1 / v) * 1000)
fps.config(text="%s msecs" % tick)
if isinstance(self.gl.pyimage, list):
frames = len(self.gl.pyimage)
t = round(((1/v) * frames), 3)
self.duration.config(text=str(t))
else:
return
fps = Label(preview_label, text="fps: ", bg="#858585")
fps.place(x=190, y=50)
Label(preview_label, text="Frame number :", bg="#858585").place(x=5, y=80)
self.frame_number = Label(preview_label, text="0", bg="#858585")
self.frame_number.place(x=100, y=80)
Label(preview_label, text="Duration in secs :", bg="#858585").place(x=5, y=100)
self.duration = Label(preview_label, text="0", bg="#858585")
self.duration.place(x=100, y=100)
self.preview_delay_value = Scale(
preview_label, bg="#858585", orient=HORIZONTAL, bd=2, relief=FLAT,
activebackground="#858585", troughcolor="#858585", variable=self.gl.preview_scale_delay, length=180,
highlightbackground="#858585", from_=1, to_=200, width=10, command=show_milliseconds)
self.preview_delay_value.place(x=5, y=35)
preview_delay_value_balloon = Pmw.Balloon(self.root)
preview_delay_value_balloon.bind(self.preview_delay_value, "Adjust the FPS, default 60 frames per seconds")
show_milliseconds(self.gl.preview_scale_delay.get())
self.preview_button = Button(
preview_label, text="Apply / Preview", width=20, height=1, bg="#858585",
command=lambda: self.preview(int(self.preview_delay_value.get())))
self.preview_button.place(x=10, y=5)
preview_button_balloon = Pmw.Balloon(self.root)
preview_button_balloon.bind(self.preview_button, "Preview effect(s) on canvas and pygame display")
self.preview_button.config(state=DISABLED)
self.checker_background_value = Checkbutton(
preview_label, text='Checker bck', bg="#858585", variable=self.gl.checker_value, onvalue=1, offvalue=0)
self.checker_background_value.place(x=250, y=110)
checker_balloon = Pmw.Balloon(self.root)
checker_balloon.bind(
self.checker_background_value,
"Display a checker background during the \n"
"rendering (only for 32-bit image format)")
inverse_labelframe = LabelFrame(
preview_label, text="Inverse", bg=self.gl.bkcolor, width=200, height=110, bd=2)
inverse_labelframe.place(x=250, y=0)
inverse_checkbox = Checkbutton(
inverse_labelframe, text='Inverse', bg="#858585", variable=self.gl.inverse_variable, onvalue=1, offvalue=0)
inverse_checkbox.place(x=0, y=15)
inverse_balloon = Pmw.Balloon(self.root)
inverse_balloon.bind(
inverse_checkbox, "Inverse (negative) effect apply to all texture")
def exc_button_state():
if self.gl.inverse_exclude_variable.get():
self.exclude_button_inv.configure(state="normal")
self.exclude_red_entry.configure(state="normal")
self.exclude_green_entry.configure(state="normal")
self.exclude_blue_entry.configure(state="normal")
else:
self.exclude_button_inv.configure(state="disabled")
self.exclude_red_entry.configure(state="disabled")
self.exclude_green_entry.configure(state="disabled")
self.exclude_blue_entry.configure(state="disabled")
inverse_exclude_checkbox = Checkbutton(
inverse_labelframe, text='Exclude', bg="#858585",
variable=self.gl.inverse_exclude_variable, onvalue=1, offvalue=0, command=exc_button_state)
inverse_exclude_checkbox.place(x=0, y=45)
inverse_exclude_balloon = Pmw.Balloon(self.root)
inverse_exclude_balloon.bind(
inverse_exclude_checkbox, "Exclude a specific color for the inverse effect")
# Label(inverse_labelframe, text="Exclude", bg=self.gl.bkcolor).place(x=75, y=0)
Label(inverse_labelframe, text="Red", bg=self.gl.bkcolor).place(x=75, y=0)
Label(inverse_labelframe, text="Green", bg=self.gl.bkcolor).place(x=75, y=30)
Label(inverse_labelframe, text="Blue", bg=self.gl.bkcolor).place(x=75, y=60)
self.exclude_red_entry = Entry(
inverse_labelframe, validate="key", width=4, bd=2, textvariable=self.gl.exclude_red_inv, | |
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy import integrate
from matplotlib import ticker
# IPython magic: render figures with the interactive notebook backend.
# (Only valid inside a Jupyter/IPython session, not plain Python.)
%matplotlib notebook
# Global matplotlib defaults for this notebook: higher DPI, 6x4 inch figures.
plt.rcParams['figure.dpi']=150
plt.rcParams['figure.figsize']=(6,4)
def XASload(dir,de,ate,ext,interp,groupA,groupB,exc,y,shift):
    """Load a run of numbered XAS scan files and split them into two groups.

    Files are read from dir + '_' + NNNN + ext where NNNN runs from *de*
    to *ate* (inclusive), zero-padded to 4 digits.

    :param ext: if it contains '2', a 9-column CSV layout (skiprows=1) with a
        pre-computed normalised column is assumed; otherwise a 10-column
        layout (skiprows=7) is read and norm is computed as sample/i0.
    :param interp: for the 10-column layout only: 0 keeps the raw energy
        axis of each scan, any other value interpolates all scans onto a
        common 0.01 eV grid defined by the first scan.
    :param groupA, groupB: 1-based scan positions within the run assigned
        to each returned group.
    :param exc: 1-based scan positions excluded from both groups.
    :param y: selects the returned trace: 0 = i0, 1 = sample, 2 = norm,
        11/21 = sample/norm after energy alignment via XASshift.
        NOTE(review): any other value leaves yaxis/xaxis undefined and
        raises NameError.
    :param shift: [ref, left, right] passed to XASshift for y in (11, 21).
    :return: (list of groupA traces, list of groupB traces, common x axis)
    """
    a=[];b=[];
    for i in range(de,ate+1):
        # Scan number zero-padded to 4 digits, e.g. 0012.
        c = '{:04d}'.format(i)
        if '2' in ext:
            energy_i,col2,col3,col4,i0_i,sample_i,norm_i,col8,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=1,delimiter=',',unpack=True)
            if i == de:
                # Common grid trimmed by 2 eV at both ends so it stays inside
                # every scan's range for the interpolations below.
                energy = np.arange(round(energy_i[0]+2,3),round(energy_i[-1]-2,3),0.01)
            interp_norm_i = interp1d(energy_i, norm_i, kind='linear')
            norm = interp_norm_i(energy)
            interp_i0_i = interp1d(energy_i, i0_i, kind='linear')
            i0 = interp_i0_i(energy)
            interp_sample_i = interp1d(energy_i, sample_i, kind='linear')
            sample = interp_sample_i(energy)
        elif interp == 0:
            # 10-column layout, no interpolation: keep each scan's own axis.
            col0,energy_i,col2,col3,col4,col5,i0_i,col7,sample_i,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=7,delimiter=',',unpack=True)
            energy = energy_i
            norm = np.array(sample_i/i0_i)
            i0 = i0_i
            sample = sample_i
        else:
            # 10-column layout with interpolation onto the common grid.
            col0,energy_i,col2,col3,col4,col5,i0_i,col7,sample_i,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=7,delimiter=',',unpack=True)
            norm_i = np.array(sample_i/i0_i)
            if i == de:
                energy = np.arange(round(energy_i[0]+2,3),round(energy_i[-1]-2,3),0.01)
            # JCC on 2018-04-12: we shrink the energy interval at the start and
            # end to avoid interpolation errors... Needs improvement...
            interp_norm_i = interp1d(energy_i, norm_i, kind='linear')
            norm = interp_norm_i(energy)
            interp_i0_i = interp1d(energy_i, i0_i, kind='linear')
            i0 = interp_i0_i(energy)
            interp_sample_i = interp1d(energy_i, sample_i, kind='linear')
            sample = interp_sample_i(energy)
        # Select which trace this call returns (see docstring for codes).
        if y == 0: yaxis = i0; xaxis = energy
        elif y == 1: yaxis = sample; xaxis = energy
        elif y == 2: yaxis = norm; xaxis = energy;
        elif y == 11:
            # Align on the reference peak, then resample onto a 0.05 eV grid
            # fixed by the first scan so all scans share one axis.
            energy_shift,energy_cut,sample_cut,popt,amp,x0,sigma,gauss = XASshift(energy,sample,ref=shift[0],left=shift[1],right=shift[2])
            if i == de:
                energy_new = np.arange(round(energy_shift[0],3),round(energy_shift[-1],3),0.05)
            interp_sample = interp1d(energy_shift, sample, kind='linear', fill_value='extrapolate')
            sample_new = interp_sample(energy_new)
            yaxis = sample_new; xaxis = energy_new
        elif y == 21:
            energy_shift,energy_cut,norm_cut,popt,amp,x0,sigma,gauss = XASshift(energy,norm,ref=shift[0],left=shift[1],right=shift[2])
            if i == de:
                energy_new = np.arange(round(energy_shift[0],3),round(energy_shift[-1],3),0.05)
            interp_norm = interp1d(energy_shift, norm, kind='linear', fill_value='extrapolate')
            norm_new = interp_norm(energy_new)
            yaxis = norm_new; xaxis = energy_new
        # t is the 1-based position of this scan within the run; groupA,
        # groupB and exc are expressed in these positions.
        c = int(c)
        t = c - de + 1
        if (t in groupA) and (t not in exc):
            a.append(yaxis)
        elif (t in groupB) and (t not in exc):
            b.append(yaxis)
    return a,b,xaxis
def SEARCHindex(axis,pt):
    """Return the index of the value in *axis* closest to *pt*.

    Distances are rounded to one decimal place before comparison, so any
    point within the winning 0.1-wide bucket can match; ties resolve to
    the lowest index.
    """
    distances = [round(abs(pt - value), 1) for value in axis]
    return distances.index(min(distances))
def XASavg(a):
    """Return the element-wise average of the spectra in *a*.

    :param a: non-empty sequence of equally shaped numpy arrays (or scalars)
    :return: their element-wise mean
    :raises ZeroDivisionError: if *a* is empty (unchanged behaviour)
    """
    # Built-in sum() accumulates the arrays element-wise starting from 0;
    # the previous manual loop shadowed the builtin with a local `sum`.
    return sum(a) / len(a)
def XASbg(yaxis,xaxis,bg):
    """Subtract a constant background from *yaxis*.

    The background level is the y value at the x position nearest to the
    energy *bg* (nearest as defined by SEARCHindex).
    """
    background_level = yaxis[SEARCHindex(xaxis, bg)]
    return yaxis - background_level
def XASnor(yaxis,xaxis,xas,nor):
    """Normalise *yaxis* by the value of *xas* at the x position nearest *nor*."""
    reference = xas[SEARCHindex(xaxis, nor)]
    return yaxis / reference
def CONFIGplt(ylabel='norm', xlabel='energy (eV)', grid=False):
    """Apply common plot cosmetics and show the current figure.

    :param ylabel: y-axis label
    :param xlabel: x-axis label
    :param grid: whether to draw a grid.  This parameter was previously
        accepted but ignored (the grid was always forced off); it is now
        honoured, with the default changed to False so that existing
        callers keep the exact same output.
    """
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.grid(grid)
    plt.legend()
    plt.show()
def XASshift(xaxis,yaxis,ref=779.72,left=-0.5,right=0.5):
    """Align an XAS spectrum by fitting a Gaussian to the peak near *ref*.

    A window [ref+left, ref+right] is cut out of the data, a Gaussian is
    fitted to it, and the whole x axis is shifted so the fitted centre
    lands exactly on *ref*.

    :return: (shifted x axis, windowed x, windowed y, fit parameters,
              amplitude, centre, sigma, Gaussian evaluated on the full
              x axis)

    Example: energy_shift,energy_cut,norm_cut,popt,a,x0,sigma,gauss =
             XASshift(energy,norm,ref=779.72,left=-0.5,right=0.5)
    """
    def gauss(x,a,x0,sigma):
        return a*np.exp(-(x-x0)**2/(2*sigma**2))

    # Locate the window edges with the same rounded nearest-point search
    # used by SEARCHindex (distances rounded to one decimal; first index
    # wins ties).
    window_lo = ref + left
    window_hi = ref + right
    lo_dist = [round(abs(window_lo - e), 1) for e in xaxis]
    hi_dist = [round(abs(window_hi - e), 1) for e in xaxis]
    lo_idx = lo_dist.index(min(lo_dist))
    hi_idx = hi_dist.index(min(hi_dist))
    xaxis_cut = xaxis[lo_idx:hi_idx]
    yaxis_cut = yaxis[lo_idx:hi_idx]

    # Fit the windowed peak.  Amplitude is bounded by the data range and
    # the centre by the window, so the fit cannot wander off the peak.
    popt,pcov = curve_fit(gauss, xaxis_cut, yaxis_cut,
                          p0=[max(yaxis), ref, 1],
                          bounds=([min(yaxis), window_lo, 0],
                                  [max(yaxis), window_hi, 5]))
    a, x0, sigma = popt

    # Shift the axis so the fitted centre coincides with the reference.
    xaxis_shift = xaxis + (ref - x0)
    return xaxis_shift,xaxis_cut,yaxis_cut,popt,a,x0,sigma,gauss(xaxis,a,x0,sigma)
def XASshiftEXPORT(dir,ext='.dat',scans=[],nor=779.7,bg=772,y=21,shift=[779.7,-0.5,0.5]):
    """Energy-align each scan with XASshift and export it to a .dat file.

    For every scan number in *scans*, the file dir + '_' + NNNN + ext is
    read, the selected trace (sample for y == 11, sample/i0 for y == 21)
    is shifted onto the reference energy, interpolated onto a common
    0.05 eV grid (fixed by the first scan) and written to
    dir + NNNN + '-SHIFTexport.dat' as "x,y" lines.

    :param nor, bg: accepted for interface compatibility; not used here.
    :raises NameError: if y is not 11 or 21 (unchanged behaviour).
    NOTE(review): *scans* and *shift* are mutable defaults; they are never
    mutated here, but pass explicit lists to be safe.
    """
    for i in scans:
        c = '{:04d}'.format(i)
        if '2' in ext:
            # 9-column CSV layout with a pre-computed normalised column.
            energy_i,col2,col3,col4,i0_i,sample_i,norm_i,col8,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=1,delimiter=',',unpack=True)
            if i == scans[0]:
                energy = np.arange(round(energy_i[0],3),round(energy_i[-1],3),0.05)
            interp_norm_i = interp1d(energy_i, norm_i, kind='linear', fill_value='extrapolate')
            norm = interp_norm_i(energy)
            interp_sample_i = interp1d(energy_i, sample_i, kind='linear', fill_value='extrapolate')
            sample = interp_sample_i(energy)
        else:
            # 10-column layout; normalise by the incident intensity i0.
            col0,energy_i,col2,col3,col4,col5,i0_i,col7,sample_i,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=7,delimiter=',',unpack=True)
            energy = energy_i
            norm = np.array(sample_i/i0_i)
            sample = sample_i
        # Align the selected trace on the reference peak.
        if y == 11:
            energy_shift,energy_cut,sample_cut,popt,amp,x0,sigma,gauss = XASshift(energy,sample,ref=shift[0],left=shift[1],right=shift[2])
        elif y == 21:
            energy_shift,energy_cut,sample_cut,popt,amp,x0,sigma,gauss = XASshift(energy,norm,ref=shift[0],left=shift[1],right=shift[2])
        if i == scans[0]:
            # The first scan defines the common export grid.
            energy_new = np.arange(round(energy_shift[0],3),round(energy_shift[-1],3),0.05)
            xaxis = energy_new
        if y == 11:
            interp_sample = interp1d(energy_shift, sample, kind='linear', fill_value='extrapolate')
            yaxis = interp_sample(energy_new)
        elif y == 21:
            interp_norm = interp1d(energy_shift, norm, kind='linear', fill_value='extrapolate')
            yaxis = interp_norm(energy_new)
        filename = dir+c+'-SHIFTexport.dat'
        # Write "x,y" pairs.  A distinct loop variable avoids clobbering the
        # scan counter (the original reused `i` here), and `with` guarantees
        # the file is closed even if a write fails.
        with open(filename,'w') as out:
            out.write('#xaxis_new,yaxis_new\n')
            for k in range(len(xaxis)):
                out.write(str(xaxis[k])+','+str(yaxis[k])+'\n')
def XASplot(dir,scans=[],ext='.dat',marker='',y=2,shift=[779.72,-0.5,0.5],ymin=888,ymax=888):
    """Plot one curve per scan number, without averaging or background removal.

    :param dir: file prefix; files are read from dir + '_' + NNNN + ext
    :param scans: scan numbers to plot
    :param ext: extension; '2' in it selects the 9-column CSV layout
    :param y: 0 = i0, 1 = sample, 2 = sample/i0, 11/21 = sample/norm after
        energy alignment via XASshift (other values raise NameError,
        unchanged behaviour)
    :param shift: [ref, left, right] forwarded to XASshift
    :param ymin, ymax: y limits; 888 is the sentinel for "autoscale"
    """
    for i in scans:
        c = '{:04d}'.format(i)
        if '2' in ext:
            energy,col2,col3,col4,i0,sample,norm,col8,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=1,delimiter=',',unpack=True)
        else:
            col0,energy,col2,col3,col4,col5,i0,col7,sample,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=7,delimiter=',',unpack=True)
            norm = np.array(sample/i0)
        if y == 0: yaxis = i0; ylab = 'i0'; xaxis = energy
        elif y == 1: yaxis = sample; ylab = 'sample'; xaxis = energy
        elif y == 2: yaxis = norm; ylab = 'norm'; xaxis = energy
        elif y == 11:
            energy_shift,energy_cut,sample_cut,popt,amp,x0,sigma,gauss = XASshift(energy,sample,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = sample; ylab = 'sample'; xaxis = energy_shift
        elif y == 21:
            energy_shift,energy_cut,norm_cut,popt,amp,x0,sigma,gauss = XASshift(energy,norm,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = norm; ylab = 'norm'; xaxis = energy_shift
        plt.plot(xaxis, yaxis, linestyle='-',linewidth=1.2, label=str(i), marker=marker)
    plt.grid(False)
    # Apply optional limits, then the shared cosmetics/legend/show once.
    # (Previously both branches of the sentinel check duplicated the
    # CONFIGplt call.)
    if ymin != 888:
        plt.ylim((ymin,ymax))
    CONFIGplt(ylabel=ylab)
def XASplotAVG(dir,scans=[],ext='.dat',marker='',y=2,shift=[779.72,-0.5,0.5],ymin=888,ymax=888):
    """Average the selected scans and plot the averaged spectrum.

    Parameters are the same as XASplot.  Bug fix: the function previously
    computed the average (`media`) but then plotted only the LAST scan's
    trace; it now plots the average, labelled 'avg'.

    NOTE(review): for y in (11, 21) each scan gets its own shifted x axis
    but the average is plotted against the last scan's axis — this matches
    the original data flow; confirm the scans share a common grid.
    """
    a=[]
    for i in scans:
        c = '{:04d}'.format(i)
        if '2' in ext:
            energy,col2,col3,col4,i0,sample,norm,col8,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=1,delimiter=',',unpack=True)
        else:
            col0,energy,col2,col3,col4,col5,i0,col7,sample,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=7,delimiter=',',unpack=True)
            norm = np.array(sample/i0)
        if y == 0: yaxis = i0; ylab = 'i0'; xaxis = energy
        elif y == 1: yaxis = sample; ylab = 'sample'; xaxis = energy
        elif y == 2: yaxis = norm; ylab = 'norm'; xaxis = energy
        elif y == 11:
            energy_shift,energy_cut,sample_cut,popt,amp,x0,sigma,gauss = XASshift(energy,sample,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = sample; ylab = 'sample'; xaxis = energy_shift
        elif y == 21:
            energy_shift,energy_cut,norm_cut,popt,amp,x0,sigma,gauss = XASshift(energy,norm,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = norm; ylab = 'norm'; xaxis = energy_shift
        a.append(yaxis)
    media = XASavg(a)
    # Plot the averaged spectrum (previously `yaxis`, i.e. the last scan).
    plt.plot(xaxis, media, linestyle='-',linewidth=1.2, label='avg', marker=marker)
    if ymin != 888:  # 888 = autoscale sentinel
        plt.ylim((ymin,ymax))
    CONFIGplt(ylabel=ylab)
def XASplotBGnor(dir,scans=[],ext='.dat',marker='',y=2,shift=[779.72,-0.5,0.5],ymin=888,ymax=888,bg=775,nor=777.9):
    """Plot each scan after background subtraction and self-normalisation.

    For every scan the trace selected by *y* is background-corrected at
    energy *bg* (XASbg) and normalised to its own value at energy *nor*
    (XASnor) before plotting.  Other parameters are as in XASplot.
    """
    for i in scans:
        c = '{:04d}'.format(i)
        if '2' in ext:
            energy,col2,col3,col4,i0,sample,norm,col8,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=1,delimiter=',',unpack=True)
        else:
            col0,energy,col2,col3,col4,col5,i0,col7,sample,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=7,delimiter=',',unpack=True)
            norm = np.array(sample/i0)
        if y == 0: yaxis = i0; ylab = 'i0'; xaxis = energy
        elif y == 1: yaxis = sample; ylab = 'sample'; xaxis = energy
        elif y == 2: yaxis = norm; ylab = 'Normalized Intensity (a. u.)'; xaxis = energy
        elif y == 11:
            energy_shift,energy_cut,sample_cut,popt,amp,x0,sigma,gauss = XASshift(energy,sample,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = sample; ylab = 'sample'; xaxis = energy_shift
        elif y == 21:
            energy_shift,energy_cut,norm_cut,popt,amp,x0,sigma,gauss = XASshift(energy,norm,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = norm; ylab = 'norm'; xaxis = energy_shift
        yaxis_bg = XASbg(yaxis,xaxis,bg)
        # Normalise the background-corrected trace to itself at *nor*.
        yaxis_bg_nor = XASnor(yaxis_bg,xaxis,yaxis_bg,nor)
        plt.plot(xaxis, yaxis_bg_nor, linestyle='-',linewidth=1.2,label=str(i),marker=marker)
    # Apply optional limits once, then the shared cosmetics/legend/show.
    # (Previously both branches duplicated the CONFIGplt call; the unused
    # local `xlab` was also dropped.)
    if ymin != 888:
        plt.ylim((ymin,ymax))
    CONFIGplt(ylabel=ylab)
def XASplotBGnor_export(dir,scans=[],ext='.dat',marker='',y=2,shift=[779.72,-0.5,0.5],ymin=888,ymax=888,bg=775,nor=777.9,name=''):
    """Like XASplotBGnor, but also export the LAST processed scan to a file.

    Intended for spectra acquired without polarisation/quartets or without
    dichroism (e.g. the oxygen edge).  The export file
    name + '_XASexport.dat' contains "energy,xas,xas_nor" lines for the
    last scan in *scans* (only one scan is exported — unchanged behaviour).

    Bug fix: *name* previously defaulted to a list, which made the default
    export path crash on string concatenation; it now defaults to ''.
    """
    for i in scans:
        c = '{:04d}'.format(i)
        if '2' in ext:
            energy,col2,col3,col4,i0,sample,norm,col8,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=1,delimiter=',',unpack=True)
        else:
            col0,energy,col2,col3,col4,col5,i0,col7,sample,col9 = np.loadtxt(dir+'_'+c+ext,skiprows=7,delimiter=',',unpack=True)
            norm = np.array(sample/i0)
        if y == 0: yaxis = i0; ylab = 'i0'; xaxis = energy
        elif y == 1: yaxis = sample; ylab = 'sample'; xaxis = energy
        elif y == 2: yaxis = norm; ylab = 'Normalized Intensity (a. u.)'; xaxis = energy
        elif y == 11:
            energy_shift,energy_cut,sample_cut,popt,amp,x0,sigma,gauss = XASshift(energy,sample,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = sample; ylab = 'sample'; xaxis = energy_shift
        elif y == 21:
            energy_shift,energy_cut,norm_cut,popt,amp,x0,sigma,gauss = XASshift(energy,norm,ref=shift[0],left=shift[1],right=shift[2])
            yaxis = norm; ylab = 'norm'; xaxis = energy_shift
        yaxis_bg = XASbg(yaxis,xaxis,bg)
        yaxis_bg_nor = XASnor(yaxis_bg,xaxis,yaxis_bg,nor)
        plt.plot(xaxis, yaxis_bg_nor, linestyle='-',linewidth=1.2,label=str(i),marker=marker)
    plt.axhline(y=0,color='k',linestyle='--',linewidth=0.5)
    plt.grid(False)
    if ymin != 888:  # 888 = autoscale sentinel
        plt.ylim((ymin,ymax))
    CONFIGplt(ylabel=ylab)
    ### vitoraolima july 24 2021
    # Export the last scan's background-corrected / normalised trace.
    # NOTE(review): the x column is the raw `energy`, not the (possibly
    # shifted) `xaxis` — preserved from the original; confirm intent.
    with open(name+'_XASexport.dat','w') as out:
        out.write('#energy,xas,xas_nor\n')
        for k in range(len(energy)):
            out.write(str(energy[k])+','+str(yaxis_bg[k])+','+str(yaxis_bg_nor[k])+'\n')
# This function is the same funcion as "XMCDplot", reported in the end of this macro
def XAS_and_XMCD_plot(dir,de,ate,ext='.dat',interp=0,pos=[1,4,5,8],neg=[2,3,6,7],exc=[],nor=779.7,bg=772,y=2,shift=[779.7,-0.5,0.5]):
p,n,energy = XASload(dir,de,ate,ext,interp,pos,neg,exc,y,shift)
pmedia = XASavg(p)
nmedia = XASavg(n)
pmedia_bg = XASbg(pmedia,energy,bg)
nmedia_bg = XASbg(nmedia,energy,bg)
xas = (pmedia_bg + nmedia_bg)/2
dif = pmedia_bg - nmedia_bg
pmedia_bg_nor = XASnor(pmedia_bg,energy,xas,nor)
nmedia_bg_nor = XASnor(nmedia_bg,energy,xas,nor)
xas_nor = XASnor(xas,energy,xas,nor)
dif_nor = XASnor(dif,energy,xas,nor)
plt.plot(energy, pmedia_bg_nor, linestyle='-', color='black',linewidth=1.5, label='pos_avg');
plt.plot(energy, nmedia_bg_nor, linestyle='-', color='red',linewidth=1.5, label='neg_avg');
plt.plot(energy, | |
3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / (height + eps))
dw = np.log(gt_width / (width + eps))
return np.stack([dy, dx, dh, dw], axis=1)
def compose_image_meta(image_id, original_image_shape, window, scale, active_class_ids, config):
    """Pack per-image attributes into one flat 1D array.

    Layout, in order: image_id (1), original H/W/C (3), model input
    H/W/C (3), window y1/x1/y2/x2 (4), scale (1), active_class_ids
    (num_classes).

    image_id: An int ID of the image. Useful for debugging.
    original_image_shape: [H, W, C] before resizing or padding.
    window: (y1, x1, y2, x2) in pixels. The area of the image where the
        real image is (excluding the padding).
    scale: The scaling factor applied to the original image (float32).
    active_class_ids: List of class_ids available in the dataset from
        which the image came.
    config: dict providing 'img_size', the square model input side length.
    """
    side = config['img_size']
    parts = [image_id]
    parts.extend(original_image_shape)
    parts.extend((side, side, 3))
    parts.extend(window)
    parts.append(scale)
    parts.extend(active_class_ids)
    return np.array(parts)
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks,
                            train_rois_per_image, roi_pos_ratio, num_classes,
                            bbox_std, use_mini_mask, mask_shape, image_shape,
                            ):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.
    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.
    train_rois_per_image: number of ROIs to return per image.
    roi_pos_ratio: desired fraction of foreground ROIs in the output.
    num_classes: number of classes (including background as class 0).
    bbox_std: per-coordinate std used to normalize the bbox refinements.
    use_mini_mask: if True, gt_masks are mini-masks that must be resized
        back into their GT box inside a full-image placeholder first.
    mask_shape: (h, w) of the target masks fed to the mask head.
    image_shape: (H, W, ...) of the model input image.
    Depends on compute_iou, box_refinement and resize defined elsewhere in
    this module.
    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)
    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to <NAME>'s paper, it doesn't help.
    # Trim empty padding in gt_boxes and gt_masks parts
    # (class id 0 marks padding; only positive ids are real instances).
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]
    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])
    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)
    # Assign ROIs to GT boxes: each ROI takes its best-overlapping GT.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(train_rois_per_image * roi_pos_ratio)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG: fill the remainder of the batch with background ROIs.
    remaining = train_rois_per_image - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = train_rois_per_image - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.
        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            # NOTE(review): this recomputes bg_ids with the SAME 0.5
            # threshold used above, so it is not actually "easier" —
            # confirm intent.
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == train_rois_per_image, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], train_rois_per_image)
    # Reset the gt boxes assigned to BG ROIs.
    # NOTE(review): indexes the ORIGINAL rpn_roi arrays by bg ids, before
    # the `keep` gather below — duplicated/extra bg entries appended above
    # still map back to these same rows, so they are zeroed too.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0
    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]
    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((train_rois_per_image,
                       num_classes, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    # Only foreground ROIs get a refinement, stored at their class index.
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= bbox_std
    # Generate class-specific target masks
    masks = np.zeros((train_rois_per_image, mask_shape[0], mask_shape[1], num_classes),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]
        if use_mini_mask:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(image_shape[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini batch in the placeholder
            class_mask = placeholder
        # Pick part of the mask and resize it to the mask head's output size.
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = resize(m, mask_shape)
        masks[i, :, :, class_id] = mask
    return rois, roi_gt_class_ids, bboxes, masks
def mold_image(images, mean_pixel):
    """Subtract the mean pixel from RGB image(s) and return floats.

    Expects image colors in RGB order; accepts a single image or an
    array of images.
    """
    as_float = images.astype(np.float32)
    return as_float - mean_pixel
def unmold_image(normalized_images, mean_pixel):
    """Invert mold_image: add the mean pixel back and return uint8 images."""
    restored = normalized_images + mean_pixel
    return restored.astype(np.uint8)
def norm_boxes(boxes, shape):
    """Convert boxes from pixel coordinates to normalized coordinates.

    boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
    shape: (height, width) in pixels
    Note: In pixel coordinates (y2, x2) is outside the box, but in
    normalized coordinates it is inside — hence the (0, 0, 1, 1) shift
    applied to the bottom-right corner before scaling.
    Returns:
    [N, (y1, x1, y2, x2)] in normalized coordinates, float32
    """
    height, width = shape
    denom = np.array([height - 1, width - 1, height - 1, width - 1])
    offset = np.array([0, 0, 1, 1])
    normalized = (boxes - offset) / denom
    return normalized.astype(np.float32)
def denorm_boxes(boxes, shape):
    """Convert boxes from normalized coordinates back to pixel coordinates.

    boxes: [N, (y1, x1, y2, x2)] in normalized coordinates
    shape: (height, width) in pixels
    Note: In pixel coordinates (y2, x2) is outside the box, but in
    normalized coordinates it is inside — hence the (0, 0, 1, 1) shift
    added back to the bottom-right corner after scaling.
    Returns:
    [N, (y1, x1, y2, x2)] in pixel coordinates, int32
    """
    height, width = shape
    factor = np.array([height - 1, width - 1, height - 1, width - 1])
    offset = np.array([0, 0, 1, 1])
    return np.around(boxes * factor + offset).astype(np.int32)
def compute_backbone_shapes(config):
    """Compute the (height, width) of each backbone stage's feature map.

    config: dict with 'img_size' (square model input side length) and
        'backbone_strides' (one downsampling stride per stage).
    Returns:
    [N, (height, width)] where N is the number of stages.
    """
    side = config['img_size']
    shapes = []
    for stride in config['backbone_strides']:
        # The input is square, so height and width shrink identically.
        extent = int(math.ceil(side / stride))
        shapes.append([extent, extent])
    return np.array(shapes)
def batch_slice(inputs, graph_fn, batch_size, names=None):
"""Splits inputs into slices and feeds each slice to a copy of the given
computation graph and then combines the results. It allows you to run a
graph on a batch of inputs even if the graph is written to support one
instance only.
inputs: list of tensors. All must have the same first dimension length
graph_fn: A function that returns a TF tensor that's part of a graph.
batch_size: number | |
# else if (a >= 0 && b < 0 && a >= sum)
# sum = INT64_MAX;
# }
#
# Optimizations similar to the iadd_sat case are applied here.
(('isub_sat@64', a, b), ('bcsel',
('iand', ('iand', ('ilt', a, 0), ('ige', b, 0)), ('ige', ('isub', a, b), 0)),
0x8000000000000000,
('bcsel',
('ior', ('ior', ('ilt', a, 0), ('ige', b, 0)), ('ige', ('isub', a, b), 0)),
('isub', a, b),
0x7fffffffffffffff)),
'(options->lower_int64_options & nir_lower_iadd64) != 0'),
# These are done here instead of in the backend because the int64 lowering
# pass will make a mess of the patterns. The first patterns are
# conditioned on nir_lower_minmax64 because it was not clear that it was
# always an improvement on platforms that have real int64 support. No
# shaders in shader-db hit this, so it was hard to say one way or the
# other.
(('ilt', ('imax(is_used_once)', 'a@64', 'b@64'), 0), ('ilt', ('imax', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
(('ilt', ('imin(is_used_once)', 'a@64', 'b@64'), 0), ('ilt', ('imin', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
(('ige', ('imax(is_used_once)', 'a@64', 'b@64'), 0), ('ige', ('imax', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
(('ige', ('imin(is_used_once)', 'a@64', 'b@64'), 0), ('ige', ('imin', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
(('ilt', 'a@64', 0), ('ilt', ('unpack_64_2x32_split_y', a), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
(('ige', 'a@64', 0), ('ige', ('unpack_64_2x32_split_y', a), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
(('ine', 'a@64', 0), ('ine', ('ior', ('unpack_64_2x32_split_x', a), ('unpack_64_2x32_split_y', a)), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
(('ieq', 'a@64', 0), ('ieq', ('ior', ('unpack_64_2x32_split_x', a), ('unpack_64_2x32_split_y', a)), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
# 0u < uint(a) <=> uint(a) != 0u
(('ult', 0, 'a@64'), ('ine', ('ior', ('unpack_64_2x32_split_x', a), ('unpack_64_2x32_split_y', a)), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
# Alternative lowering that doesn't rely on bfi.
(('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
('bcsel', ('ult', 31, 'bits'),
'insert',
(('ior',
('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
'options->lower_bitfield_insert_to_shifts'),
# Alternative lowering that uses bitfield_select.
(('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
('bcsel', ('ult', 31, 'bits'), 'insert',
('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
'options->lower_bitfield_insert_to_bitfield_select'),
(('ibitfield_extract', 'value', 'offset', 'bits'),
('bcsel', ('ult', 31, 'bits'), 'value',
('ibfe', 'value', 'offset', 'bits')),
'options->lower_bitfield_extract'),
(('ubitfield_extract', 'value', 'offset', 'bits'),
('bcsel', ('ult', 31, 'bits'), 'value',
('ubfe', 'value', 'offset', 'bits')),
'options->lower_bitfield_extract'),
# (src0 & src1) | (~src0 & src2). Constant fold if src2 is 0.
(('bitfield_select', a, b, 0), ('iand', a, b)),
(('bitfield_select', a, ('iand', a, b), c), ('bitfield_select', a, b, c)),
# Note that these opcodes are defined to only use the five least significant bits of 'offset' and 'bits'
(('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
(('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
(('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
(('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
(('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
(('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),
# Section 8.8 (Integer Functions) of the GLSL 4.60 spec says:
#
# If bits is zero, the result will be zero.
#
# These patterns prevent other patterns from generating invalid results
# when count is zero.
(('ubfe', a, b, 0), 0),
(('ibfe', a, b, 0), 0),
(('ubfe', a, 0, '#b'), ('iand', a, ('ushr', 0xffffffff, ('ineg', b)))),
(('b2i32', ('i2b', ('ubfe', a, b, 1))), ('ubfe', a, b, 1)),
(('b2i32', ('i2b', ('ibfe', a, b, 1))), ('ubfe', a, b, 1)), # ubfe in the replacement is correct
(('ine', ('ibfe(is_used_once)', a, '#b', '#c'), 0), ('ine', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),
(('ieq', ('ibfe(is_used_once)', a, '#b', '#c'), 0), ('ieq', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),
(('ine', ('ubfe(is_used_once)', a, '#b', '#c'), 0), ('ine', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),
(('ieq', ('ubfe(is_used_once)', a, '#b', '#c'), 0), ('ieq', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),
(('ibitfield_extract', 'value', 'offset', 'bits'),
('bcsel', ('ieq', 0, 'bits'),
0,
('ishr',
('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
('isub', 32, 'bits'))),
'options->lower_bitfield_extract_to_shifts'),
(('ubitfield_extract', 'value', 'offset', 'bits'),
('iand',
('ushr', 'value', 'offset'),
('bcsel', ('ieq', 'bits', 32),
0xffffffff,
('isub', ('ishl', 1, 'bits'), 1))),
'options->lower_bitfield_extract_to_shifts'),
(('ifind_msb', 'value'),
('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
'options->lower_ifind_msb'),
(('find_lsb', 'value'),
('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
'options->lower_find_lsb'),
(('extract_i8', a, 'b@32'),
('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
'options->lower_extract_byte'),
(('extract_u8', a, 'b@32'),
('iand', ('ushr', a, ('imul', b, 8)), 0xff),
'options->lower_extract_byte'),
(('extract_i16', a, 'b@32'),
('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
'options->lower_extract_word'),
(('extract_u16', a, 'b@32'),
('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
'options->lower_extract_word'),
(('pack_unorm_2x16', 'v'),
('pack_uvec2_to_uint',
('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
'options->lower_pack_unorm_2x16'),
(('pack_unorm_4x8', 'v'),
('pack_uvec4_to_uint',
('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
'options->lower_pack_unorm_4x8'),
(('pack_snorm_2x16', 'v'),
('pack_uvec2_to_uint',
('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
'options->lower_pack_snorm_2x16'),
(('pack_snorm_4x8', 'v'),
('pack_uvec4_to_uint',
('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
'options->lower_pack_snorm_4x8'),
(('unpack_unorm_2x16', 'v'),
('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
('extract_u16', 'v', 1))),
65535.0),
'options->lower_unpack_unorm_2x16'),
(('unpack_unorm_4x8', 'v'),
('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
('extract_u8', 'v', 1),
('extract_u8', 'v', 2),
('extract_u8', 'v', 3))),
255.0),
'options->lower_unpack_unorm_4x8'),
(('unpack_snorm_2x16', 'v'),
('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
('extract_i16', 'v', 1))),
32767.0))),
'options->lower_unpack_snorm_2x16'),
(('unpack_snorm_4x8', 'v'),
('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
('extract_i8', 'v', 1),
('extract_i8', 'v', 2),
('extract_i8', 'v', 3))),
127.0))),
'options->lower_unpack_snorm_4x8'),
(('pack_half_2x16_split', 'a@32', 'b@32'),
('ior', ('ishl', ('u2u32', ('f2f16', b)), 16), ('u2u32', ('f2f16', a))),
'options->lower_pack_split'),
(('unpack_half_2x16_split_x', 'a@32'),
('f2f32', ('u2u16', a)),
'options->lower_pack_split'),
(('unpack_half_2x16_split_y', 'a@32'),
('f2f32', ('u2u16', ('ushr', a, 16))),
'options->lower_pack_split'),
(('pack_32_2x16_split', 'a@16', 'b@16'),
('ior', ('ishl', ('u2u32', b), 16), ('u2u32', a)),
'options->lower_pack_split'),
(('unpack_32_2x16_split_x', 'a@32'),
('u2u16', a),
'options->lower_pack_split'),
(('unpack_32_2x16_split_y', 'a@32'),
('u2u16', ('ushr', 'a', 16)),
'options->lower_pack_split'),
(('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
(('imin', ('imax', a, -1), 1), ('isign', a), '!options->lower_isign'),
(('imax', ('imin', a, 1), -1), ('isign', a), '!options->lower_isign'),
# float(0 < NaN) - float(NaN < 0) = float(False) - float(False) = 0 - 0 = 0
# Mark the new comparisons precise to prevent them being changed to 'a !=
# 0' or 'a == 0'.
(('fsign', a), ('fsub', ('b2f', ('!flt', 0.0, a)), ('b2f', ('!flt', a, 0.0))), 'options->lower_fsign'),
# Address/offset calculations:
# Drivers supporting imul24 should use the nir_lower_amul() pass, this
# rule converts everyone else to imul:
(('amul', a, b), ('imul', a, b), '!options->has_imul24'),
(('umul24', a, b),
('imul', ('iand', a, 0xffffff), ('iand', b, 0xffffff)),
'!options->has_umul24'),
(('umad24', a, b, c),
('iadd', ('imul', ('iand', a, 0xffffff), ('iand', b, 0xffffff)), c),
'!options->has_umad24'),
(('imad24_ir3', a, b, 0), ('imul24', a, b)),
(('imad24_ir3', a, 0, c), (c)),
(('imad24_ir3', a, 1, c), ('iadd', a, c)),
# if first two srcs are const, crack apart the imad so constant folding
# can clean up the imul:
# TODO ffma should probably get a similar rule:
(('imad24_ir3', '#a', '#b', c), ('iadd', ('imul', a, b), c)),
# These will turn 24b address/offset calc back into 32b shifts, but
# it should be safe to get back some of the bits of precision that we
# already decided were no necessary:
(('imul24', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
(('imul24', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
(('imul24', a, 0), (0)),
])
# bit_size dependent lowerings
for bit_size in [8, 16, 32, 64]:
    # convenience constants
    # Largest representable signed value for this bit width.
    intmax = (1 << (bit_size - 1)) - 1
    # Bit pattern of the most negative signed value, written as an
    # unsigned constant.
    intmin = 1 << (bit_size - 1)
    # Saturating add/sub: compute the plain add/sub, then clamp to
    # intmax/intmin when comparing the result against an operand shows
    # that wrap-around occurred.
    optimizations += [
        (('iadd_sat@' + str(bit_size), a, b),
         ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
                                  ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
        (('isub_sat@' + str(bit_size), a, b),
         ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
                                  ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
    ]
# Map each float comparison opcode to its logical negation, in a stable
# order; used below to push 'inot' through and/or of comparisons.
invert = OrderedDict([('feq', 'fneu'), ('fneu', 'feq')])
for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
    # De Morgan: !(x || y) -> !x && !y  and  !(x && y) -> !x || !y, where
    # negating a comparison swaps it for its inverse opcode.
    optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
                          ('iand', (invert[left], a, b), (invert[right], c, d))))
    optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
                          ('ior', (invert[left], a, b), (invert[right], c, d))))
# Optimize x2bN(b2x(x)) -> x
for | |
import sys
import six
import threading
import traceback
from queue import PriorityQueue
from collections import namedtuple
from datetime import datetime, timedelta
import pytz
import html
from sqlalchemy import Column, Integer, ForeignKey, Unicode, Enum, func
from monitorrent.db import Base, DBSession, row2dict, UTCDateTime
from monitorrent.utils.timers import timer
from monitorrent.plugins.status import Status
class Logger(object):
    """Abstract sink for engine execution events.

    All methods are no-op hooks; concrete implementations (e.g.
    DbLoggerWrapper below) override them to persist or display messages.
    """

    def started(self, start_time):
        """Called once when an execute run begins.

        :param start_time: datetime of the run start
        """

    def finished(self, finish_time, exception):
        """Called once when an execute run ends.

        :param finish_time: datetime of the run end
        :param exception: exception that aborted the run, or None on success
        """

    def info(self, message):
        """Report an informational message (may contain HTML markup)."""

    def failed(self, message, exc_type=None, exc_value=None, exc_tb=None):
        """Report a failure, optionally with sys.exc_info() details."""

    def downloaded(self, message, torrent):
        """Report a downloaded torrent.

        :param torrent: torrent payload — presumably raw torrent content;
            TODO confirm against callers
        """
def _clamp(value, min_value=0, max_value=100):
return max(min_value, min(value, max_value))
class Engine(object):
    """Coordinates one execute run: forwards log events and drives the
    tracker/client/notifier plugin managers."""

    def __init__(self, logger, settings_manager, trackers_manager, clients_manager, notifier_manager):
        """
        :type logger: Logger
        :type settings_manager: settings_manager.SettingsManager
        :type trackers_manager: plugin_managers.TrackersManager
        :type clients_manager: plugin_managers.ClientsManager
        :type notifier_manager: plugin_managers.NotifierManager
        """
        self.log = logger
        self.settings_manager = settings_manager
        self.trackers_manager = trackers_manager
        self.clients_manager = clients_manager
        self.notifier_manager = notifier_manager

    def info(self, message):
        # Forward to the configured Logger implementation.
        self.log.info(message)

    def failed(self, message, exc_type=None, exc_value=None, exc_tb=None):
        self.log.failed(message, exc_type, exc_value, exc_tb)

    def downloaded(self, message, torrent):
        self.log.downloaded(message, torrent)

    def update_progress(self, progress):
        # Overall-progress hook; the base implementation ignores it.
        pass

    def start(self, trackers_count, notifier_manager_execute):
        """Begin a run; returns the EngineTrackers context manager.

        :param trackers_count: dict of tracker name -> number of topics
        """
        return EngineTrackers(trackers_count, notifier_manager_execute, self)

    def add_torrent(self, filename, torrent, old_hash, topic_settings):
        """Add *torrent* to the download client, replacing the torrent
        identified by *old_hash* when present.

        :type filename: str
        :type old_hash: str | None
        :type torrent: Torrent
        :type topic_settings: clients.TopicSettings | None
        :rtype: datetime
        :raises Exception: if the torrent cannot be found in the client
            after the add attempt
        """
        existing_torrent = self.clients_manager.find_torrent(torrent.info_hash)
        if existing_torrent:
            self.info(u"Torrent <b>{0}</b> already added".format(filename))
        elif self.clients_manager.add_torrent(torrent.raw_content, topic_settings):
            # Add succeeded: report whether this replaced a previously
            # tracked torrent or is brand new.
            old_existing_torrent = self.clients_manager.find_torrent(old_hash) if old_hash else None
            if old_existing_torrent:
                self.info(u"Updated <b>{0}</b>".format(filename))
            else:
                self.info(u"Add new <b>{0}</b>".format(filename))
            if old_existing_torrent:
                # Best effort: failure to remove the superseded torrent is
                # logged but does not abort the add.
                if self.clients_manager.remove_torrent(old_hash):
                    self.info(u"Remove old torrent <b>{0}</b>"
                              .format(html.escape(old_existing_torrent['name'])))
                else:
                    self.failed(u"Can't remove old torrent <b>{0}</b>"
                                .format(html.escape(old_existing_torrent['name'])))
            # Re-query to obtain the client's record (and its date_added).
            existing_torrent = self.clients_manager.find_torrent(torrent.info_hash)
        if not existing_torrent:
            raise Exception(u'Torrent {0} wasn\'t added'.format(filename))
        return existing_torrent['date_added']

    def execute(self, ids):
        """Run every tracker that has topics among *ids*.

        NOTE(review): the semantics of get_topics(ids) for ids=None are
        defined by the tracker plugins — presumably "all topics"; confirm.
        """
        tracker_settings = self.settings_manager.tracker_settings
        trackers = list(self.trackers_manager.trackers.items())
        execute_trackers = dict()
        tracker_topics = list()
        for name, tracker in trackers:
            topics = tracker.get_topics(ids)
            if len(topics) > 0:
                execute_trackers[name] = len(topics)
                tracker_topics.append((name, tracker, topics))
        if len(tracker_topics) == 0:
            # Nothing matched the requested ids; no run is started.
            return
        with self.notifier_manager.execute() as notifier_manager_execute:
            with self.start(execute_trackers, notifier_manager_execute) as engine_trackers:
                for name, tracker, topics in tracker_topics:
                    tracker.init(tracker_settings)
                    with engine_trackers.start(name) as engine_tracker:
                        tracker.execute(topics, engine_tracker)
class EngineExecute(object):
    """Base helper that mirrors engine log events to the notifier manager.

    Subclasses represent narrowing execution scopes (run -> tracker ->
    topic -> downloads) and share this forwarding behavior.
    """

    def __init__(self, engine, notifier_manager_execute):
        """
        :type engine: Engine
        :type notifier_manager_execute: plugin_managers.NotifierManagerExecute
        """
        self.engine = engine
        self.notifier_manager_execute = notifier_manager_execute

    def info(self, message):
        self.engine.info(message)

    def failed(self, message, exc_type=None, exc_value=None, exc_tb=None):
        self.engine.failed(message, exc_type, exc_value, exc_tb)
        if exc_value is not None:
            # Include the exception text in the user-facing notification.
            notify_message = message + u"\r\n" + six.text_type(exc_value)
        else:
            notify_message = message
        # Guard before touching .notify_failed: notify() already tolerates a
        # missing notifier manager, but the eager attribute access here used
        # to raise AttributeError when it was None.
        if self.notifier_manager_execute:
            self.notify(notify_message, self.notifier_manager_execute.notify_failed)

    def downloaded(self, message, torrent):
        self.engine.downloaded(message, torrent)
        # Same guard as in failed(): avoid AttributeError on a None notifier.
        if self.notifier_manager_execute:
            self.notify(message, self.notifier_manager_execute.notify_download)

    def notify(self, message, method):
        """Best-effort delivery of *message* through *method*.

        Notifier errors are logged and suppressed so they never abort the
        execute run.
        """
        if self.notifier_manager_execute:
            try:
                method(message)
            except Exception:
                # Was a bare 'except:', which also swallowed SystemExit and
                # KeyboardInterrupt; catch only genuine errors.
                self.engine.failed(u"Failed notify", *sys.exc_info())
class EngineTrackers(EngineExecute):
    """Context manager covering the whole execute run across trackers.

    Keeps track of how many topics are already done so that each tracker's
    local progress can be mapped onto overall run progress.
    """

    def __init__(self, trackers_count, notifier_manager_execute, engine):
        """
        :type trackers_count: dict[str, int]
        :type notifier_manager_execute: plugin_managers.NotifierManagerExecute
        :type engine: Engine
        """
        super(EngineTrackers, self).__init__(engine, notifier_manager_execute)
        self.trackers_count = trackers_count
        self.done_topics = 0
        self.count_topics = sum(trackers_count.values())
        self.tracker_topics_count = 0

    def start(self, tracker):
        # Claim this tracker's topic count and reset its progress to 0%.
        self.tracker_topics_count = self.trackers_count.pop(tracker)
        self.update_progress(0)
        return EngineTracker(tracker, self, self.notifier_manager_execute, self.engine)

    def update_progress(self, progress):
        # Map the current tracker's local progress into overall progress.
        local = _clamp(progress)
        completed = 100 * self.done_topics / self.count_topics
        current = local * self.tracker_topics_count / self.count_topics
        self.engine.update_progress(completed + current)

    def __enter__(self):
        self.info(u"Begin execute")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_val is None:
            self.info(u"End execute")
        else:
            self.failed(u"Exception while execute", exc_type, exc_val, exc_tb)
        self.done_topics += self.tracker_topics_count
        self.update_progress(100)
        # Exceptions are reported above and intentionally suppressed.
        return True
class EngineTracker(EngineExecute):
    """Context manager for executing a single tracker."""

    def __init__(self, tracker, engine_trackers, notifier_manager_execute, engine):
        """
        :type tracker: str
        :type engine_trackers: EngineTrackers
        :type notifier_manager_execute: plugin_managers.NotifierManagerExecute
        :type engine: Engine
        """
        super(EngineTracker, self).__init__(engine, notifier_manager_execute)
        self.tracker = tracker
        self.engine_trackers = engine_trackers
        self.count = 0

    def start(self, count):
        # Begin iterating *count* topics of this tracker.
        return EngineTopics(count, self, self.notifier_manager_execute, self.engine)

    def update_progress(self, progress):
        # Clamp locally, then delegate up to the run-level progress mapping.
        self.engine_trackers.update_progress(_clamp(progress))

    def __enter__(self):
        self.info(u"Start checking for <b>{0}</b>".format(self.tracker))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_val is None:
            self.info(u"End checking for <b>{0}</b>".format(self.tracker))
        else:
            self.failed(u"Failed while checking for <b>{0}</b>".format(self.tracker),
                        exc_type, exc_val, exc_tb)
        # Tracker failures are contained; the run continues with the rest.
        return True
class EngineTopics(EngineExecute):
    """Context manager for iterating the topics of one tracker."""

    def __init__(self, count, engine_tracker, notifier_manager_execute, engine):
        """
        :type count: int
        :type engine_tracker: EngineTracker
        :type notifier_manager_execute: plugin_managers.NotifierManagerExecute
        :type engine: Engine
        """
        super(EngineTopics, self).__init__(engine, notifier_manager_execute)
        self.count = count
        self.engine_tracker = engine_tracker

    def start(self, index, topic_name):
        # Progress within the tracker is the fraction of topics started.
        self.update_progress(index * 100 / self.count)
        return EngineTopic(topic_name, self, self.notifier_manager_execute, self.engine)

    def update_progress(self, progress):
        self.engine_tracker.update_progress(_clamp(progress))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_val is not None:
            self.failed(u"Failed while checking topics", exc_type, exc_val, exc_tb)
        # Always suppress: a broken topic must not stop the others.
        return True
class EngineTopic(EngineExecute):
    """Context manager for processing a single topic."""

    def __init__(self, topic_name, engine_topics, notifier_manager_execute, engine):
        """
        :type topic_name: str
        :type engine_topics: EngineTopics
        :type notifier_manager_execute: plugin_managers.NotifierManagerExecute
        :type engine: Engine
        """
        super(EngineTopic, self).__init__(engine, notifier_manager_execute)
        self.topic_name = topic_name
        self.engine_topics = engine_topics

    def start(self, count):
        # Begin adding *count* torrents found for this topic.
        return EngineDownloads(count, self, self.notifier_manager_execute, self.engine)

    def status_changed(self, old_status, new_status):
        message = u"{0} status changed: {1}".format(self.topic_name, new_status)
        self.notify(message, self.notifier_manager_execute.notify_status_changed)
        # Anything other than Ok is reported through the failure channel.
        if new_status != Status.Ok:
            self.engine.failed(message)
        else:
            self.engine.info(message)

    def update_progress(self, progress):
        self.engine_topics.update_progress(_clamp(progress))

    def __enter__(self):
        self.info(u"Check for changes <b>{0}</b>".format(self.topic_name))
        self.update_progress(0)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_val is not None:
            self.failed(u"Exception while execute topic", exc_type, exc_val, exc_tb)
        self.update_progress(100)
        return True
class EngineDownloads(EngineExecute):
    """Context manager for adding the torrents found for one topic."""

    def __init__(self, count, engine_topic, notifier_manager_execute, engine):
        """
        :type count: int
        :type engine_topic: EngineTopic
        :type notifier_manager_execute: plugin_managers.NotifierManagerExecute
        :type engine: Engine
        """
        super(EngineDownloads, self).__init__(engine, notifier_manager_execute)
        self.count = count
        self.engine_topic = engine_topic

    def add_torrent(self, index, filename, torrent, old_hash, topic_settings):
        # Report progress as the share of downloads handled so far, then
        # delegate the actual client work to the engine.
        self.engine_topic.update_progress(index * 100 // self.count)
        return self.engine.add_torrent(filename, torrent, old_hash, topic_settings)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            self.failed("Exception while execute", exc_type, exc_val, exc_tb)
        return True
class ExecuteSettings(Base):
    """Settings row for scheduled execute runs."""
    __tablename__ = "settings_execute"

    id = Column(Integer, primary_key=True)
    # Interval between automatic runs — presumably seconds; TODO confirm
    # against the scheduler (EngineRunner).
    interval = Column(Integer, nullable=False)
    # Time of the last run; None until a first run has happened.
    last_execute = Column(UTCDateTime, nullable=True)
class Execute(Base):
    """One row per execute run, recording its time span and outcome."""
    __tablename__ = 'execute'

    id = Column(Integer, primary_key=True)
    start_time = Column(UTCDateTime, nullable=False)
    # Equals start_time until the run completes (see ExecuteLogManager.started).
    finish_time = Column(UTCDateTime, nullable=False)
    # Rows are created as 'failed' and flipped to 'finished' on success
    # (see ExecuteLogManager.finished).
    status = Column(Enum('finished', 'failed'), nullable=False)
    # HTML-escaped exception text when a run fails with an exception.
    failed_message = Column(Unicode, nullable=True)
class ExecuteLog(Base):
    """A single log line produced during an execute run."""
    __tablename__ = 'execute_log'

    id = Column(Integer, primary_key=True)
    execute_id = Column(ForeignKey('execute.id'))
    time = Column(UTCDateTime, nullable=False)
    # May contain HTML markup (see DbLoggerWrapper.failed).
    message = Column(Unicode, nullable=False)
    level = Column(Enum('info', 'warning', 'failed', 'downloaded'), nullable=False)
class DbLoggerWrapper(Logger):
    """Logger implementation that persists events via ExecuteLogManager."""

    def __init__(self, log_manager, settings_manager=None):
        """
        :type log_manager: ExecuteLogManager
        :type settings_manager: settings_manager.SettingsManager | None
        """
        self._log_manager = log_manager
        self._settings_manager = settings_manager

    def started(self, start_time):
        self._log_manager.started(start_time)

    def finished(self, finish_time, exception):
        self._log_manager.finished(finish_time, exception)
        # Prune aged log entries once per run, when configured to do so.
        if self._settings_manager:
            self._log_manager.remove_old_entries(self._settings_manager.remove_logs_interval)

    def info(self, message):
        self._log_manager.log_entry(message, 'info')

    def failed(self, message, exc_type=None, exc_value=None, exc_tb=None):
        if exc_value is None:
            failed_message = message
        else:
            # Render the traceback as escaped HTML below the message.
            trace = u''.join(traceback.format_exception(exc_type, exc_value, exc_tb))
            escaped = html.escape(trace).replace(u'\n', u'<br/>')
            failed_message = u'{0}<br/><pre>{1}</pre>'.format(message, escaped)
        self._log_manager.log_entry(failed_message, 'failed')

    def downloaded(self, message, torrent):
        self._log_manager.log_entry(message, 'downloaded')
# noinspection PyMethodMayBeStatic
class ExecuteLogManager(object):
    """Persists execute runs (Execute rows) and their log lines
    (ExecuteLog rows), and serves paged queries over them.

    At most one run may be in progress at a time; its Execute.id is held
    in _execute_id (None while idle).
    """

    # Id of the Execute row for the run in progress, or None.
    _execute_id = None

    def started(self, start_time):
        """Record the start of a run.

        :raises Exception: if a run is already in progress
        """
        if self._execute_id is not None:
            raise Exception('Execute already in progress')
        with DBSession() as db:
            # default values for not finished execute is failed and finish_time equal to start_time
            execute = Execute(start_time=start_time, finish_time=start_time, status='failed')
            db.add(execute)
            # Commit now so execute.id is assigned and visible to readers.
            db.commit()
            self._execute_id = execute.id

    def finished(self, finish_time, exception):
        """Close the current run, storing its outcome.

        :param exception: exception that aborted the run, or None on success
        :raises Exception: if no run is in progress
        """
        if self._execute_id is None:
            raise Exception('Execute is not started')
        with DBSession() as db:
            # noinspection PyArgumentList
            execute = db.query(Execute).filter(Execute.id == self._execute_id).first()
            execute.status = 'finished' if exception is None else 'failed'
            execute.finish_time = finish_time
            if exception is not None:
                # Escaped because the message is rendered as HTML in the UI.
                execute.failed_message = html.escape(str(exception))
        self._execute_id = None

    def log_entry(self, message, level):
        """Append a log line to the current run.

        :param level: one of 'info', 'warning', 'failed', 'downloaded'
        :raises Exception: if no run is in progress
        """
        if self._execute_id is None:
            raise Exception('Execute is not started')
        self._log_entry(message, level)

    def _log_entry(self, message, level):
        # Internal variant without the is-running check.
        with DBSession() as db:
            execute_log = ExecuteLog(execute_id=self._execute_id, time=datetime.now(pytz.utc),
                                     message=message, level=level)
            db.add(execute_log)

    def get_log_entries(self, skip, take):
        """Return a page of past runs (newest first) plus the total count.

        Each run dict carries 'downloaded' and 'failed' entry counts and an
        'is_running' flag.
        """
        with DBSession() as db:
            # Per-execute counts of 'downloaded' and 'failed' log lines,
            # joined back onto the Execute rows below.
            downloaded_sub_query = db.query(ExecuteLog.execute_id, func.count(ExecuteLog.id).label('count')) \
                .group_by(ExecuteLog.execute_id, ExecuteLog.level) \
                .having(ExecuteLog.level == 'downloaded') \
                .subquery()
            failed_sub_query = db.query(ExecuteLog.execute_id, func.count(ExecuteLog.id).label('count')) \
                .group_by(ExecuteLog.execute_id, ExecuteLog.level) \
                .having(ExecuteLog.level == 'failed') \
                .subquery()
            result_query = db.query(Execute, downloaded_sub_query.c.count, failed_sub_query.c.count) \
                .outerjoin(failed_sub_query, Execute.id == failed_sub_query.c.execute_id) \
                .outerjoin(downloaded_sub_query, Execute.id == downloaded_sub_query.c.execute_id) \
                .order_by(Execute.finish_time.desc()) \
                .offset(skip) \
                .limit(take)
            result = []
            for execute, downloads, fails in result_query.all():
                execute_result = row2dict(execute)
                # Counts are None when the outer join matched no rows.
                execute_result['downloaded'] = downloads or 0
                execute_result['failed'] = fails or 0
                execute_result['is_running'] = execute.id == self._execute_id
                result.append(execute_result)
            execute_count = db.query(func.count(Execute.id)).scalar()
            return result, execute_count

    def remove_old_entries(self, prune_days):
        """Delete executes (and their log lines) older than *prune_days* days."""
        # SELECT id FROM execute WHERE start_time <= datetime('now', '-10 days') ORDER BY id DESC LIMIT 1
        with DBSession() as db:
            prune_date = datetime.now(pytz.utc) - timedelta(days=prune_days)
            # Newest execute id that is old enough; everything with an id at
            # or below it is removed (ids grow with time).
            execute_id = db.query(Execute.id) \
                .filter(Execute.start_time <= prune_date) \
                .order_by(Execute.id.desc()) \
                .limit(1) \
                .scalar()
            if execute_id is not None:
                db.query(ExecuteLog) \
                    .filter(ExecuteLog.execute_id <= execute_id) \
                    .delete(synchronize_session=False)
                db.query(Execute) \
                    .filter(Execute.id <= execute_id) \
                    .delete(synchronize_session=False)

    def is_running(self, execute_id=None):
        """Whether any run (or the given run, if specified) is in progress."""
        if execute_id is not None:
            return self._execute_id == execute_id
        return self._execute_id is not None

    def get_execute_log_details(self, execute_id, after=None):
        """Log lines of *execute_id*, optionally only those with id > *after*."""
        with DBSession() as db:
            filters = [ExecuteLog.execute_id == execute_id]
            if after is not None:
                filters.append(ExecuteLog.id > after)
            log_entries = db.query(ExecuteLog).filter(*filters).all()
            return [row2dict(e) for e in log_entries]

    def get_current_execute_log_details(self, after=None):
        """Log lines of the in-progress run, or None when idle."""
        if self._execute_id is None:
            return None
        return self.get_execute_log_details(self._execute_id, after)
class EngineRunner(threading.Thread):
RunMessage = namedtuple('RunMessage', ['priority', 'ids'])
StopMessage = namedtuple('StopMessage', ['priority'])
def __init__(self, logger, settings_manager, trackers_manager, clients_manager, notifier_manager, **kwargs):
"""
:type logger: Logger
:type | |
# Source repository: zeou1/maggot_models (extraction artifact "<reponame>...<gh_stars>" removed)
import math
from collections import defaultdict
from operator import itemgetter
import colorcet as cc
import matplotlib
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.cm import ScalarMappable
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.utils import check_array, check_consistent_length
from graspy.embed import select_dimension, selectSVD
from graspy.models import SBMEstimator
from graspy.plot import heatmap
from graspy.utils import binarize, cartprod
from src.utils import get_sbm_prob, savefig
from .manual_colors import CLASS_COLOR_DICT
def _sort_inds(graph, inner_labels, outer_labels, sort_nodes):
    """Return the node permutation that sorts by outer-label frequency,
    then inner-label frequency (alphabetical on ties), and optionally by
    node edge-weight sums last."""
    frame = pd.DataFrame(
        {
            "inner_labels": inner_labels,
            "outer_labels": outer_labels,
        }
    )
    # Invert the frequencies so that ascending sort puts the most common
    # labels first while still breaking ties alphabetically.
    frame["inner_counts"] = len(inner_labels) - _get_freq_vec(inner_labels)
    frame["outer_counts"] = len(outer_labels) - _get_freq_vec(outer_labels)
    # Edge-weight sums (in + out); inverted for the same reason as above.
    edgesums = graph.sum(axis=1) + graph.sum(axis=0)
    frame["node_edgesums"] = edgesums.max() - edgesums
    sort_keys = ["outer_counts", "outer_labels", "inner_counts", "inner_labels"]
    if sort_nodes:
        sort_keys.append("node_edgesums")
    # mergesort is stable, so earlier keys dominate deterministically.
    ordered = frame.sort_values(by=sort_keys, kind="mergesort")
    return ordered.index.values
def _sort_graph(graph, inner_labels, outer_labels, sort_nodes):
    """Return *graph* with rows and columns permuted into label-sorted order."""
    order = _sort_inds(graph, inner_labels, outer_labels, sort_nodes)
    # Apply the same permutation to both axes.
    return graph[order, :][:, order]
def _get_freqs(inner_labels, outer_labels=None):
    """Compute label group sizes and their cumulative boundaries.

    Returns (inner_freq, inner_freq_cumsum, outer_freq, outer_freq_cumsum),
    where inner frequencies are computed separately inside each contiguous
    outer group, in order of first appearance (not alphabetical).
    """
    _, outer_freq = _unique_like(outer_labels)
    outer_freq_cumsum = np.hstack((0, outer_freq.cumsum()))
    # Count inner labels within each contiguous outer group.
    inner_freq = np.array([])
    for start, stop in zip(outer_freq_cumsum[:-1], outer_freq_cumsum[1:]):
        _, group_freq = _unique_like(inner_labels[start:stop])
        inner_freq = np.hstack([inner_freq, group_freq])
    inner_freq_cumsum = np.hstack((0, inner_freq.cumsum()))
    return inner_freq, inner_freq_cumsum, outer_freq, outer_freq_cumsum
def _get_freq_vec(vals):
# give each set of labels a vector corresponding to its frequency
_, inv, counts = np.unique(vals, return_counts=True, return_inverse=True)
count_vec = counts[inv]
return count_vec
def _unique_like(vals):
# gives output like
uniques, inds, counts = np.unique(vals, return_index=True, return_counts=True)
inds_sort = np.argsort(inds)
uniques = uniques[inds_sort]
counts = counts[inds_sort]
return uniques, counts
# assume that the graph has already been plotted in sorted form
def _plot_groups(
    ax, divider, graph, sorted_inds, inner_labels, outer_labels=None, fontsize=30
):
    """Overlay group boundary lines and bracket-style group labels on a
    plotted adjacency heatmap.

    :param ax: matplotlib axes the (already sorted) graph was drawn on
    :param divider: AxesDivider used to attach the bracket axes
    :param graph: adjacency matrix; used only for its shape here
    :param sorted_inds: permutation that was applied to the plotted graph;
        the labels are reordered with it
    :param inner_labels: per-node fine-grained group labels
    :param outer_labels: optional coarse group labels; when None a single
        dummy outer group is used
    :param fontsize: bracket label font size
    :returns: the same *ax*
    """
    inner_labels = np.array(inner_labels)
    plot_outer = True
    if outer_labels is None:
        outer_labels = np.ones_like(inner_labels)
        plot_outer = False
    # sorted_inds = _sort_inds(graph, inner_labels, outer_labels, False)
    inner_labels = inner_labels[sorted_inds]
    outer_labels = outer_labels[sorted_inds]
    inner_freq, inner_freq_cumsum, outer_freq, outer_freq_cumsum = _get_freqs(
        inner_labels, outer_labels
    )
    inner_unique, _ = _unique_like(inner_labels)
    outer_unique, _ = _unique_like(outer_labels)
    # n_verts = graph.shape[0]
    axline_kws = dict(linestyle="dashed", lw=0.9, alpha=0.3, zorder=3, color="grey")
    # draw lines
    # Vertical separators between inner groups (outermost edges skipped).
    for x in inner_freq_cumsum[1:-1]:
        ax.vlines(x, 0, graph.shape[0] + 1, **axline_kws)
        # ax.hlines(x, 0, graph.shape[1] + 1, **axline_kws)
    # add specific lines for the borders of the plot
    pad = 0.0001
    low = pad
    high = 1 - pad
    ax.plot((low, low), (low, high), transform=ax.transAxes, **axline_kws)
    ax.plot((low, high), (low, low), transform=ax.transAxes, **axline_kws)
    ax.plot((high, high), (low, high), transform=ax.transAxes, **axline_kws)
    ax.plot((low, high), (high, high), transform=ax.transAxes, **axline_kws)
    # generic curve that we will use for everything
    # tan() over an open interval, mirrored, gives a smooth bracket shape.
    lx = np.linspace(-np.pi / 2.0 + 0.05, np.pi / 2.0 - 0.05, 500)
    tan = np.tan(lx)
    curve = np.hstack((tan[::-1], tan))
    # divider = make_axes_locatable(ax)
    # inner curve generation
    inner_tick_loc = inner_freq.cumsum() - inner_freq / 2
    inner_tick_width = inner_freq / 2
    # outer curve generation
    outer_tick_loc = outer_freq.cumsum() - outer_freq / 2
    outer_tick_width = outer_freq / 2
    # top inner curves
    ax_x = divider.new_vertical(size="5%", pad=0.0, pack_start=True)
    ax.figure.add_axes(ax_x)
    _plot_brackets(
        ax_x,
        np.tile(inner_unique, len(outer_unique)),
        inner_tick_loc,
        inner_tick_width,
        curve,
        "inner",
        "x",
        graph.shape[1],
        fontsize,
    )
    # NOTE(review): plot_outer and outer_tick_* are computed but never used
    # in the visible body -- outer bracket drawing appears to have been
    # removed or is handled elsewhere; confirm before relying on it.
    return ax
def _plot_brackets(
    ax, group_names, tick_loc, tick_width, curve, level, axis, max_size, fontsize
):
    """Draw bracket curves and group-name labels along one axis.

    :param ax: dedicated (appended) axes the brackets are drawn into
    :param group_names: label text, one entry per bracket
    :param tick_loc: center of each bracket, in data coordinates
    :param tick_width: half-width of each bracket
    :param curve: the generic bracket curve (y values) to stretch per group
    :param level: "inner"/"outer" — currently unused in the body; kept for callers
    :param axis: "x" to draw along the bottom, "y" along the side
    :param max_size: data-coordinate extent of the labeled axis
    :param fontsize: label font size
    """
    for x0, width in zip(tick_loc, tick_width):
        # stretch the generic curve horizontally to span this group's range
        x = np.linspace(x0 - width, x0 + width, 1000)
        if axis == "x":
            ax.plot(x, curve, c="k")
            ax.patch.set_alpha(0)
        elif axis == "y":
            ax.plot(curve, x, c="k")
            ax.patch.set_alpha(0)
    # strip all default ticks/spines so only the brackets and labels remain
    ax.set_yticks([])
    ax.set_xticks([])
    ax.tick_params(axis=axis, which="both", length=0, pad=7)
    for direction in ["left", "right", "bottom", "top"]:
        ax.spines[direction].set_visible(False)
    if axis == "x":
        # labels at each bracket center, rotated to read vertically
        ax.set_xticks(tick_loc)
        ax.set_xticklabels(
            group_names,
            fontsize=fontsize,
            verticalalignment="center",
            horizontalalignment="right",
            rotation=90,
            rotation_mode="anchor",
        )
        # ax.xaxis.set_label_position("bottom")
        # ax.xaxis.tick_top()
        ax.xaxis.labelpad = 200
        ax.set_xlim(0, max_size)
        ax.tick_params(axis="x", which="major", pad=5 + fontsize / 4)
    elif axis == "y":
        ax.set_yticks(tick_loc)
        ax.set_yticklabels(group_names, fontsize=fontsize, verticalalignment="center")
        ax.set_ylim(0, max_size)
        # match the top-to-bottom orientation of the plotted matrix
        ax.invert_yaxis()
def incidence_plot(adj, classes, from_class):
    """Plots non-square adjacency, sorts by class label, sums input to columns for
    marginal

    Rows are restricted to the nodes whose class is in ``from_class``; nonzero
    entries are drawn as a scatter plot sized by edge weight, with a bar chart
    of column sums above and group brackets below.

    Parameters
    ----------
    adj : np.array
        n x n adjacency matrix
    classes : np.ndarray
        n-length indicator of class membership for sorting nodes
    from_class : str or list of str
        which class (or classes) to select on the left

    Returns
    -------
    ax
        matplotlib axes
    """
    sort_inds = _sort_inds(adj, classes, np.ones_like(classes), True)
    sort_adj = _sort_graph(adj, classes, np.ones_like(classes), True)
    sort_classes = classes[sort_inds]

    # Accept a single class name or a list of them.
    if not isinstance(from_class, list):
        from_class = [from_class]

    # Collect (deduplicated, sorted) row indices belonging to the selected classes.
    all_proj_inds = []
    for class_name in from_class:
        proj_inds = np.where(sort_classes == class_name)[0]
        all_proj_inds += list(proj_inds)
    all_proj_inds = np.unique(all_proj_inds)

    clipped_adj = sort_adj[all_proj_inds, :]

    plt.figure(figsize=(30, 10))
    # 1-based grid of (column, row) coordinates for every matrix cell
    xs, ys = np.meshgrid(
        range(1, clipped_adj.shape[1] + 1), range(1, clipped_adj.shape[0] + 1)
    )
    nonzero_inds = np.nonzero(clipped_adj.ravel())
    x = xs.ravel()[nonzero_inds]
    y = ys.ravel()[nonzero_inds]
    weights = clipped_adj.ravel()[nonzero_inds]
    ax = sns.scatterplot(x=x, y=y, size=weights, legend=False)
    plt.ylabel(from_class)
    plt.title(from_class, pad=100)

    # Marginal: column sums drawn as a bar chart in an axis appended on top.
    divider = make_axes_locatable(ax)
    ax_top = divider.new_vertical(size="25%", pad=0.0, pack_start=False)
    ax.figure.add_axes(ax_top)
    sums = clipped_adj.sum(axis=0)
    ax_top.bar(range(1, clipped_adj.shape[1] + 1), sums, width=5)
    ax_top.set_xlim((0, clipped_adj.shape[1]))
    ax_top.axis("off")
    # reference line; presumably a threshold of interest — confirm with callers
    ax_top.hlines(0.05, 0, clipped_adj.shape[1] + 1, color="r", linestyle="--")

    # Group separators / bracket labels along the bottom.
    ax = _plot_groups(
        ax, divider, clipped_adj, sort_inds, classes, outer_labels=None, fontsize=14
    )
    ax.set_xlim((0, clipped_adj.shape[1]))
    ax.set_ylim((0, clipped_adj.shape[0]))
    ax.axis("off")
    return ax
def screeplot(
    X,
    title="Scree plot",
    context="talk",
    font_scale=1,
    figsize=(10, 5),
    cumulative=False,
    show_first=40,
    n_elbows=4,
):
    r"""
    Plots the distribution of singular values for a matrix, either showing the
    raw distribution or an empirical CDF (depending on ``cumulative``)

    Parameters
    ----------
    X : np.ndarray (2D)
        input matrix
    title : string, default : 'Scree plot'
        plot title
    context : None, or one of {talk (default), paper, notebook, poster}
        Seaborn plotting context
    font_scale : float, optional, default: 1
        Separate scaling factor to independently scale the size of the font
        elements.
    figsize : tuple of length 2, default (10, 5)
        size of the figure (width, height)
    cumulative : boolean, default: False
        whether or not to plot a cumulative cdf of singular values
    show_first : int or None, default: 40
        whether to restrict the plot to the first ``show_first`` components
    n_elbows : int, default: 4
        number of elbows to estimate via ``select_dimension`` and highlight in red

    Returns
    -------
    ax : matplotlib axis object
    """
    _check_common_inputs(
        figsize=figsize, title=title, context=context, font_scale=font_scale
    )
    if show_first is not None:
        if not isinstance(show_first, int):
            msg = "show_first must be an int"
            raise TypeError(msg)
    if not isinstance(cumulative, bool):
        msg = "cumulative must be a boolean"
        raise TypeError(msg)
    # A 1-D input is treated as a single column.
    if X.ndim == 1:
        X = X.reshape(-1, 1)
    elbow_locs, elbow_vals = select_dimension(X, n_elbows=n_elbows)
    elbow_locs = np.array(elbow_locs)
    elbow_vals = np.array(elbow_vals)
    # Normalize the elbow values so the y-axis reads as a fraction of their sum.
    D = elbow_vals / elbow_vals.sum()
    if cumulative:
        y = np.cumsum(D[:show_first])
    else:
        y = D[:show_first]
    _ = plt.figure(figsize=figsize)
    ax = plt.gca()
    xlabel = "Component"
    if cumulative:
        ylabel = "Variance explained"
    else:
        ylabel = "Normalized singular value"
    with sns.plotting_context(context=context, font_scale=font_scale):
        rng = range(1, len(y) + 1)
        plt.scatter(rng, y)
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        # NOTE(review): elbow_locs indexes into the full singular-value spectrum,
        # but y only has len(elbow_vals) entries — this can raise IndexError when
        # an elbow location exceeds len(y). Confirm select_dimension's contract.
        plt.scatter(elbow_locs, y[elbow_locs - 1], c="r")
        plt.ylim((y.min() - y.min() / 10, y.max() + (y.max() / 10)))
    return ax
def _check_common_inputs(
figsize=None,
height=None,
title=None,
context=None,
font_scale=None,
legend_name=None,
title_pad=None,
hier_label_fontsize=None,
):
# Handle figsize
if figsize is not None:
if not isinstance(figsize, tuple):
msg = "figsize must be a tuple, not {}.".format(type(figsize))
raise TypeError(msg)
# Handle heights
if height is not None:
if not isinstance(height, (int, float)):
msg = "height must be an integer or float, not {}.".format(type(height))
raise TypeError(msg)
# Handle title
if title is not None:
if not isinstance(title, str):
msg = "title must be a string, not {}.".format(type(title))
raise TypeError(msg)
# Handle context
if context is not None:
if not isinstance(context, str):
msg = "context must be a string, not {}.".format(type(context))
raise TypeError(msg)
elif context not in ["paper", "notebook", "talk", "poster"]:
msg = "context must be one of (paper, notebook, talk, poster), \
not {}.".format(
context
)
raise ValueError(msg)
# Handle font_scale
if font_scale is not None:
if not isinstance(font_scale, (int, float)):
msg = "font_scale must be an integer or float, not {}.".format(
type(font_scale)
)
raise TypeError(msg)
| |
torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
tfo = tfo[0].numpy()
pto = pto[0].numpy()
tf_nans = np.copy(np.isnan(tfo))
pt_nans = np.copy(np.isnan(pto))
pto[tf_nans] = 0
tfo[tf_nans] = 0
pto[pt_nans] = 0
tfo[pt_nans] = 0
max_diff = np.amax(np.abs(tfo - pto))
self.assertLessEqual(max_diff, 4e-2)
    @tooslow
    def test_train_pipeline_custom_model(self):
        """Train and save/reload a Keras model built on each serializable MainLayer.

        For every ``*MainLayer`` class reachable from the model classes under
        test, builds a small classification head on top of it, fits one epoch,
        saves to HDF5, reloads with custom_objects, and runs a forward pass.
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # head_mask and decoder_head_mask has different shapes than other input args
        if "head_mask" in inputs_dict:
            del inputs_dict["head_mask"]
        if "decoder_head_mask" in inputs_dict:
            del inputs_dict["decoder_head_mask"]
        if "cross_attn_head_mask" in inputs_dict:
            del inputs_dict["cross_attn_head_mask"]
        # Collect every keras-serializable MainLayer subclass defined in the
        # modules of the model classes under test.
        tf_main_layer_classes = set(
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        )
        for main_layer_class in tf_main_layer_classes:
            # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
            if "T5" in main_layer_class.__name__:
                # Take the same values than in TFT5ModelTester for this shared layer
                shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
                config.use_cache = False
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)
            # Symbolic Keras inputs mirroring the shapes/dtypes of the test inputs
            # (batch dimension dropped).
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            if hasattr(self.model_tester, "num_labels"):
                num_labels = self.model_tester.num_labels
            else:
                num_labels = 2
            # Dummy all-ones targets; only the training plumbing is being tested.
            X = tf.data.Dataset.from_tensor_slices(
                (inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
            ).batch(1)
            hidden_states = main_layer(symbolic_inputs)[0]
            outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
            model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])
            model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
            model.fit(X, epochs=1)
            # Round-trip through HDF5 to verify the layer is truly serializable.
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                if "T5" in main_layer_class.__name__:
                    model = tf.keras.models.load_model(
                        filepath,
                        custom_objects={
                            main_layer_class.__name__: main_layer_class,
                            "TFSharedEmbeddings": TFSharedEmbeddings,
                        },
                    )
                else:
                    model = tf.keras.models.load_model(
                        filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                    )
                assert isinstance(model, tf.keras.Model)
                model(inputs_dict)
    def test_compile_tf_model(self):
        """Check each model can be wrapped, extended and compiled as a Keras model.

        After a save/from_pretrained round trip, a Dense head is attached to the
        model's first output and the extended model is compiled (not trained).
        """
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        max_input = getattr(self.model_tester, "max_position_embeddings", 512)
        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
        for model_class in self.all_model_classes:
            # Build symbolic inputs matching the model family's expected shape:
            # encoder-decoder models need both encoder and decoder ids; multiple
            # choice models take an extra choices dimension.
            if self.is_encoder_decoder:
                input_ids = {
                    "decoder_input_ids": tf.keras.Input(
                        batch_shape=(2, max_input),
                        name="decoder_input_ids",
                        dtype="int32",
                    ),
                    "input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
                }
            elif model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                input_ids = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
            else:
                input_ids = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")
            # Prepare our model
            model = model_class(config)
            model(self._prepare_for_class(inputs_dict, model_class))  # Model must be called before saving.
            # Let's load it from the disk to be sure we can use pretrained weights
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
            outputs_dict = model(input_ids)
            hidden_states = outputs_dict[0]
            # Add a dense layer on top to test integration with other keras modules
            outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
            # Compile extended model
            extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
            extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs_dict = model(inputs)
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs_keywords.pop("input_ids", None)
outputs_keywords = model(input_ids, **inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_attention_outputs(self):
        """Check that attention tensors come back with the expected count and
        shape, whether requested via inputs or via the model config, and that
        attentions are appended last in the output tuple."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # Fall back to seq_length when the tester doesn't define dedicated lengths.
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(min(out_len % 2, out_len % 5), 0)  # differentiation due to newly added cross_attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
            )
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
        for model_class in self.all_model_classes:
            # First pass: attentions requested through the inputs dict.
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
def test_headmasking(self):
if not self.test_head_masking:
return
random.Random().seed(42)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
random.Random().seed()
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init)
# Prepare head_mask
def prepare_layer_head_mask(i, attention_heads, num_hidden_layers):
if i == 0:
return tf.concat(
(tf.zeros(1, dtype=tf.float32), tf.ones(attention_heads - 1, dtype=tf.float32)), 0
)
elif i == num_hidden_layers - 1:
return tf.concat(
(tf.zeros(attention_heads - 1, dtype=tf.float32), tf.ones(1, dtype=tf.float32)), 0
)
else:
return tf.ones(attention_heads, dtype=tf.float32)
head_mask = tf.stack(
[
prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers)
for i in range(config.num_hidden_layers)
],
0,
)
inputs = self._prepare_for_class(inputs_dict, model_class).copy()
inputs["head_mask"] = head_mask
if model.config.is_encoder_decoder:
signature = inspect.signature(model.call)
arg_names = [*signature.parameters.keys()]
if "decoder_head_mask" in arg_names: # necessary diferentiation because of T5 model
inputs["decoder_head_mask"] = head_mask
if "cross_attn_head_mask" in arg_names:
inputs["cross_attn_head_mask"] = head_mask
outputs = model(**inputs, return_dict=True)
def check_attentions_validity(attentions):
# Remove Nan
for t in attentions:
self.assertLess(
(tf.math.reduce_sum(tf.cast(tf.math.is_nan(t), tf.float32))).numpy(), (tf.size(t) / 4).numpy()
) # Check we don't have more than 25% nans (arbitrary)
attentions = [
tf.where(tf.math.is_nan(t), 0.0, t) for t in attentions
] # remove them (the test is less complete)
self.assertAlmostEqual(tf.math.reduce_sum(attentions[0][..., 0, :, :]).numpy(), 0.0)
self.assertNotEqual(tf.math.reduce_sum(attentions[0][..., -1, :, :]).numpy(), 0.0)
if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules
self.assertNotEqual(tf.math.reduce_sum(attentions[1][..., 0, :, :]).numpy(), 0.0)
self.assertAlmostEqual(tf.math.reduce_sum(attentions[-1][..., -2, :, :]).numpy(), 0.0)
self.assertNotEqual(tf.math.reduce_sum(attentions[-1][..., -1, :, :]).numpy(), 0.0)
if model.config.is_encoder_decoder:
check_attentions_validity(outputs.encoder_attentions)
check_attentions_validity(outputs.decoder_attentions)
if "cross_attn_head_mask" in arg_names:
check_attentions_validity(outputs.cross_attentions)
else:
check_attentions_validity(outputs.attentions)
    def test_hidden_states_output(self):
        """Check hidden states are returned with the expected count and shape,
        whether requested via the inputs dict or via the model config."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def check_hidden_states_output(config, inputs_dict, model_class):
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            # +1 accounts for the embedding output unless the tester overrides it.
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            if model.config.is_encoder_decoder:
                encoder_hidden_states = outputs.encoder_hidden_states
                decoder_hidden_states = outputs.decoder_hidden_states
                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(encoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(encoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(decoder_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(decoder_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
            else:
                hidden_states = outputs.hidden_states
                self.assertEqual(config.output_attentions, False)
                self.assertEqual(len(hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
        for model_class in self.all_model_classes:
            # Requested through the inputs dict...
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(config, inputs_dict, model_class)
            # ...and through the config.
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_lm_models = (
get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING)
+ get_values(TF_MODEL_FOR_MASKED_LM_MAPPING)
+ get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
)
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in list_lm_models:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(inputs_dict)
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
else:
inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)
inputs = self._prepare_for_class(inputs, model_class)
model(inputs)
@tooslow
def test_graph_mode_with_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(inputs_dict)
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
else:
| |
LB_ID, LISTENER_ID, APP_PROFILE_ID,
LB_VS_ID)
mock_create_pp.assert_called_once()
mock_successful_completion = (
self.lbv2_driver.listener.successful_completion)
mock_successful_completion.assert_called_with(self.context,
listener,
delete=False)
    def test_create_listener_with_session_persistence_fail(self):
        """Creating a TCP listener whose default pool has session persistence
        must be rejected with BadRequest.

        The pool binding lookup is stubbed to return None — presumably the
        condition that makes the driver refuse the request for a non-HTTP
        listener; confirm against the driver's create() logic.
        """
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool_persistency.id,
                                      LB_ID, 'TCP', protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool_persistency)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding:
            mock_get_floatingips.return_value = []
            mock_get_lb_binding.return_value = LB_BINDING
            mock_get_pool_binding.return_value = None
            self.assertRaises(n_exc.BadRequest,
                              self.edge_driver.listener.create,
                              self.context, listener)
def test_update(self):
new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
'listener1-new', 'new-description',
None, LB_ID, protocol_port=80,
loadbalancer=self.lb)
with mock.patch.object(self.core_plugin, 'get_floatingips'
) as mock_get_floatingips, \
mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
) as mock_get_listener_binding:
mock_get_floatingips.return_value = []
mock_get_listener_binding.return_value = LISTENER_BINDING
self.edge_driver.listener.update(self.context, self.listener,
new_listener)
mock_successful_completion = (
self.lbv2_driver.listener.successful_completion)
mock_successful_completion.assert_called_with(self.context,
new_listener,
delete=False)
    def test_update_with_default_pool(self):
        """Updating a listener that carries a default pool completes successfully
        and also refreshes the stored pool binding."""
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new', 'new-description',
                                          self.pool, LB_ID, protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=self.pool)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
                              ) as mock_get_listener_binding,\
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding,\
            mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'):
            mock_get_floatingips.return_value = []
            mock_get_listener_binding.return_value = LISTENER_BINDING
            mock_get_pool_binding.return_value = POOL_BINDING
            self.edge_driver.listener.update(self.context, self.listener,
                                             new_listener)
            # Success must be reported to the LBaaS layer with delete=False.
            mock_successful_completion = (
                self.lbv2_driver.listener.successful_completion)
            mock_successful_completion.assert_called_with(self.context,
                                                          new_listener,
                                                          delete=False)
    def test_update_with_session_persistence(self):
        """Updating an HTTP listener whose pool has session persistence must
        create a persistence profile on the backend exactly once."""
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new', 'new-description',
                                          self.pool_persistency.id,
                                          LB_ID, protocol='HTTP',
                                          protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=self.pool_persistency)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
                              ) as mock_get_listener_binding,\
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding,\
            mock.patch.object(self.vs_client, 'update',
                              return_value={'id': LB_VS_ID}), \
            mock.patch.object(self.pp_client, 'create'
                              ) as mock_create_pp, \
            mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'):
            mock_get_floatingips.return_value = []
            mock_get_listener_binding.return_value = LISTENER_BINDING
            mock_get_pool_binding.return_value = POOL_BINDING
            self.edge_driver.listener.update(self.context, self.listener,
                                             new_listener)
            # One persistence profile created, and success reported upstream.
            mock_create_pp.assert_called_once()
            mock_successful_completion = (
                self.lbv2_driver.listener.successful_completion)
            mock_successful_completion.assert_called_with(self.context,
                                                          new_listener,
                                                          delete=False)
    def test_update_with_session_persistence_change(self):
        """Switching the default pool to one with a different persistence type
        must create the new profile and delete the old one."""
        old_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1', 'description',
                                          self.pool_persistency.id,
                                          LB_ID, protocol='HTTP',
                                          protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=self.pool_persistency)
        # New pool uses SOURCE_IP persistence instead of the old pool's type.
        sess_persistence = lb_models.SessionPersistence(
            POOL_ID, 'SOURCE_IP')
        pool_persistency = lb_models.Pool('new_pool_id', LB_TENANT_ID,
                                          'pool1', '', None, 'HTTP',
                                          'ROUND_ROBIN', loadbalancer_id=LB_ID,
                                          listener=self.listener,
                                          listeners=[self.listener],
                                          loadbalancer=self.lb,
                                          session_persistence=sess_persistence)
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new', 'new-description',
                                          pool_persistency.id,
                                          LB_ID, protocol='HTTP',
                                          protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=pool_persistency)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
                              ) as mock_get_listener_binding,\
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding, \
            mock.patch.object(self.pp_client, 'create'
                              ) as mock_create_pp, \
            mock.patch.object(self.pp_client, 'delete'
                              ) as mock_delete_pp, \
            mock.patch.object(lb_utils, 'get_pool_tags'
                              ) as mock_get_pool_tags, \
            mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'):
            mock_get_pool_tags.return_value = []
            mock_get_floatingips.return_value = []
            mock_get_listener_binding.return_value = LISTENER_BINDING
            mock_get_pool_binding.return_value = POOL_BINDING
            self.edge_driver.listener.update(
                self.context, old_listener,
                new_listener)
            # NOTE(review): the expected display_name contains a literal "..."
            # ('persistence_pool1_new_p...ol_id'); this looks like a
            # middle-truncated name from the nsxlib name-shortening helper —
            # confirm against lb_utils naming if this assertion ever fails.
            mock_create_pp.assert_called_once_with(
                display_name='persistence_pool1_new_p...ol_id',
                resource_type='LbSourceIpPersistenceProfile',
                tags=mock.ANY)
            # No reason to check parameters here, it's
            # all mocked out
            mock_delete_pp.assert_called_once()
def test_delete(self):
with mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
) as mock_get_listener_binding, \
mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
) as mock_get_lb_binding, \
mock.patch.object(self.service_client, 'get'
) as mock_get_lb_service, \
mock.patch.object(self.service_client, 'remove_virtual_server'
) as mock_remove_virtual_server, \
mock.patch.object(self.app_client, 'delete'
) as mock_delete_app_profile, \
mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id'
) as mock_get_neutron_from_nsx_router_id, \
mock.patch.object(self.vs_client, 'delete'
) as mock_delete_virtual_server, \
mock.patch.object(nsx_db, 'delete_nsx_lbaas_listener_binding',
) as mock_delete_listener_binding:
mock_get_listener_binding.return_value = LISTENER_BINDING
mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
mock_get_lb_binding.return_value = LB_BINDING
mock_get_lb_service.return_value = {
'id': LB_SERVICE_ID,
'virtual_server_ids': [LB_VS_ID]}
self.edge_driver.listener.delete(self.context, self.listener)
mock_remove_virtual_server.assert_called_with(LB_SERVICE_ID,
LB_VS_ID)
mock_delete_virtual_server.assert_called_with(LB_VS_ID)
mock_delete_app_profile.assert_called_with(APP_PROFILE_ID)
mock_delete_listener_binding.assert_called_with(
self.context.session, LB_ID, LISTENER_ID)
mock_successful_completion = (
self.lbv2_driver.listener.successful_completion)
mock_successful_completion.assert_called_with(self.context,
self.listener,
delete=True)
class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
    def setUp(self):
        # Pool tests reuse the common edge-driver fixtures from the base class.
        super(TestEdgeLbaasV2Pool, self).setUp()
    @property
    def _tested_entity(self):
        # Identifies the LBaaS entity exercised by this test class; read by the
        # shared base-class machinery.
        return 'pool'
    def test_create(self):
        """Creating a pool without persistence must create the backend pool,
        store/refresh its bindings, attach it to the listener's virtual server
        with no persistence profile, and report success."""
        with mock.patch.object(self.pool_client, 'create'
                               ) as mock_create_pool, \
            mock.patch.object(nsx_db, 'add_nsx_lbaas_pool_binding'
                              ) as mock_add_pool_binding, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
                              ) as mock_get_listener_binding, \
            mock.patch.object(self.pp_client, 'create'
                              ) as mock_create_pp, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_vs_update, \
            mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'
                              ) as mock_update_pool_binding:
            mock_create_pool.return_value = {'id': LB_POOL_ID}
            mock_get_listener_binding.return_value = LISTENER_BINDING
            self.edge_driver.pool.create(self.context, self.pool)
            mock_add_pool_binding.assert_called_with(
                self.context.session, LB_ID, POOL_ID, LB_POOL_ID)
            # self.pool has no session persistence, so no profile is created.
            mock_create_pp.assert_not_called()
            mock_vs_update.assert_called_once_with(
                LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=None)
            mock_update_pool_binding.assert_called_with(
                self.context.session, LB_ID, POOL_ID, LB_VS_ID)
            mock_successful_completion = (
                self.lbv2_driver.pool.successful_completion)
            mock_successful_completion.assert_called_with(self.context,
                                                          self.pool,
                                                          delete=False)
    def _test_create_with_persistency(self, vs_data, verify_func):
        """Shared driver for the create-with-session-persistence tests.

        :param vs_data: dict returned by the mocked virtual-server ``get``;
            presence/absence of ``persistence_profile_id`` steers the
            driver into the update-existing vs. create-new profile path.
        :param verify_func: callback that receives the profile
            create/update, pool-binding-update and VS-update mocks and
            asserts the scenario-specific expectations.
        """
        with mock.patch.object(self.pool_client, 'create'
                               ) as mock_create_pool, \
            mock.patch.object(nsx_db, 'add_nsx_lbaas_pool_binding'
                              ) as mock_add_pool_binding, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
                              ) as mock_get_listener_binding, \
            mock.patch.object(self.pp_client, 'create'
                              ) as mock_create_pp, \
            mock.patch.object(self.pp_client, 'update', return_value=None,
                              ) as mock_update_pp, \
            mock.patch.object(self.vs_client, 'get'
                              ) as mock_vs_get, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_vs_update, \
            mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'
                              ) as mock_update_pool_binding:
            mock_vs_get.return_value = vs_data
            mock_create_pool.return_value = {'id': LB_POOL_ID}
            mock_create_pp.return_value = {'id': LB_PP_ID}
            mock_get_listener_binding.return_value = LISTENER_BINDING
            self.edge_driver.pool.create(self.context, self.pool_persistency)
            mock_add_pool_binding.assert_called_with(
                self.context.session, LB_ID, POOL_ID, LB_POOL_ID)
            # scenario-specific assertions are delegated to the caller
            verify_func(mock_create_pp, mock_update_pp,
                        mock_update_pool_binding, mock_vs_update)
            mock_successful_completion = (
                self.lbv2_driver.pool.successful_completion)
            mock_successful_completion.assert_called_with(
                self.context, self.pool_persistency, delete=False)
def test_create_with_persistency(self):
def verify_func(mock_create_pp, mock_update_pp,
mock_update_pool_binding, mock_vs_update):
mock_create_pp.assert_called_once_with(
resource_type='LbCookiePersistenceProfile',
cookie_mode='INSERT',
cookie_name='meh_cookie',
display_name=mock.ANY,
tags=mock.ANY)
mock_update_pp.assert_not_called()
mock_update_pool_binding.assert_called_with(
self.context.session, LB_ID, POOL_ID, LB_VS_ID)
mock_vs_update.assert_called_once_with(
LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=LB_PP_ID)
vs_data = {'id': LB_VS_ID}
self._test_create_with_persistency(vs_data, verify_func)
def test_create_with_persistency_existing_profile(self):
def verify_func(mock_create_pp, mock_update_pp,
mock_update_pool_binding, mock_vs_update):
mock_create_pp.assert_not_called()
mock_update_pp.assert_called_once_with(
LB_PP_ID,
resource_type='LbCookiePersistenceProfile',
cookie_mode='INSERT',
cookie_name='meh_cookie',
display_name=mock.ANY,
tags=mock.ANY)
mock_update_pool_binding.assert_called_with(
self.context.session, LB_ID, POOL_ID, LB_VS_ID)
mock_vs_update.assert_called_once_with(
LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=LB_PP_ID)
vs_data = {'id': LB_VS_ID,
'persistence_profile_id': LB_PP_ID}
self._test_create_with_persistency(vs_data, verify_func)
def test_create_with_persistency_no_listener(self):
def verify_func(mock_create_pp, mock_update_pp,
mock_update_pool_binding, mock_vs_update):
mock_create_pp.assert_not_called()
mock_update_pp.assert_not_called()
mock_update_pool_binding.assert_not_called()
mock_vs_update.assert_not_called()
vs_data = {'id': LB_VS_ID,
'persistence_profile_id': LB_PP_ID}
self.pool_persistency.listener = None
self.pool_persistency.listeners = []
self._test_create_with_persistency(vs_data, verify_func)
def test_create_multiple_listeners(self):
"""Verify creation will fail if multiple listeners are set"""
pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '',
None, 'HTTP', 'ROUND_ROBIN',
loadbalancer_id=LB_ID,
listeners=[self.listener,
self.https_listener],
loadbalancer=self.lb)
self.assertRaises(n_exc.BadRequest,
self.edge_driver.pool.create,
self.context, pool)
def test_update(self):
new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '',
None, 'HTTP', 'LEAST_CONNECTIONS',
listener=self.listener)
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
) as mock_get_pool_binding:
mock_get_pool_binding.return_value = POOL_BINDING
self.edge_driver.pool.update(self.context, self.pool, new_pool)
mock_successful_completion = (
self.lbv2_driver.pool.successful_completion)
mock_successful_completion.assert_called_with(self.context,
new_pool,
delete=False)
def test_update_multiple_listeners(self):
"""Verify update action will fail if multiple listeners are set"""
new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '',
None, 'HTTP', 'ROUND_ROBIN',
loadbalancer_id=LB_ID,
listeners=[self.listener,
self.https_listener],
loadbalancer=self.lb)
with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
) as mock_get_pool_binding:
mock_get_pool_binding.return_value = POOL_BINDING
self.assertRaises(n_exc.BadRequest,
self.edge_driver.pool.update,
self.context, self.pool, new_pool)
    def _test_update_with_persistency(self, vs_data, old_pool, new_pool,
                                      verify_func):
        """Shared driver for the update-with-session-persistence tests.

        :param vs_data: dict returned by the mocked virtual-server ``get``;
            with 'persistence_profile_id' it simulates a VS that already
            carries a profile (used by the remove-persistency case).
        :param old_pool: pool model before the update.
        :param new_pool: pool model after the update.
        :param verify_func: callback receiving the profile
            create/update/delete and VS-update mocks for the
            scenario-specific assertions.
        """
        with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                               ) as mock_get_pool_binding, \
            mock.patch.object(self.pp_client, 'create'
                              ) as mock_create_pp, \
            mock.patch.object(self.pp_client, 'update', return_value=None,
                              ) as mock_update_pp, \
            mock.patch.object(self.pp_client, 'delete', return_value=None,
                              ) as mock_delete_pp, \
            mock.patch.object(self.vs_client, 'get'
                              ) as mock_vs_get, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_vs_update:
            mock_vs_get.return_value = vs_data
            mock_get_pool_binding.return_value = POOL_BINDING
            mock_create_pp.return_value = {'id': LB_PP_ID}
            self.edge_driver.pool.update(self.context, old_pool, new_pool)
            # scenario-specific assertions are delegated to the caller
            verify_func(mock_create_pp, mock_update_pp,
                        mock_delete_pp, mock_vs_update)
            mock_successful_completion = (
                self.lbv2_driver.pool.successful_completion)
            mock_successful_completion.assert_called_with(
                self.context, new_pool, delete=False)
def test_update_with_persistency(self):
def verify_func(mock_create_pp, mock_update_pp,
mock_delete_pp, mock_vs_update):
mock_create_pp.assert_called_once_with(
resource_type='LbCookiePersistenceProfile',
cookie_mode='INSERT',
cookie_name='meh_cookie',
display_name=mock.ANY,
tags=mock.ANY)
mock_update_pp.assert_not_called()
mock_delete_pp.assert_not_called()
mock_vs_update.assert_called_once_with(
LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=LB_PP_ID)
vs_data = {'id': LB_VS_ID}
self._test_update_with_persistency(vs_data, self.pool,
self.pool_persistency, verify_func)
def test_update_remove_persistency(self):
def verify_func(mock_create_pp, mock_update_pp,
mock_delete_pp, mock_vs_update):
mock_create_pp.assert_not_called()
mock_update_pp.assert_not_called()
mock_delete_pp.assert_called_with(LB_PP_ID)
mock_vs_update.assert_called_once_with(
LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=None)
vs_data = {'id': LB_VS_ID,
'persistence_profile_id': LB_PP_ID}
self._test_update_with_persistency(vs_data, self.pool_persistency,
self.pool, verify_func)
    def test_delete(self):
        """Delete a pool: VS detached, NSX pool removed, binding deleted."""
        with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                               ) as mock_get_pool_binding, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_update_virtual_server, \
            mock.patch.object(self.pool_client, 'delete'
                              ) as mock_delete_pool, \
            mock.patch.object(nsx_db, 'delete_nsx_lbaas_pool_binding'
                              ) as mock_delete_pool_binding, \
            mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id'
                              ) as mock_get_neutron_from_nsx_router_id, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding:
            mock_get_pool_binding.return_value = POOL_BINDING
            mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
            # no LB binding -> the LB service itself must be left alone
            mock_get_lb_binding.return_value = None
            self.edge_driver.pool.delete(self.context, self.pool)
            # the virtual server is detached from pool and profile first
            mock_update_virtual_server.assert_called_with(
                LB_VS_ID, persistence_profile_id=None, pool_id=None)
            mock_delete_pool.assert_called_with(LB_POOL_ID)
            mock_delete_pool_binding.assert_called_with(
                self.context.session, LB_ID, POOL_ID)
            mock_successful_completion = (
                self.lbv2_driver.pool.successful_completion)
            mock_successful_completion.assert_called_with(self.context,
                                                          self.pool,
                                                          delete=True)
    def test_delete_with_persistency(self):
        """Deleting a pool with persistence also removes its profile."""
        with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                               ) as mock_get_pool_binding, \
            mock.patch.object(self.vs_client, 'get'
                              ) as mock_vs_get, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_update_virtual_server, \
            mock.patch.object(self.pool_client, 'delete'
                              ) as mock_delete_pool, \
            mock.patch.object(self.pp_client, 'delete', return_value=None,
                              ) as mock_delete_pp, \
            mock.patch.object(nsx_db, 'delete_nsx_lbaas_pool_binding'
                              ) as mock_delete_pool_binding, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding:
            mock_get_pool_binding.return_value = POOL_BINDING
            mock_get_lb_binding.return_value = None
            # the VS reports an attached persistence profile
            mock_vs_get.return_value = {'id': LB_VS_ID,
                                        'persistence_profile_id': LB_PP_ID}
            self.edge_driver.pool.delete(self.context, self.pool_persistency)
            # the profile must be deleted along with the pool
            mock_delete_pp.assert_called_once_with(LB_PP_ID)
            mock_update_virtual_server.assert_called_once_with(
                LB_VS_ID, persistence_profile_id=None, pool_id=None)
            mock_delete_pool.assert_called_with(LB_POOL_ID)
            mock_delete_pool_binding.assert_called_with(
                self.context.session, LB_ID, POOL_ID)
            mock_successful_completion = (
                self.lbv2_driver.pool.successful_completion)
            mock_successful_completion.assert_called_with(
                self.context, self.pool_persistency, delete=True)
    def _verify_create(self, res_type, cookie_name, cookie_mode,
                       mock_create_pp, mock_update_pp):
        """Assert that a brand-new persistence profile was created.

        ``cookie_name``/``cookie_mode`` are only expected in the create
        call for cookie-based profiles; source-ip profiles are created
        without cookie kwargs.  ``update`` must not be called at all.
        """
        if cookie_name:
            mock_create_pp.assert_called_once_with(
                resource_type=res_type,
                cookie_name=cookie_name,
                cookie_mode=cookie_mode,
                display_name=mock.ANY,
                tags=mock.ANY)
        else:
            mock_create_pp.assert_called_once_with(
                resource_type=res_type,
                display_name=mock.ANY,
                tags=mock.ANY)
        # Compare tags - kw args are the last item of a mock call tuple
        self.assertItemsEqual(mock_create_pp.mock_calls[0][-1]['tags'],
            [{'scope': 'os-lbaas-lb-id', 'tag': 'xxx-xxx'},
             {'scope': 'os-lbaas-lb-name', 'tag': 'lb1'},
             {'scope': 'os-lbaas-listener-id', 'tag': 'listener-x'}])
        mock_update_pp.assert_not_called()
    def _verify_update(self, res_type, cookie_name, cookie_mode,
                       mock_create_pp, mock_update_pp):
        """Assert the existing profile (LB_PP_ID) was updated in place.

        Mirror image of _verify_create: ``update`` receives the profile
        id as its first positional argument and ``create`` must not be
        called.
        """
        if cookie_name:
            mock_update_pp.assert_called_once_with(
                LB_PP_ID,
                resource_type=res_type,
                cookie_name=cookie_name,
                cookie_mode=cookie_mode,
                display_name=mock.ANY,
                tags=mock.ANY)
        else:
            mock_update_pp.assert_called_once_with(
                LB_PP_ID,
                resource_type=res_type,
                display_name=mock.ANY,
                tags=mock.ANY)
        # Compare tags - kw args are the last item of a mock call tuple
        self.assertItemsEqual(mock_update_pp.mock_calls[0][-1]['tags'],
            [{'scope': 'os-lbaas-lb-id', 'tag': 'xxx-xxx'},
             {'scope': 'os-lbaas-lb-name', 'tag': 'lb1'},
             {'scope': 'os-lbaas-listener-id', 'tag': 'listener-x'}])
        mock_create_pp.assert_not_called()
def _verify_switch(self, res_type, cookie_name, cookie_mode,
mock_create_pp, mock_update_pp):
if cookie_name:
mock_create_pp.assert_called_once_with(
resource_type=res_type,
cookie_name=cookie_name,
cookie_mode=cookie_mode,
display_name=mock.ANY,
tags=mock.ANY)
else:
mock_create_pp.assert_called_once_with(
LB_PP_ID,
resource_type=res_type,
display_name=mock.ANY,
tags=mock.ANY)
# Compare tags - kw args are the last item of a mock call tuple
self.assertItemsEqual(mock_create_pp.mock_calls[0][-1]['tags'],
[{'scope': 'os-lbaas-lb-id', 'tag': 'xxx-xxx'},
{'scope': 'os-lbaas-lb-name', 'tag': 'lb1'},
{'scope': 'os-lbaas-listener-id', 'tag': 'listener-x'}])
def _verify_delete(self, res_type, cookie_name, cookie_mode,
mock_create_pp, mock_update_pp):
# do not check delete mock as deletion is not done in
# setup_session_persistence
mock_create_pp.assert_not_called()
mock_update_pp.assert_not_called()
    def _test_setup_session_persistence(self, session_persistence,
                                        res_type, vs_data, verify_func,
                                        cookie_name=None, cookie_mode=None,
                                        switch_type=False):
        """Exercise lb_utils.setup_session_persistence directly.

        :param session_persistence: SessionPersistence model (or None) to
            attach to the test pool before translating it.
        :param res_type: expected NSX persistence profile resource_type.
        :param vs_data: mocked virtual-server dict; the presence of
            'persistence_profile_id' selects the update/switch paths.
        :param verify_func: one of the ``_verify_*`` helpers above.
        :param cookie_name: expected cookie name (cookie profiles only).
        :param cookie_mode: expected cookie mode (cookie profiles only).
        :param switch_type: True when the persistence type is changing,
            which forces a new profile plus a delete post-function.
        """
        with mock.patch.object(self.pp_client, 'create'
                               ) as mock_create_pp, \
            mock.patch.object(self.pp_client, 'update', return_value=None,
                              ) as mock_update_pp:
            mock_create_pp.return_value = {'id': LB_PP_ID}
            self.pool.session_persistence = session_persistence
            pool_dict = self.edge_driver.pool.translator(self.pool)
            list_dict = self.edge_driver.listener.translator(self.listener)
            pp_id, post_func = lb_utils.setup_session_persistence(
                self.nsxlib, pool_dict, [], switch_type, list_dict,
                vs_data)
            if session_persistence:
                self.assertEqual(LB_PP_ID, pp_id)
            else:
                self.assertIsNone(pp_id)
            if not session_persistence or switch_type:
                # Also verify post_func for delete
                self.assertEqual((self.nsxlib, LB_PP_ID,),
                                 post_func.args)
            verify_func(res_type, cookie_name, cookie_mode,
                        mock_create_pp, mock_update_pp)
def test_setup_session_persistence_sourceip_new_profile(self):
sess_persistence = lb_models.SessionPersistence(POOL_ID, 'SOURCE_IP')
res_type = 'LbSourceIpPersistenceProfile'
self._test_setup_session_persistence(
sess_persistence, | |
color:#a5c261;">\'color\'</span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#a9b7c6;">: [</span><span style=" font-family:\'Consolas\'; font-si' \
'ze:9.8pt; color:#6897bb;">40</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#c' \
'c7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">40</span><' \
'span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-' \
'family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">40</span><span style=" font-family:\'Consolas\'' \
'; font-size:9.8pt; color:#a9b7c6;">]}]} </span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#808080;"># Other buttons can be added to the list if desired<br /><br />###############' \
'#<br /># inputWidgets #<br />################<br /># Stretch<br /></span><span style=" font-family:\'' \
'Consolas\'; font-size:9.8pt; color:#a9b7c6;">{</span><span style=" font-family:\'Consolas\'; font-s' \
'ize:9.8pt; color:#a5c261;">\'type\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; c' \
'olor:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'s' \
'tretch\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><sp' \
'an style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'</span><span style=' \
'" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#a5c261;">\'Stretch Widget\'</span><span style=" font-family:\'Cons' \
'olas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:' \
'9.8pt; color:#a5c261;">\'share\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; colo' \
'r:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False<' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">}<br /><br /></span><' \
'span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#808080;"># Spacer<br /></span><span s' \
'tyle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">{</span><span style=" font-family:' \
'\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'type\'</span><span style=" font-family:\'Consolas\'' \
'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt;' \
' color:#a5c261;">\'spacer\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc' \
'7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'' \
'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style' \
'=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'This is a tip\'</span><span style=" ' \
'font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Cons' \
'olas\'; font-size:9.8pt; color:#a5c261;">\'share\'</span><span style=" font-family:\'Consolas\'; fon' \
't-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; colo' \
'r:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, <' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'size\'</span><span ' \
'style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-famil' \
'y:\'Consolas\'; font-size:9.8pt; color:#6897bb;">10</span><span style=" font-family:\'Consolas\'; ' \
'font-size:9.8pt; color:#a9b7c6;">}<br /><br /></span><span style=" font-family:\'Consolas\'; font-si' \
'ze:9.8pt; color:#808080;"># Separator<br /></span><span style=" font-family:\'Consolas\'; font-size:' \
'9.8pt; color:#a9b7c6;">{</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c26' \
'1;">\'type\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span' \
'><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'separator\'</span><span ' \
'style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-famil' \
'y:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'</span><span style=" font-family:\'Conso' \
'las\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#a5c261;">\'This is a tip\'</span><span style=" font-family:\'Consolas\'; font-size:9.8p' \
't; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;"' \
'>\'share\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><' \
'span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" fo' \
'nt-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consol' \
'as\'; font-size:9.8pt; color:#a5c261;">\'vertical\'</span><span style=" font-family:\'Consolas\'; fo' \
'nt-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; col' \
'or:#a5c261;">\'\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">}<b' \
'r /><br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#808080;"># LineEdit' \
'<br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">{</span><span ' \
'style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'type\'</span><span style=" font' \
'-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas' \
'\'; font-size:9.8pt; color:#a5c261;">\'lineEdit\'</span><span style=" font-family:\'Consolas\'; font' \
'-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color' \
':#a5c261;">\'label\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">' \
': </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'Line Edit:\'</sp' \
'an><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" f' \
'ont-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'</span><span style=" font-famil' \
'y:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; fo' \
'nt-size:9.8pt; color:#a5c261;">\'This is a tip\'</span><span style=" font-family:\'Consolas\'; font-' \
'size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:' \
'#a5c261;">\'color\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">:' \
' [</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">10</span><span sty' \
'le=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'' \
'Consolas\'; font-size:9.8pt; color:#6897bb;">20</span><span style=" font-family:\'Consolas\'; font-' \
'size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:' \
'#6897bb;">30</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">]</span>' \
'<span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font' \
'-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'share\'</span><span style=" font-family:\'C' \
'onsolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-si' \
'ze:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color' \
':#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'save\'' \
'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span styl' \
'e=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-family' \
':\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; fon' \
't-size:9.8pt; color:#a5c261;">\'text\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt' \
'; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">' \
'\'\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span s' \
'tyle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'placeholderText\'</span><span st' \
'yle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:' \
'\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'Placeholder\'</span><span style=" font-family:\'Con' \
'solas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size' \
':9.8pt; color:#a5c261;">\'eval\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; colo' \
'r:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False<' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=' \
'" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'errorIfEmpty\'</span><span style=" fo' \
'nt-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consol' \
'as\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size' \
':9.8pt; color:#a9b7c6;">}<br /><br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt;' \
' color:#808080;"># CmdLineEdit<br /></span><span style=" font-family:\'Consolas\'; font-size:9.8pt; ' \
'color:#a9b7c6;">{</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'t' \
'ype\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span ' \
'style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'cmdLineEdit\'</span><span style' \
'=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'C' \
'onsolas\'; font-size:9.8pt; color:#a5c261;">\'label\'</span><span style=" font-family:\'Consolas\'; ' \
'font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; c' \
'olor:#a5c261;">\'Cmd Line Edit:\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; col' \
'or:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'too' \
'lTip\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span' \
' style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'This is a tip\'</span><span st' \
'yle=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:' \
'\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'color\'</span><span style=" font-family:\'Consolas\'' \
'; font-size:9.8pt; color:#a9b7c6;">: [</span><span style=" font-family:\'Consolas\'; font-size:9.8p' \
't; color:#6897bb;">10</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;"' \
'>, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">20</span><span st' \
'yle=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:' \
'\'Consolas\'; font-size:9.8pt; color:#6897bb;">30</span><span style=" font-family:\'Consolas\'; font' \
'-size:9.8pt; color:#a9b7c6;">]</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:' \
'#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'share\'' \
'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span styl' \
'e=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-family' \
':\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; fon' \
't-size:9.8pt; color:#a5c261;">\'save\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt' \
'; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">' \
'False</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span ' \
'style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'text\'</span><span style=" font' \
'-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas' \
'\'; font-size:9.8pt; color:#a5c261;">\'\'</span><span style=" font-family:\'Consolas\'; font-size:9.' \
'8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261' \
';">\'placeholderText\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;' \
'">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'Placeholder\'<' \
'/span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=' \
'" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'eval\'</span><span style=" font-famil' \
'y:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; fo' \
'nt-size:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; ' \
'color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'' \
'errorIfEmpty\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </sp' \
'an><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=' \
'" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#a5c261;">\'buttonCommand\'</span><span style=" font-family:\'Conso' \
'las\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#8888c6;">None</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7' \
'832;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'buttonLabe' \
'l\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span st' \
'yle=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'Run\'</span><span style=" font-fa' \
'mily:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\';' \
' font-size:9.8pt; color:#a5c261;">\'buttonToolTip\'</span><span style=" font-family:\'Consolas\'; fo' \
'nt-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; col' \
'or:#a5c261;">\'Run the command and return the value\'</span><span style=" font-family:\'Consolas\'; ' \
'font-size:9.8pt; color:#a9b7c6;">}<br /><br /></span><span style=" font-family:\'Consolas\'; font-si' \
'ze:9.8pt; color:#808080;"># Browse<br /></span><span style=" font-family:\'Consolas\'; font-size:9.8' \
'pt; color:#a9b7c6;">{</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;"' \
'>\'type\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><s' \
'pan style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'browse\'</span><span style=' \
'" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Co' \
'nsolas\'; font-size:9.8pt; color:#a5c261;">\'label\'</span><span style=" font-family:\'Consolas\'; f' \
'ont-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; co' \
'lor:#a5c261;">\'Browse:\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc78' \
'32;">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'toolTip\'</' \
'span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style="' \
' font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'This is a tip\'</span><span style=" fo' \
'nt-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consol' \
'as\'; font-size:9.8pt; color:#a5c261;">\'color\'</span><span style=" font-family:\'Consolas\'; font-' \
'size:9.8pt; color:#a9b7c6;">: [</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color' \
':#6897bb;">10</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </spa' \
'n><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#6897bb;">20</span><span style=" fo' \
'nt-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consol' \
'as\'; font-size:9.8pt; color:#6897bb;">30</span><span style=" font-family:\'Consolas\'; font-size:9.' \
'8pt; color:#a9b7c6;">]</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;' \
'">, </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'share\'</span>' \
'<span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font' \
'-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</span><span style=" font-family:\'Conso' \
'las\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" font-family:\'Consolas\'; font-size:9' \
'.8pt; color:#a5c261;">\'save\'</span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:' \
'#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#8888c6;">False</s' \
'pan><span style=" font-family:\'Consolas\'; font-size:9.8pt; color:#cc7832;">, </span><span style=" ' \
'font-family:\'Consolas\'; font-size:9.8pt; color:#a5c261;">\'text\'</span><span style=" font-family:' \
'\'Consolas\'; font-size:9.8pt; color:#a9b7c6;">: </span><span style=" font-family:\'Consolas\'; font' \
'-size:9.8pt; | |
/ SOURCES_DIR_NAME},
'LOGS_DIR': {'default': lambda c: c['OUTPUT_DIR'] / LOGS_DIR_NAME},
'CONFIG_FILE': {'default': lambda c: Path(c['CONFIG_FILE']).resolve() if c['CONFIG_FILE'] else c['OUTPUT_DIR'] / CONFIG_FILENAME},
'COOKIES_FILE': {'default': lambda c: c['COOKIES_FILE'] and Path(c['COOKIES_FILE']).resolve()},
'CHROME_USER_DATA_DIR': {'default': lambda c: find_chrome_data_dir() if c['CHROME_USER_DATA_DIR'] is None else (Path(c['CHROME_USER_DATA_DIR']).resolve() if c['CHROME_USER_DATA_DIR'] else None)}, # None means unset, so we autodetect it with find_chrome_Data_dir(), but emptystring '' means user manually set it to '', and we should store it as None
'URL_BLACKLIST_PTN': {'default': lambda c: c['URL_BLACKLIST'] and re.compile(c['URL_BLACKLIST'] or '', re.IGNORECASE | re.UNICODE | re.MULTILINE)},
'URL_WHITELIST_PTN': {'default': lambda c: c['URL_WHITELIST'] and re.compile(c['URL_WHITELIST'] or '', re.IGNORECASE | re.UNICODE | re.MULTILINE)},
'DIR_OUTPUT_PERMISSIONS': {'default': lambda c: c['OUTPUT_PERMISSIONS'].replace('6', '7').replace('4', '5')},
'ARCHIVEBOX_BINARY': {'default': lambda c: sys.argv[0] or bin_path('archivebox')},
'VERSION': {'default': lambda c: json.loads((Path(c['PACKAGE_DIR']) / 'package.json').read_text(encoding='utf-8').strip())['version']},
'PYTHON_BINARY': {'default': lambda c: sys.executable},
'PYTHON_ENCODING': {'default': lambda c: sys.stdout.encoding.upper()},
'PYTHON_VERSION': {'default': lambda c: '{}.{}.{}'.format(*sys.version_info[:3])},
'DJANGO_BINARY': {'default': lambda c: django.__file__.replace('__init__.py', 'bin/django-admin.py')},
'DJANGO_VERSION': {'default': lambda c: '{}.{}.{} {} ({})'.format(*django.VERSION)},
'USE_CURL': {'default': lambda c: c['USE_CURL'] and (c['SAVE_FAVICON'] or c['SAVE_TITLE'] or c['SAVE_ARCHIVE_DOT_ORG'])},
'CURL_VERSION': {'default': lambda c: bin_version(c['CURL_BINARY']) if c['USE_CURL'] else None},
'CURL_USER_AGENT': {'default': lambda c: c['CURL_USER_AGENT'].format(**c)},
'CURL_ARGS': {'default': lambda c: c['CURL_ARGS'] or []},
'SAVE_FAVICON': {'default': lambda c: c['USE_CURL'] and c['SAVE_FAVICON']},
'SAVE_ARCHIVE_DOT_ORG': {'default': lambda c: c['USE_CURL'] and c['SAVE_ARCHIVE_DOT_ORG']},
'USE_WGET': {'default': lambda c: c['USE_WGET'] and (c['SAVE_WGET'] or c['SAVE_WARC'])},
'WGET_VERSION': {'default': lambda c: bin_version(c['WGET_BINARY']) if c['USE_WGET'] else None},
'WGET_AUTO_COMPRESSION': {'default': lambda c: wget_supports_compression(c) if c['USE_WGET'] else False},
'WGET_USER_AGENT': {'default': lambda c: c['WGET_USER_AGENT'].format(**c)},
'SAVE_WGET': {'default': lambda c: c['USE_WGET'] and c['SAVE_WGET']},
'SAVE_WARC': {'default': lambda c: c['USE_WGET'] and c['SAVE_WARC']},
'WGET_ARGS': {'default': lambda c: c['WGET_ARGS'] or []},
'RIPGREP_VERSION': {'default': lambda c: bin_version(c['RIPGREP_BINARY']) if c['USE_RIPGREP'] else None},
'USE_SINGLEFILE': {'default': lambda c: c['USE_SINGLEFILE'] and c['SAVE_SINGLEFILE']},
'SINGLEFILE_VERSION': {'default': lambda c: bin_version(c['SINGLEFILE_BINARY']) if c['USE_SINGLEFILE'] else None},
'USE_READABILITY': {'default': lambda c: c['USE_READABILITY'] and c['SAVE_READABILITY']},
'READABILITY_VERSION': {'default': lambda c: bin_version(c['READABILITY_BINARY']) if c['USE_READABILITY'] else None},
'USE_MERCURY': {'default': lambda c: c['USE_MERCURY'] and c['SAVE_MERCURY']},
'MERCURY_VERSION': {'default': lambda c: '1.0.0' if shutil.which(str(bin_path(c['MERCURY_BINARY']))) else None}, # mercury is unversioned
'USE_GIT': {'default': lambda c: c['USE_GIT'] and c['SAVE_GIT']},
'GIT_VERSION': {'default': lambda c: bin_version(c['GIT_BINARY']) if c['USE_GIT'] else None},
'SAVE_GIT': {'default': lambda c: c['USE_GIT'] and c['SAVE_GIT']},
'USE_YOUTUBEDL': {'default': lambda c: c['USE_YOUTUBEDL'] and c['SAVE_MEDIA']},
'YOUTUBEDL_VERSION': {'default': lambda c: bin_version(c['YOUTUBEDL_BINARY']) if c['USE_YOUTUBEDL'] else None},
'SAVE_MEDIA': {'default': lambda c: c['USE_YOUTUBEDL'] and c['SAVE_MEDIA']},
'YOUTUBEDL_ARGS': {'default': lambda c: c['YOUTUBEDL_ARGS'] or []},
'CHROME_BINARY': {'default': lambda c: c['CHROME_BINARY'] or find_chrome_binary()},
'USE_CHROME': {'default': lambda c: c['USE_CHROME'] and c['CHROME_BINARY'] and (c['SAVE_PDF'] or c['SAVE_SCREENSHOT'] or c['SAVE_DOM'] or c['SAVE_SINGLEFILE'])},
'CHROME_VERSION': {'default': lambda c: bin_version(c['CHROME_BINARY']) if c['USE_CHROME'] else None},
'SAVE_PDF': {'default': lambda c: c['USE_CHROME'] and c['SAVE_PDF']},
'SAVE_SCREENSHOT': {'default': lambda c: c['USE_CHROME'] and c['SAVE_SCREENSHOT']},
'SAVE_DOM': {'default': lambda c: c['USE_CHROME'] and c['SAVE_DOM']},
'SAVE_SINGLEFILE': {'default': lambda c: c['USE_CHROME'] and c['SAVE_SINGLEFILE'] and c['USE_NODE']},
'SAVE_READABILITY': {'default': lambda c: c['USE_READABILITY'] and c['USE_NODE']},
'SAVE_MERCURY': {'default': lambda c: c['USE_MERCURY'] and c['USE_NODE']},
'USE_NODE': {'default': lambda c: c['USE_NODE'] and (c['SAVE_READABILITY'] or c['SAVE_SINGLEFILE'] or c['SAVE_MERCURY'])},
'NODE_VERSION': {'default': lambda c: bin_version(c['NODE_BINARY']) if c['USE_NODE'] else None},
'DEPENDENCIES': {'default': lambda c: get_dependency_info(c)},
'CODE_LOCATIONS': {'default': lambda c: get_code_locations(c)},
'EXTERNAL_LOCATIONS': {'default': lambda c: get_external_locations(c)},
'DATA_LOCATIONS': {'default': lambda c: get_data_locations(c)},
'CHROME_OPTIONS': {'default': lambda c: get_chrome_info(c)},
}
################################### Helpers ####################################
def load_config_val(key: str,
                    default: ConfigDefaultValue=None,
                    type: Optional[Type]=None,
                    aliases: Optional[Tuple[str, ...]]=None,
                    config: Optional[ConfigDict]=None,
                    env_vars: Optional[os._Environ]=None,
                    config_file_vars: Optional[Dict[str, str]]=None) -> ConfigValue:
    """Resolve a single config value from the environment or the config file.

    Looks up ``key`` (then each alias, in order) first in ``env_vars`` and then
    in ``config_file_vars``; the first non-empty hit wins.  If nothing is found,
    ``default`` is returned (called with ``config`` when it is a callable
    factory).  When ``type`` is given, the raw string is validated and coerced:
    bool/str/int directly, list/dict via JSON.

    Raises:
        ValueError: the raw string does not parse as the requested type.
        Exception: ``type`` is not one of the supported types.
    """
    # BUGFIX: `val` used to be assigned only inside the lookup loop, so calling
    # this with empty/None env_vars AND config_file_vars raised NameError below.
    val = None
    config_keys_to_check = (key, *(aliases or ()))
    # NOTE: the loop deliberately rebinds `key` so that error messages below
    # name the specific alias that matched.
    for key in config_keys_to_check:
        if env_vars:
            val = env_vars.get(key)
            if val:
                break
        if config_file_vars:
            val = config_file_vars.get(key)
            if val:
                break
    if type is None or val is None:
        if callable(default):
            # Computed defaults need the partially-resolved config so far.
            assert isinstance(config, dict)
            return default(config)
        return default
    elif type is bool:
        if val.lower() in ('true', 'yes', '1'):
            return True
        elif val.lower() in ('false', 'no', '0'):
            return False
        else:
            raise ValueError(f'Invalid configuration option {key}={val} (expected a boolean: True/False)')
    elif type is str:
        # Reject boolean-looking strings: they are almost certainly a mistyped
        # option type on the user's part.
        if val.lower() in ('true', 'false', 'yes', 'no', '1', '0'):
            raise ValueError(f'Invalid configuration option {key}={val} (expected a string)')
        return val.strip()
    elif type is int:
        if not val.isdigit():
            raise ValueError(f'Invalid configuration option {key}={val} (expected an integer)')
        return int(val)
    elif type is list or type is dict:
        return json.loads(val)
    raise Exception('Config values can only be str, bool, int or json')
def load_config_file(out_dir: Optional[str]=None) -> Optional[Dict[str, str]]:
    """Load the INI-formatted config file from OUTPUT_DIR/ArchiveBox.conf.

    Returns a flat {UPPERCASE_KEY: raw_string_value} dict spanning all INI
    sections, or None when no config file exists in out_dir.
    """
    # Fall back to the OUTPUT_DIR env var (or cwd) when no dir is given.
    out_dir = out_dir or Path(os.getenv('OUTPUT_DIR', '.')).resolve()
    config_path = Path(out_dir) / CONFIG_FILENAME
    if config_path.exists():
        config_file = ConfigParser()
        # Preserve key case as written; load_config_val matching is done on
        # the uppercased copy built below.
        config_file.optionxform = str
        config_file.read(config_path)
        # flatten all sections into one namespace keyed by UPPERCASE option name
        config_file_vars = {
            key.upper(): val
            for section, options in config_file.items()
            for key, val in options.items()
        }
        return config_file_vars
    return None
def write_config_file(config: Dict[str, str], out_dir: Optional[str]=None) -> ConfigDict:
    """Write the given key=value pairs into OUTPUT_DIR/ArchiveBox.conf (INI).

    Merges the new values into their schema-defined sections, guarantees a
    Django SECRET_KEY exists, validates the result by re-loading the full
    config, and rolls back from a .bak copy if validation fails.

    Returns the re-loaded values for just the keys that were written.
    """
    from .system import atomic_write
    CONFIG_HEADER = (
    """# This is the config file for your ArchiveBox collection.
#
# You can add options here manually in INI format, or automatically by running:
#    archivebox config --set KEY=VALUE
#
# If you modify this file manually, make sure to update your archive after by running:
#    archivebox init
#
# A list of all possible config with documentation and examples can be found here:
#    https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration
""")
    out_dir = out_dir or Path(os.getenv('OUTPUT_DIR', '.')).resolve()
    config_path = Path(out_dir) / CONFIG_FILENAME
    # Seed a fresh config file with the explanatory header if none exists yet.
    if not config_path.exists():
        atomic_write(config_path, CONFIG_HEADER)
    config_file = ConfigParser()
    config_file.optionxform = str
    config_file.read(config_path)
    # Keep a backup so we can roll back if the rewritten file fails to parse.
    with open(config_path, 'r', encoding='utf-8') as old:
        atomic_write(f'{config_path}.bak', old.read())
    # Map each option key to the (single) schema section that declares it.
    find_section = lambda key: [name for name, opts in CONFIG_SCHEMA.items() if key in opts][0]
    # Merge each new value into its section, preserving existing options.
    for key, val in config.items():
        section = find_section(key)
        if section in config_file:
            existing_config = dict(config_file[section])
        else:
            existing_config = {}
        config_file[section] = {**existing_config, key: val}
    # always make sure there's a SECRET_KEY defined for Django
    existing_secret_key = None
    if 'SERVER_CONFIG' in config_file and 'SECRET_KEY' in config_file['SERVER_CONFIG']:
        existing_secret_key = config_file['SERVER_CONFIG']['SECRET_KEY']
    if (not existing_secret_key) or ('not a valid secret' in existing_secret_key):
        from django.utils.crypto import get_random_string
        chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
        random_secret_key = get_random_string(50, chars)
        if 'SERVER_CONFIG' in config_file:
            config_file['SERVER_CONFIG']['SECRET_KEY'] = random_secret_key
        else:
            config_file['SERVER_CONFIG'] = {'SECRET_KEY': random_secret_key}
    with open(config_path, 'w+', encoding='utf-8') as new:
        config_file.write(new)
    try:
        # validate the config by attempting to re-parse it
        CONFIG = load_all_config()
    except BaseException:  # lgtm [py/catch-base-exception]
        # something went horribly wrong, revert to the previous version
        with open(f'{config_path}.bak', 'r', encoding='utf-8') as old:
            atomic_write(config_path, old.read())
        raise
    # Validation succeeded; the backup is no longer needed.
    if Path(f'{config_path}.bak').exists():
        os.remove(f'{config_path}.bak')
    return {
        key.upper(): CONFIG.get(key.upper())
        for key in config.keys()
    }
def load_config(defaults: ConfigDefaultDict,
                config: Optional[ConfigDict]=None,
                out_dir: Optional[str]=None,
                env_vars: Optional[os._Environ]=None,
                config_file_vars: Optional[Dict[str, str]]=None) -> ConfigDict:
    """Resolve every option in ``defaults`` into a concrete config dict.

    Each key is resolved via load_config_val against the environment and the
    on-disk config file; computed defaults see all previously-resolved keys.
    Exits with status 2 (after printing guidance) on any bad value.
    """
    env_vars = env_vars or os.environ
    config_file_vars = config_file_vars or load_config_file(out_dir=out_dir)
    resolved: ConfigDict = dict(config) if config else {}
    for key, default in defaults.items():
        try:
            resolved[key] = load_config_val(
                key,
                default=default['default'],
                type=default.get('type'),
                aliases=default.get('aliases'),
                config=resolved,
                env_vars=env_vars,
                config_file_vars=config_file_vars,
            )
        except KeyboardInterrupt:
            raise SystemExit(0)
        except Exception as e:
            stderr()
            stderr(f'[X] Error while loading configuration value: {key}', color='red', config=resolved)
            stderr('    {}: {}'.format(e.__class__.__name__, e))
            stderr()
            stderr('    Check your config for mistakes and try again (your archive data is unaffected).')
            stderr()
            stderr('    For config documentation and examples see:')
            stderr('        https://github.com/ArchiveBox/ArchiveBox/wiki/Configuration')
            stderr()
            raise SystemExit(2)
    return resolved
# def write_config(config: ConfigDict):
# with open(os.path.join(config['OUTPUT_DIR'], CONFIG_FILENAME), 'w+') as f:
# Logging Helpers
def stdout(*args, color: Optional[str]=None, prefix: str='', config: Optional[ConfigDict]=None) -> None:
    """Print args to stdout, space-joined, optionally wrapped in an ANSI color."""
    # Honor the collection's USE_COLOR preference; otherwise use the plain table.
    ansi = DEFAULT_CLI_COLORS if (config or {}).get('USE_COLOR') else ANSI
    text = ' '.join(str(a) for a in args)
    if color:
        text = ansi[color] + text + ansi['reset']
    sys.stdout.write(prefix + text + '\n')
def stderr(*args, color: Optional[str]=None, prefix: str='', config: Optional[ConfigDict]=None) -> None:
    """Print args to stderr, space-joined, optionally wrapped in an ANSI color."""
    # Honor the collection's USE_COLOR preference; otherwise use the plain table.
    ansi = DEFAULT_CLI_COLORS if (config or {}).get('USE_COLOR') else ANSI
    text = ' '.join(str(a) for a in args)
    if color:
        text = ansi[color] + text + ansi['reset']
    sys.stderr.write(prefix + text + '\n')
def hint(text: Union[Tuple[str, ...], List[str], str], prefix='    ', config: Optional[ConfigDict]=None) -> None:
    """Print an indented, highlighted "Hint:" block to stderr.

    Accepts a single string or a sequence of lines; the first line carries the
    "Hint:" label and any following lines are indented beneath it.
    """
    ansi = DEFAULT_CLI_COLORS if (config or {}).get('USE_COLOR') else ANSI
    # Normalize to a sequence so both branches share one code path.
    if isinstance(text, str):
        text = [text]
    # BUGFIX: an empty sequence used to raise IndexError on text[0].
    if not text:
        return
    stderr('{}{lightred}Hint:{reset} {}'.format(prefix, text[0], **ansi))
    for line in text[1:]:
        stderr('{}      {}'.format(prefix, line))
# Dependency Metadata Helpers
def bin_version(binary: Optional[str]) -> Optional[str]:
"""check the presence and return valid version line of a specified binary"""
abspath = bin_path(binary)
if not binary or not abspath:
return None
try:
version_str = run([abspath, "--version"], stdout=PIPE, env={'LANG': 'C'}).stdout.strip().decode()
# take first 3 | |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unittests for monorail.tracker.issuedetail."""
import logging
import mox
import time
import unittest
import settings
from proto import features_pb2
from features import hotlist_views
from features import notify
from framework import framework_views
from framework import monorailrequest
from framework import permissions
from framework import profiler
from framework import template_helpers
from proto import project_pb2
from proto import tracker_pb2
from proto import user_pb2
from services import service_manager
from services import issue_svc
from testing import fake
from testing import testing_helpers
from tracker import issuedetail
from tracker import tracker_constants
from tracker import tracker_helpers
class IssueDetailTest(unittest.TestCase):
def setUp(self):
self.cnxn = 'fake cnxn'
self.services = service_manager.Services(
config=fake.ConfigService(),
issue=fake.IssueService(),
user=fake.UserService(),
project=fake.ProjectService(),
issue_star=fake.IssueStarService(),
spam=fake.SpamService())
self.project = self.services.project.TestAddProject('proj', project_id=987)
self.config = tracker_pb2.ProjectIssueConfig()
self.config.statuses_offer_merge.append('Duplicate')
self.services.config.StoreConfig(self.cnxn, self.config)
def testChooseNextPage(self):
mr = testing_helpers.MakeMonorailRequest(
path='/p/proj/issues/detail?id=123&q=term')
mr.col_spec = ''
config = tracker_pb2.ProjectIssueConfig()
issue = fake.MakeTestIssue(987, 123, 'summary', 'New', 111L)
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, None,
user_pb2.IssueUpdateNav.UP_TO_LIST, '124')
self.assertTrue(url.startswith(
'http://127.0.0.1/p/proj/issues/list?cursor=proj%3A123&q=term'))
self.assertTrue(url.endswith('&updated=123'))
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, None,
user_pb2.IssueUpdateNav.STAY_SAME_ISSUE, '124')
self.assertEqual('http://127.0.0.1/p/proj/issues/detail?id=123&q=term',
url)
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, None,
user_pb2.IssueUpdateNav.NEXT_IN_LIST, '124')
self.assertEqual('http://127.0.0.1/p/proj/issues/detail?id=124&q=term',
url)
# If this is the last in the list, the next_id from the form will be ''.
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, None,
user_pb2.IssueUpdateNav.NEXT_IN_LIST, '')
self.assertTrue(url.startswith(
'http://127.0.0.1/p/proj/issues/list?cursor=proj%3A123&q=term'))
self.assertTrue(url.endswith('&updated=123'))
def testChooseNextPage_ForMoveRequest(self):
mr = testing_helpers.MakeMonorailRequest(
path='/p/proj/issues/detail?id=123&q=term')
mr.col_spec = ''
config = tracker_pb2.ProjectIssueConfig()
issue = fake.MakeTestIssue(987, 123, 'summary', 'New', 111L)
moved_to_project_name = 'projB'
moved_to_project_local_id = 543
moved_to_project_name_and_local_id = (moved_to_project_name,
moved_to_project_local_id)
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, moved_to_project_name_and_local_id, None,
user_pb2.IssueUpdateNav.UP_TO_LIST, '124')
self.assertTrue(url.startswith(
'http://127.0.0.1/p/proj/issues/list?cursor=proj%3A123&moved_to_id=' +
str(moved_to_project_local_id) + '&moved_to_project=' +
moved_to_project_name + '&q=term'))
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, moved_to_project_name_and_local_id, None,
user_pb2.IssueUpdateNav.STAY_SAME_ISSUE, '124')
self.assertEqual(
'http://127.0.0.1/p/%s/issues/detail?id=123&q=term' % (
moved_to_project_name),
url)
mr.project_name = 'proj' # reset project name back.
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, moved_to_project_name_and_local_id, None,
user_pb2.IssueUpdateNav.NEXT_IN_LIST, '124')
self.assertEqual('http://127.0.0.1/p/proj/issues/detail?id=124&q=term',
url)
# If this is the last in the list, the next_id from the form will be ''.
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, moved_to_project_name_and_local_id, None,
user_pb2.IssueUpdateNav.NEXT_IN_LIST, '')
self.assertTrue(url.startswith(
'http://127.0.0.1/p/proj/issues/list?cursor=proj%3A123&moved_to_id=' +
str(moved_to_project_local_id) + '&moved_to_project=' +
moved_to_project_name + '&q=term'))
def testChooseNextPage_ForCopyRequest(self):
mr = testing_helpers.MakeMonorailRequest(
path='/p/proj/issues/detail?id=123&q=term')
mr.col_spec = ''
config = tracker_pb2.ProjectIssueConfig()
issue = fake.MakeTestIssue(987, 123, 'summary', 'New', 111L)
copied_to_project_name = 'projB'
copied_to_project_local_id = 543
copied_to_project_name_and_local_id = (copied_to_project_name,
copied_to_project_local_id)
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, copied_to_project_name_and_local_id,
user_pb2.IssueUpdateNav.UP_TO_LIST, '124')
self.assertTrue(url.startswith(
'http://127.0.0.1/p/proj/issues/list?copied_from_id=123'
'&copied_to_id=' + str(copied_to_project_local_id) +
'&copied_to_project=' + copied_to_project_name +
'&cursor=proj%3A123&q=term'))
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, copied_to_project_name_and_local_id,
user_pb2.IssueUpdateNav.STAY_SAME_ISSUE, '124')
self.assertEqual('http://127.0.0.1/p/proj/issues/detail?id=123&q=term', url)
mr.project_name = 'proj' # reset project name back.
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, copied_to_project_name_and_local_id,
user_pb2.IssueUpdateNav.NEXT_IN_LIST, '124')
self.assertEqual('http://1192.168.127.12/p/proj/issues/detail?id=124&q=term',
url)
# If this is the last in the list, the next_id from the form will be ''.
url = issuedetail._ChooseNextPage(
mr, issue.local_id, config, None, copied_to_project_name_and_local_id,
user_pb2.IssueUpdateNav.NEXT_IN_LIST, '')
self.assertTrue(url.startswith(
'http://127.0.0.1/p/proj/issues/list?copied_from_id=123'
'&copied_to_id=' + str(copied_to_project_local_id) +
'&copied_to_project=' + copied_to_project_name +
'&cursor=proj%3A123&q=term'))
def testGatherHelpData_Anon(self):
servlet = issuedetail.IssueDetail('req', 'res', services=self.services)
mr = testing_helpers.MakeMonorailRequest()
mr.auth.user_id = 0
# Anon users do not see dismissable cues unless there is something relevant
# in the page_data to trigger it.
help_data = servlet.GatherHelpData(mr, {})
self.assertEqual(None, help_data['cue'])
def testGatherHelpData_SignedIn(self):
servlet = issuedetail.IssueDetail('req', 'res', services=self.services)
mr = testing_helpers.MakeMonorailRequest()
mr.auth.user_id = 111L
# User needs to click through the privacy dialog.
help_data = servlet.GatherHelpData(mr, {})
self.assertEqual('privacy_click_through', help_data['cue'])
mr.auth.user_pb.dismissed_cues = ['privacy_click_through']
# User did not jump to an issue, no query at all.
help_data = servlet.GatherHelpData(mr, {})
self.assertEqual(None, help_data['cue'])
# User did not jump to an issue, query was not a local ID number.
mr.query = 'memory leak'
help_data = servlet.GatherHelpData(mr, {})
self.assertEqual(None, help_data['cue'])
# User jumped directly to an issue, maybe they meant to search instead.
mr.query = '123'
help_data = servlet.GatherHelpData(mr, {})
self.assertEqual('search_for_numbers', help_data['cue'])
self.assertEqual(123, help_data['jump_local_id'])
# User is viewing an issue with an unavailable owner.
mr.query = ''
issue_view = testing_helpers.Blank(
is_spam=False,
owner=testing_helpers.Blank(user_id=111L, avail_message='On vacation'),
derived_owner=testing_helpers.Blank(user_id=0L, avail_message=''),
cc=[testing_helpers.Blank(user_id=222L, avail_message='')],
derived_cc=[testing_helpers.Blank(user_id=333L, avail_message='')])
page_data = {'issue': issue_view}
help_data = servlet.GatherHelpData(mr, page_data)
self.assertEqual('availibility_msgs', help_data['cue'])
# User is viewing an issue with all participants available.
# No help cue is shown.
issue_view = testing_helpers.Blank(
is_spam=False,
owner=testing_helpers.Blank(user_id=0L, avail_message='Never visited'),
derived_owner=testing_helpers.Blank(user_id=0L, avail_message=''),
cc=[testing_helpers.Blank(user_id=222L, avail_message='')],
derived_cc=[testing_helpers.Blank(user_id=333L, avail_message='')])
page_data = {'issue': issue_view}
help_data = servlet.GatherHelpData(mr, page_data)
self.assertEqual(None, help_data['cue'])
class IssueDetailFunctionsTest(unittest.TestCase):
  def setUp(self):
    """Build an in-memory service stack, a test project, and the servlet."""
    self.project_name = 'proj'
    self.project_id = 987
    self.cnxn = 'fake cnxn'
    self.services = service_manager.Services(
        config=fake.ConfigService(),
        issue=fake.IssueService(),
        issue_star=fake.IssueStarService(),
        project=fake.ProjectService(),
        user=fake.UserService())
    # 111L is the one committer member of the test project.
    self.project = self.services.project.TestAddProject(
        'proj', project_id=987, committer_ids=[111L])
    self.servlet = issuedetail.IssueDetail(
        'req', 'res', services=self.services)
    self.mox = mox.Mox()
  def tearDown(self):
    """Restore any stubbed-out functions and clear recorded expectations."""
    self.mox.UnsetStubs()
    self.mox.ResetAll()
  def VerifyShouldShowFlipper(
      self, expected, query, sort_spec, can, create_issues=0):
    """Instantiate a _Flipper and check if makes a pipeline or not.

    Args:
      expected: whether the flipper is expected to be shown.
      query: issue search query string for the request.
      sort_spec: sort specification string for the request.
      can: canned-query number for the request.
      create_issues: number of issues to create before the check, so the
          test can control apparent project size.
    """
    # Fresh, empty services so create_issues fully determines project size.
    services = service_manager.Services(
        config=fake.ConfigService(),
        issue=fake.IssueService(),
        project=fake.ProjectService(),
        user=fake.UserService())
    mr = testing_helpers.MakeMonorailRequest(project=self.project)
    mr.query = query
    mr.sort_spec = sort_spec
    mr.can = can
    mr.project_name = self.project.project_name
    mr.project = self.project
    for idx in range(create_issues):
      _local_id = services.issue.CreateIssue(
          self.cnxn, services, self.project.project_id,
          'summary_%d' % idx, 'status', 111L, [], [], [], [], 111L,
          'description_%d' % idx)
    self.assertEqual(
        expected,
        issuedetail._ShouldShowFlipper(mr, services))
  def testShouldShowFlipper_RegularSizedProject(self):
    """In a normal-sized project, the flipper shows except for jump-to-id."""
    # If the user is looking for a specific issue, no flipper.
    self.VerifyShouldShowFlipper(
        False, '123', '', tracker_constants.OPEN_ISSUES_CAN)
    self.VerifyShouldShowFlipper(False, '123', '', 5)
    self.VerifyShouldShowFlipper(
        False, '123', 'priority', tracker_constants.OPEN_ISSUES_CAN)
    # If the user did a search or sort or all in a small can, show flipper.
    self.VerifyShouldShowFlipper(
        True, 'memory leak', '', tracker_constants.OPEN_ISSUES_CAN)
    self.VerifyShouldShowFlipper(
        True, 'id=1,2,3', '', tracker_constants.OPEN_ISSUES_CAN)
    # Any can other than 1 or 2 is doing a query and so it should have a
    # fairly narrow result set size.  5 is issues starred by me.
    self.VerifyShouldShowFlipper(True, '', '', 5)
    self.VerifyShouldShowFlipper(
        True, '', 'status', tracker_constants.OPEN_ISSUES_CAN)
    # In a project without a huge number of issues, still show the flipper even
    # if there was no specific query.
    self.VerifyShouldShowFlipper(
        True, '', '', tracker_constants.OPEN_ISSUES_CAN)
def testShouldShowFlipper_LargeSizedProject(self):
settings.threshold_to_suppress_prev_next = 1
# In a project that has tons of issues, save time by not showing the
# flipper unless there was a specific query, sort, or can.
self.VerifyShouldShowFlipper(
False, '', '', tracker_constants.ALL_ISSUES_CAN, create_issues=3)
self.VerifyShouldShowFlipper(
False, '', '', tracker_constants.OPEN_ISSUES_CAN, create_issues=3)
def testFieldEditPermitted_NoEdit(self):
page_perms = testing_helpers.Blank(
EditIssueSummary=False, EditIssueStatus=False, EditIssueOwner=False,
EditIssueCc=False) # no perms are needed.
self.assertTrue(issuedetail._FieldEditPermitted(
[], '', '', '', '', 0, [], page_perms))
def testFieldEditPermitted_AllNeededPerms(self):
page_perms = testing_helpers.Blank(
EditIssueSummary=True, EditIssueStatus=True, EditIssueOwner=True,
EditIssueCc=True)
self.assertTrue(issuedetail._FieldEditPermitted(
[], '', '', 'new sum', 'new status', 111L, [222L], page_perms))
def testFieldEditPermitted_MissingPerms(self):
page_perms = testing_helpers.Blank(
EditIssueSummary=False, EditIssueStatus=False, EditIssueOwner=False,
EditIssueCc=False) # no perms.
self.assertFalse(issuedetail._FieldEditPermitted(
[], '', '', 'new sum', '', 0, [], page_perms))
self.assertFalse(issuedetail._FieldEditPermitted(
[], '', '', '', 'new status', 0, [], page_perms))
self.assertFalse(issuedetail._FieldEditPermitted(
[], '', '', '', '', 111L, [], page_perms))
self.assertFalse(issuedetail._FieldEditPermitted(
[], '', '', '', '', 0, [222L], page_perms))
def testFieldEditPermitted_NeededPermsNotOffered(self):
"""Even if user has all the field-level perms, they still can't do this."""
page_perms = testing_helpers.Blank(
EditIssueSummary=True, EditIssueStatus=True, EditIssueOwner=True,
EditIssueCc=True)
self.assertFalse(issuedetail._FieldEditPermitted(
['NewLabel'], '', '', '', '', 0, [], page_perms))
self.assertFalse(issuedetail._FieldEditPermitted(
[], 'new blocked on', '', '', '', 0, [], page_perms))
self.assertFalse(issuedetail._FieldEditPermitted(
[], '', 'new blocking', '', '', 0, [], page_perms))
  def testValidateOwner_ChangedToValidOwner(self):
    """No error message is returned when the proposed owner is valid."""
    post_data_owner = '<EMAIL>'
    parsed_owner_id = 111
    original_issue_owner_id = 111
    mr = testing_helpers.MakeMonorailRequest(project=self.project)
    # mox record phase: expect exactly one validity check on the parsed owner.
    self.mox.StubOutWithMock(tracker_helpers, 'IsValidIssueOwner')
    tracker_helpers.IsValidIssueOwner(
        mr.cnxn, mr.project, parsed_owner_id, self.services).AndReturn(
        (True, ''))
    self.mox.ReplayAll()
    ret = self.servlet._ValidateOwner(
        mr, post_data_owner, parsed_owner_id, original_issue_owner_id)
    self.mox.VerifyAll()
    # None means "no validation error to report".
    self.assertIsNone(ret)
  def testValidateOwner_UnchangedInvalidOwner(self):
    """An invalid owner is tolerated when the owner was not actually changed."""
    post_data_owner = '<EMAIL>'
    parsed_owner_id = 111
    original_issue_owner_id = 111
    mr = testing_helpers.MakeMonorailRequest(project=self.project)
    self.services.user.TestAddUser(post_data_owner, original_issue_owner_id)
    # mox record phase: validation fails, but since the posted owner still
    # resolves to the original owner, no error should be surfaced.
    self.mox.StubOutWithMock(tracker_helpers, 'IsValidIssueOwner')
    tracker_helpers.IsValidIssueOwner(
        mr.cnxn, mr.project, parsed_owner_id, self.services).AndReturn(
        (False, 'invalid owner'))
    self.mox.ReplayAll()
    ret = self.servlet._ValidateOwner(
        mr, post_data_owner, parsed_owner_id, original_issue_owner_id)
    self.mox.VerifyAll()
    self.assertIsNone(ret)
def testValidateOwner_ChangedFromValidToInvalidOwner(self):
post_data_owner = 'lexluthor'
parsed_owner_id = 111
original_issue_owner_id = 111
original_issue_owner = '<EMAIL>'
mr = testing_helpers.MakeMonorailRequest(project=self.project)
self.services.user.TestAddUser(original_issue_owner,
original_issue_owner_id)
self.mox.StubOutWithMock(tracker_helpers, 'IsValidIssueOwner')
tracker_helpers.IsValidIssueOwner(
mr.cnxn, mr.project, parsed_owner_id, self.services).AndReturn(
(False, 'invalid owner'))
self.mox.ReplayAll()
ret = self.servlet._ValidateOwner(
mr, post_data_owner, parsed_owner_id, original_issue_owner_id)
self.mox.VerifyAll()
self.assertEquals('invalid owner', ret)
  def testValidateCC(self):
    """_ValidateCC returns None for a good CC list, or an error message."""
    cc_ids = [1L, 2L]
    cc_names = ['<EMAIL>', '<EMAIL>']
    res = self.servlet._ValidateCC(cc_ids, cc_names)
    self.assertIsNone(res)
    # A None entry in cc_ids means the corresponding username did not resolve.
    cc_ids = [None, 2L]
    res = self.servlet._ValidateCC(cc_ids, cc_names)
    # NOTE(review): the expected name below does not match the cc_names
    # fixture above (which looks anonymized) — confirm against real data.
    self.assertEqual(res, 'Invalid Cc username: user1@example')
  def testProcessFormData_NoPermission(self):
    """Anonymous users and users without ADD_ISSUE_COMMENT cannot comment."""
    local_id_1 = self.services.issue.CreateIssue(
        self.cnxn, self.services, self.project.project_id,
        'summary_1', 'status', 111L, [], [], [], [], 111L, 'description_1')
    _, mr = testing_helpers.GetRequestObjects(
        project=self.project,
        perms=permissions.CONTRIBUTOR_INACTIVE_PERMISSIONSET)
    mr.auth.user_id = 0
    mr.local_id = local_id_1
    # Anonymous visitor: rejected.
    self.assertRaises(permissions.PermissionException,
        self.servlet.ProcessFormData, mr, {})
    # Signed-in member, but the inactive permission set lacks commenting.
    mr.auth.user_id = 111L
    self.assertRaises(permissions.PermissionException,
        self.servlet.ProcessFormData, mr, {})
def testProcessFormData_NonMembersCantEdit(self):
"""Non-members can comment, but never affect issue fields."""
orig_prepsend = notify.PrepareAndSendIssueChangeNotification
notify.PrepareAndSendIssueChangeNotification = | |
# Source: 4ekin/raw-packet — raw_packet/Scanners/scanner.py
# region Description
"""
scanner.py: Scan local network
Author: <NAME>
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
# region Raw-packet modules
from raw_packet.Utils.base import Base
from raw_packet.Scanners.arp_scanner import ArpScan
from raw_packet.Scanners.icmpv6_scanner import ICMPv6Scan
# endregion
# region Import libraries
import xml.etree.ElementTree as ET
import subprocess as sub
from prettytable import PrettyTable
from os.path import dirname, abspath, isfile
from os import remove
from typing import Union, List, Dict
current_path = dirname((abspath(__file__)))
# endregion
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
# region Main class - Scanner
class Scanner:
# region Variables
base: Base = Base()
arp_scan: ArpScan = ArpScan()
icmpv6_scan: ICMPv6Scan = ICMPv6Scan()
nmap_scan_result: str = current_path + '/nmap_scan.xml'
# endregion
# region Init
    def __init__(self):
        """Abort at startup when the external 'nmap' binary is not installed."""
        if not self.base.check_installed_software('nmap'):
            self.base.print_error('Could not find program: ', 'nmap')
            exit(1)
# endregion
# region Apple device selection
    def apple_device_selection(self, apple_devices: Union[None, List[List[str]]],
                               exit_on_failure: bool = False) -> Union[None, List[str]]:
        """Validate a list of scanned Apple devices and let the user pick one.

        :param apple_devices: list of [ip_or_ipv6, mac, vendor] triples
        :param exit_on_failure: call exit(1) when the input list is invalid
        :return: the selected triple, or None on invalid input/selection
        """
        try:
            assert apple_devices is not None, 'List of Apple devices is None!'
            assert len(apple_devices) != 0, 'List of Apple devices is empty!'
            # Validate every entry up front so selection works on clean data.
            for apple_device in apple_devices:
                assert len(apple_device) == 3, \
                    'Bad list of Apple device, example: [["192.168.0.1", "12:34:56:78:90:ab", "Apple, Inc."]]'
                assert (self.base.ip_address_validation(ip_address=apple_device[0]) or
                        self.base.ipv6_address_validation(ipv6_address=apple_device[0])), \
                    'Bad list of Apple device, example: [["192.168.0.1", "12:34:56:78:90:ab", "Apple, Inc."]]'
                assert self.base.mac_address_validation(mac_address=apple_device[1]), \
                    'Bad list of Apple device, example: [["192.168.0.1", "12:34:56:78:90:ab", "Apple, Inc."]]'
            apple_device: Union[None, List[str]] = None
            # Single device: select it automatically.
            if len(apple_devices) == 1:
                apple_device = apple_devices[0]
                self.base.print_info('Only one Apple device found:')
                self.base.print_success(apple_device[0] + ' (' + apple_device[1] + ') ', apple_device[2])
            # Multiple devices: show a table and prompt for an index.
            if len(apple_devices) > 1:
                self.base.print_info('Apple devices found:')
                device_index: int = 1
                # NOTE(review): mixes self.base.cINFO/cEND here with
                # self.base.c_info below — confirm both exist on Base.
                apple_devices_pretty_table = PrettyTable([self.base.cINFO + 'Index' + self.base.cEND,
                                                          self.base.cINFO + 'IP address' + self.base.cEND,
                                                          self.base.cINFO + 'MAC address' + self.base.cEND,
                                                          self.base.cINFO + 'Vendor' + self.base.cEND])
                for apple_device in apple_devices:
                    apple_devices_pretty_table.add_row([str(device_index), apple_device[0],
                                                        apple_device[1], apple_device[2]])
                    device_index += 1
                print(apple_devices_pretty_table)
                device_index -= 1
                current_device_index = input(self.base.c_info + 'Set device index from range (1-' +
                                             str(device_index) + '): ')
                # Bad input returns None here (unlike the IPv4/IPv6 variants,
                # which raise AssertionError and may exit).
                if not current_device_index.isdigit():
                    self.base.print_error('Your input data is not digit!')
                    return None
                if any([int(current_device_index) < 1, int(current_device_index) > device_index]):
                    self.base.print_error('Your number is not within range (1-' + str(device_index) + ')')
                    return None
                current_device_index = int(current_device_index) - 1
                apple_device = apple_devices[current_device_index]
            return apple_device
        except KeyboardInterrupt:
            self.base.print_info('Exit')
            exit(0)
        except AssertionError as Error:
            self.base.print_error(Error.args[0])
            if exit_on_failure:
                exit(1)
            return None
# endregion
# region IPv4 device selection
def ipv4_device_selection(self, ipv4_devices: Union[None, List[Dict[str, str]]],
exit_on_failure: bool = False) -> Union[None, Dict[str, str]]:
try:
assert ipv4_devices is not None, 'List of IPv4 devices is None!'
assert len(ipv4_devices) != 0, 'List of IPv4 devices is empty!'
for ipv4_device in ipv4_devices:
assert len(ipv4_device) == 3, \
'Bad dict of IPv4 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert 'ip-address' in ipv4_device.keys(), \
'Bad dict of IPv4 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert self.base.ip_address_validation(ipv4_device['ip-address']), \
'Bad dict of IPv4 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert 'mac-address' in ipv4_device.keys(), \
'Bad dict of IPv4 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert self.base.mac_address_validation(ipv4_device['mac-address']), \
'Bad dict of IPv4 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert 'vendor' in ipv4_device.keys(), \
'Bad dict of IPv4 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
ipv4_device: Union[None, Dict[str, str]] = None
# region IPv4 devices is found
# region Only one IPv4 device found
if len(ipv4_devices) == 1:
ipv4_device: Dict[str, str] = ipv4_devices[0]
self.base.print_info('Only one IPv4 device found:')
self.base.print_success(ipv4_device['ip-address'] + ' (' + ipv4_device['mac-address'] + ') ' +
ipv4_device['vendor'])
# endregion
# region More than one IPv4 device found
if len(ipv4_devices) > 1:
self.base.print_success('Found ', str(len(ipv4_devices)), ' IPv4 alive hosts!')
device_index: int = 1
pretty_table = PrettyTable([self.base.info_text('Index'),
self.base.info_text('IPv4 address'),
self.base.info_text('MAC address'),
self.base.info_text('Vendor')])
for ipv4_device in ipv4_devices:
pretty_table.add_row([str(device_index), ipv4_device['ip-address'],
ipv4_device['mac-address'], ipv4_device['vendor']])
device_index += 1
print(pretty_table)
device_index -= 1
current_device_index: Union[int, str] = \
input(self.base.c_info + 'Set device index from range (1-' + str(device_index) + '): ')
assert current_device_index.isdigit(), \
'Your input data is not digit!'
current_device_index: int = int(current_device_index)
assert not any([current_device_index < 1, current_device_index > device_index]), \
'Your number is not within range (1-' + str(device_index) + ')'
current_device_index: int = int(current_device_index) - 1
ipv4_device: Dict[str, str] = ipv4_devices[current_device_index]
# endregion
# endregion
# region IPv4 devices not found
else:
if exit_on_failure:
self.base.print_error('Could not find IPv4 devices!')
exit(1)
# endregion
return ipv4_device
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region IPv6 device selection
def ipv6_device_selection(self, ipv6_devices: Union[None, List[Dict[str, str]]],
                          exit_on_failure: bool = False) -> Union[None, Dict[str, str]]:
    """
    Select a single IPv6 device from a list of scan results.

    :param ipv6_devices: List of device dicts; each dict must contain exactly the
                         keys 'ip-address', 'mac-address' and 'vendor'
    :param exit_on_failure: Call exit(1) on any failure instead of returning None
    :return: The selected device dict, or None on failure
    """
    # Single shared message for all malformed-dict assertions (was duplicated six times).
    bad_dict_msg: str = \
        'Bad dict of IPv6 device, example: ' + \
        '[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
    try:
        assert ipv6_devices is not None, 'List of IPv6 devices is None!'
        assert len(ipv6_devices) != 0, 'List of IPv6 devices is empty!'
        for ipv6_device in ipv6_devices:
            assert len(ipv6_device) == 3, bad_dict_msg
            assert 'ip-address' in ipv6_device.keys(), bad_dict_msg
            assert self.base.ipv6_address_validation(ipv6_device['ip-address']), bad_dict_msg
            assert 'mac-address' in ipv6_device.keys(), bad_dict_msg
            assert self.base.mac_address_validation(ipv6_device['mac-address']), bad_dict_msg
            assert 'vendor' in ipv6_device.keys(), bad_dict_msg
        ipv6_device: Union[None, Dict[str, str]] = None
        # region Only one IPv6 device found
        if len(ipv6_devices) == 1:
            ipv6_device: Dict[str, str] = ipv6_devices[0]
            self.base.print_info('Only one IPv6 device found:')
            self.base.print_success(ipv6_device['ip-address'] + ' (' + ipv6_device['mac-address'] + ') ' +
                                    ipv6_device['vendor'])
        # endregion
        # region More than one IPv6 device found
        # BUG FIX: this branch was a separate `if`, so with exactly one device the
        # trailing `else` ("devices not found") also executed and could exit(1)
        # even though a device had been selected. `elif` makes the cases exclusive.
        elif len(ipv6_devices) > 1:
            self.base.print_success('Found ', str(len(ipv6_devices)), ' IPv6 alive hosts!')
            device_index: int = 1
            pretty_table = PrettyTable([self.base.info_text('Index'),
                                        self.base.info_text('IPv6 address'),
                                        self.base.info_text('MAC address'),
                                        self.base.info_text('Vendor')])
            for ipv6_device in ipv6_devices:
                pretty_table.add_row([str(device_index), ipv6_device['ip-address'],
                                      ipv6_device['mac-address'], ipv6_device['vendor']])
                device_index += 1
            print(pretty_table)
            device_index -= 1
            current_device_index: Union[int, str] = \
                input(self.base.c_info + 'Set device index from range (1-' + str(device_index) + '): ')
            assert current_device_index.isdigit(), \
                'Your input data is not digit!'
            current_device_index: int = int(current_device_index)
            assert not any([current_device_index < 1, current_device_index > device_index]), \
                'Your number is not within range (1-' + str(device_index) + ')'
            # Convert the 1-based user index to a 0-based list index
            # (was a redundant second int() conversion).
            current_device_index -= 1
            ipv6_device: Dict[str, str] = ipv6_devices[current_device_index]
        # endregion
        # region IPv6 devices not found
        # Unreachable after the len() asserts above; kept as a defensive guard.
        else:
            if exit_on_failure:
                self.base.print_error('Could not find IPv6 devices!')
                exit(1)
        # endregion
        return ipv6_device
    except KeyboardInterrupt:
        self.base.print_info('Exit')
        exit(0)
    except AssertionError as Error:
        self.base.print_error(Error.args[0])
        if exit_on_failure:
            exit(1)
        return None
# region Find all devices in local network
def find_ip_in_local_network(self,
                             network_interface: str = 'eth0',
                             timeout: int = 3, retry: int = 3,
                             show_scan_percentage: bool = True,
                             exit_on_failure: bool = True) -> Union[None, List[str]]:
    """
    ARP-scan the given interface and collect the IPv4 addresses of alive hosts.

    :param network_interface: interface to scan (default: 'eth0')
    :param timeout: ARP scan timeout in seconds
    :param retry: number of ARP scan retries
    :param show_scan_percentage: print scan progress while scanning
    :param exit_on_failure: call exit(1) when no devices are found
    :return: list of IPv4 address strings, or None on failure
    """
    try:
        scan_results = self.arp_scan.scan(network_interface=network_interface, timeout=timeout,
                                          retry=retry, exit_on_failure=False, check_vendor=True,
                                          show_scan_percentage=show_scan_percentage)
        assert len(scan_results) != 0, \
            'Could not find network devices on interface: ' + self.base.error_text(network_interface)
        # Keep only entries whose address passes IPv4 validation.
        return [device['ip-address'] for device in scan_results
                if self.base.ip_address_validation(device['ip-address'])]
    except KeyboardInterrupt:
        self.base.print_info('Exit')
        exit(0)
    except AssertionError as Error:
        self.base.print_error(Error.args[0])
        if exit_on_failure:
            exit(1)
        return None
# endregion
# region Find Apple devices in local network with arp_scan
def find_apple_devices_by_mac(self, network_interface: str = 'eth0',
                              timeout: int = 3, retry: int = 3,
                              show_scan_percentage: bool = True,
                              exit_on_failure: bool = True) -> Union[None, List[List[str]]]:
    """
    ARP-scan the given interface and return devices whose vendor string contains 'Apple'.

    :param network_interface: interface to scan (default: 'eth0')
    :param timeout: ARP scan timeout in seconds
    :param retry: number of ARP scan retries
    :param show_scan_percentage: print scan progress while scanning
    :param exit_on_failure: call exit(1) when nothing is found
    :return: list of [ip-address, mac-address, vendor] triples, or None on failure
    """
    try:
        scan_results = self.arp_scan.scan(network_interface=network_interface, timeout=timeout,
                                          retry=retry, exit_on_failure=False, check_vendor=True,
                                          show_scan_percentage=show_scan_percentage)
        assert len(scan_results) != 0, \
            'Could not find network devices on interface: ' + self.base.error_text(network_interface)
        # Vendor strings come from the ARP scanner's OUI lookup.
        apple_devices = [[device['ip-address'], device['mac-address'], device['vendor']]
                         for device in scan_results if 'Apple' in device['vendor']]
        assert len(apple_devices) != 0, \
            'Could not find Apple devices on interface: ' + self.base.error_text(network_interface)
        return apple_devices
    except KeyboardInterrupt:
        self.base.print_info('Exit')
        exit(0)
    except AssertionError as Error:
        self.base.print_error(Error.args[0])
        if exit_on_failure:
            exit(1)
        return None
# endregion
# region Find Apple devices in local | |
import typing
import discord
import asyncio
import re
from discord.ext import commands
from collections import Counter
from utilities import decorators
from utilities import converters
from utilities import checks
from utilities import helpers
def setup(bot):
    """Entry point used by the bot's extension loader: attach the Server cog."""
    cog = Server(bot)
    bot.add_cog(cog)
class Server(commands.Cog):
"""
Module for server management
"""
def __init__(self, bot):
    """Store the bot reference and pre-compile the regexes used by purge predicates."""
    self.bot = bot
    # Raw discord snowflake IDs (17-21 digits).
    self.mregex = re.compile(r"[0-9]{17,21}")
    # Discord invite links: discord.gg/... or discord(app).com/invite/...
    self.dregex = re.compile(
        r"(?:https?://)?discord(?:app)?\.(?:com/invite|gg)/[a-zA-Z0-9]+/?"
    )
    # Generic http(s) URLs.
    self.uregex = re.compile(
        r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
    )
###################
## Prune Command ##
###################
@decorators.group(
    brief="Purge any type of content.",
    aliases=["prune", "delete"],
    description="Methods:"
    "\nAll - Purge all messages."
    "\nBots - Purge messages sent by bots."
    "\nContains - Custom purge messages."
    "\nEmbeds - Purge messages with embeds."
    "\nEmojis - Purge messages with emojis."
    "\nFiles - Purge messages with attachments."
    "\nHumans - Purge messages sent by humans."
    "\nImages - Purge messages with images."
    "\nInvites - Purge messages with invites."
    "\nMentions - Purge messages with mentions."
    "\nReactions - Purge reactions from messages."
    "\nUntil - Purge messages until a message."
    "\nUrls - Purge messages with URLs."
    "\nUser - Purge messages sent by a user."
    "\nWebhooks - Purge messages sent by webhooks.",  # fixed user-facing typo: "wehooks"
)
@checks.guild_only()
@checks.bot_has_perms(manage_messages=True)
@checks.has_perms(manage_messages=True)
@checks.cooldown()
async def purge(self, ctx):
    """
    Usage: {0}purge <option> <amount>
    Aliases: {0}prune, {0}delete
    Permission: Manage Messages
    Options:
        all, bots, contains, embeds,
        emojis, files, humans, images,
        invites, mentions, reactions,
        until, urls, user, webhooks.
    Output:
        Deletes messages that match
        a specific search criteria
    Examples:
        {0}prune user Hecate
        {0}prune bots
        {0}prune invites 1000
    Notes:
        Specify the amount kwarg
        to search that number of
        messages. For example,
        {0}prune user Hecate 1000
        will search for all messages
        in the past 1000 sent in the
        channel, and delete all that
        were sent by Hecate.
        Default amount is 100.
    """
    # No subcommand given: interpret the first raw argument as a search size
    # and purge everything within it.
    args = str(ctx.message.content).split()
    if ctx.invoked_subcommand is None:
        try:
            search = int(args[1])
        except (IndexError, ValueError):
            # No/invalid number after the bare command -> show usage.
            return await ctx.usage("<option> [search=100]")
        await self._remove_all(ctx, search=search)
async def do_removal(
    self, ctx, limit, predicate, *, before=None, after=None, message=True
):
    """
    Core purge helper shared by every subcommand.

    Deletes up to `limit` messages matching `predicate` (all messages when
    predicate is falsy), then posts a short summary that cleans itself up.
    """
    if limit > 2000:
        return await ctx.send_or_reply(
            f"Too many messages to search given ({limit}/2000)",
        )
    # Default the search window to everything before the invoking message.
    before = ctx.message if not before else discord.Object(id=before)
    after = discord.Object(id=after) if after else after
    try:
        if predicate:
            deleted = await ctx.channel.purge(
                limit=limit, before=before, after=after, check=predicate
            )
        else:
            deleted = await ctx.channel.purge(limit=limit, before=before, after=after)
    except discord.Forbidden:
        return await ctx.fail(
            "I do not have permissions to delete messages.", refer=False
        )
    except discord.HTTPException as e:
        return await ctx.fail(f"Error: {e} (try a smaller search?)", refer=False)
    deleted = len(deleted)
    if message is True:
        # Confirm, wait a moment, then remove both the confirmation and invocation.
        msg = await ctx.send(
            f"{self.bot.emote_dict['trash']} Deleted {deleted} message{'' if deleted == 1 else 's'}",
        )
        await asyncio.sleep(5)
        to_delete = [msg.id, ctx.message.id]
        await ctx.channel.purge(check=lambda m: m.id in to_delete)
@purge.command(brief="Purge messages with embeds.")
async def embeds(self, ctx, search=100):
    """
    Usage: {0}purge embeds [amount]
    Output:
        Deletes all messages that
        contain embeds in them.
    Examples:
        {0}purge embeds 2000
        {0}prune embeds
    """
    # A non-empty embed list marks the message for deletion.
    def has_embed(m):
        return len(m.embeds)

    await self.do_removal(ctx, search, has_embed)
@purge.command(brief="Purge messages with invites.", aliases=["ads"])
async def invites(self, ctx, search=100):
    """
    Usage: {0}purge invites [amount]
    Alias: {0}purge ads
    Output:
        Deletes all messages with
        invite links in them.
    Examples:
        {0}purge invites
        {0}prune invites 125
    """
    # Match against the invite-link regex compiled in __init__.
    await self.do_removal(ctx, search, lambda m: self.dregex.search(m.content))
@purge.command(aliases=["link", "url", "links"], brief="Purge messages with URLs.")
async def urls(self, ctx, search=100):
    """
    Usage: {0}purge urls [amount]
    Aliases:
        {0}purge link
        {0}purge links
        {0}purge url
    Output:
        Deletes all messages that
        contain URLs in them.
    Examples:
        {0}purge urls
        {0}prune urls 125
    """
    # Match against the generic URL regex compiled in __init__.
    await self.do_removal(ctx, search, lambda m: self.uregex.search(m.content))
@purge.command(brief="Purge messages with attachments.", aliases=["attachments"])
async def files(self, ctx, search=100):
    """
    Usage: {0}purge files [amount]
    Aliases:
        {0}purge attachments
    Output:
        Deletes all messages that
        contain attachments in them.
    Examples:
        {0}purge attachments
        {0}prune files 125
    """
    # A non-empty attachment list marks the message for deletion.
    def has_attachment(m):
        return len(m.attachments)

    await self.do_removal(ctx, search, has_attachment)
@purge.command(
    brief="Purge messages with mentions.", aliases=["pings", "ping", "mention"]
)
async def mentions(self, ctx, search=100):
    """
    Usage: {0}purge mentions [amount]
    Aliases:
        {0}purge pings
        {0}purge ping
        {0}purge mention
    Output:
        Deletes all messages that
        contain user mentions in them.
    Examples:
        {0}purge mentions
        {0}prune pings 125
    """
    # Fixed help text: it read "-purge mentions" while every sibling command
    # uses the "{0}" prefix placeholder for substitution.
    # Delete messages that mention either users or roles.
    await self.do_removal(
        ctx, search, lambda e: len(e.mentions) or len(e.role_mentions)
    )
@purge.command(
    brief="Purge messages with images.", aliases=["pictures", "pics", "image"]
)
async def images(self, ctx, search=100):
    """
    Usage: {0}purge images [amount]
    Aliases:
        {0}purge pics
        {0}purge pictures
        {0}purge image
    Output:
        Deletes all messages that
        contain images in them.
    Examples:
        {0}purge pictures
        {0}prune images 125
    """
    # Fixed help text: the Usage line wrongly said "{0}purge mentions".
    # Images arrive either as embeds or as attachments.
    await self.do_removal(
        ctx, search, lambda e: len(e.embeds) or len(e.attachments)
    )
@purge.command(name="all", brief="Purge all messages.", aliases=["messages"])
async def _remove_all(self, ctx, search=100):
    """
    Usage: {0}purge all [amount]
    Aliases:
        {0}purge
        {0}purge messages
    Output:
        Deletes all messages.
    Examples:
        {0}purge
        {0}prune 2000
        {0}prune messages 125
    """
    # Unconditional predicate: every message in the window is deleted.
    await self.do_removal(ctx, search, lambda m: True)
@purge.command(brief="Purge messages sent by a user.", aliases=["member"])
async def user(self, ctx, user: converters.DiscordUser, search=100):
    """
    Usage: {0}purge user <user> [amount]
    Aliases:
        {0}purge member
    Output:
        Deletes all messages that
        were sent by the passed user.
    Examples:
        {0}purge user
        {0}prune member 125
    """
    # Compare author IDs rather than objects: works for left members too.
    def from_user(m):
        return m.author.id == user.id

    await self.do_removal(ctx, search, from_user)
@purge.command(brief="Customize purging messages.", aliases=["has"])
async def contains(self, ctx, *, substr: str):
    """
    Usage: {0}purge contains <string>
    Alias:
        {0}purge has
    Output:
        Deletes all messages that
        contain the passed string.
    Examples:
        {0}purge contains hello
        {0}prune has no
    Notes:
        The string must a minimum
        of 2 characters in length.
    """
    # Guard clause: refuse overly short substrings (they would match too much).
    if len(substr) < 2:
        await ctx.fail("The substring length must be at least 2 characters.")
        return
    await self.do_removal(ctx, 100, lambda m: substr in m.content)
@purge.command(
    name="bots", brief="Purge messages sent by bots.", aliases=["robots"]
)
async def _bots(self, ctx, search=100, prefix=None):
    """
    Usage: {0}purge bots [amount] [prefix]
    Alias:
        {0}purge robots
    Output:
        Deletes all messages
        that were sent by bots.
    Examples:
        {0}purge robots 200
        {0}prune bots 150
    Notes:
        Specify an optional prefix to
        remove all messages that start
        with that prefix. This is useful
        for removing command invocations
    """
    if not str(search).isdigit():
        # The user put a prefix in the search slot, e.g. "{0}purge bots !".
        prefix = search
        search = 100
    else:
        # BUG FIX: `search` has no annotation, so discord.py passes it as a str;
        # without this conversion do_removal's `limit > 2000` raised TypeError.
        search = int(search)
    if prefix:

        def predicate(m):
            return (m.webhook_id is None and m.author.bot) or m.content.startswith(
                prefix
            )

    else:

        def predicate(m):
            return m.webhook_id is None and m.author.bot

    await self.do_removal(ctx, search, predicate)
@purge.command(
    name="webhooks", aliases=["webhook"], brief="Purge messages sent by webhooks."
)
async def webhooks(self, ctx, search=100):
    """
    Usage: {0}purge webhooks [amount]
    Alias:
        {0}purge webhook
    Output:
        Deletes all messages that
        were sent by webhooks.
    Examples:
        {0}purge webhook
        {0}prune webhooks 125
    """
    # Fixed user-facing typo in brief: "wehooks" -> "webhooks".
    # A non-None webhook_id marks a message delivered by a webhook.
    def predicate(m):
        return m.webhook_id

    await self.do_removal(ctx, search, predicate)
@purge.command(
    name="humans",
    aliases=["users", "members", "people"],
    brief="Purge messages sent by humans.",
)
async def _users(self, ctx, search=100):
    """
    Usage: {0}purge humans [amount]
    Aliases:
        {0}purge users
        {0}purge members
        {0}purge people
    Output:
        Deletes all messages
        sent by user accounts.
        Bot and webhook messages
        will not be deleted.
    Examples:
        {0}purge humans
        {0}prune people 125
    """
    # Webhook authors report author.bot == True, so this also skips webhooks.
    await self.do_removal(ctx, search, lambda m: m.author.bot is False)
@purge.command(
    name="emojis",
    aliases=["emotes", "emote", "emoji"],
    brief="Purge messages with emojis.",
)
async def _emojis(self, ctx, search=100):
    """
    Usage: {0}purge emojis [amount]
    Aliases:
        {0}purge emotes
        {0}purge emote
        {0}purge emoji
    Output:
        Deletes all messages that
        contain custom discord emojis.
    Examples:
        {0}purge emojis
        {0}prune emotes 125
    """
    # Custom discord emotes (<:name:id> / animated <a:name:id>) plus a
    # unicode emoji range.
    emoji_pattern = re.compile(r"<a?:(.*?):(\d{17,21})>|[\u263a-\U0001f645]")
    await self.do_removal(ctx, search, lambda m: emoji_pattern.search(m.content))
@purge.command(name="reactions", brief="Purge reactions from messages.")
async def _reactions(self, ctx, search=100):
    """
    Usage: {0}purge reactions [amount]
    Output:
        Removes all reactions from
        messages that were reacted on.
    Examples:
        {0}purge reactions
        {0}prune reactions 125
    Notes:
        The messages are not deleted.
        Only the reactions are removed.
    """
    # Fixed help text: Usage wrongly said "purge emojis" and "Demoves" was a typo.
    if search > 2000:
        return await ctx.send_or_reply(
            content=f"Too many messages to search for ({search}/2000)",
        )
    total_reactions = 0
    # Walk the history manually: reactions cannot be purged via channel.purge.
    async for message in ctx.history(limit=search, before=ctx.message):
        if len(message.reactions):
            total_reactions += sum(r.count for r in message.reactions)
            await message.clear_reactions()
    msg = await ctx.send_or_reply(
        f'{self.bot.emote_dict["trash"]} Successfully removed {total_reactions} reactions.'
    )
    # Clean up both the confirmation and the invoking message.
    to_delete = [msg.id, ctx.message.id]
    await ctx.channel.purge(check=lambda m: m.id in to_delete)
@purge.command(
    name="until", aliases=["after"], brief="Purge messages after a message."
)
async def _until(self, ctx, message: discord.Message = None):
    """
    Usage: {0}purge until <message id>
    Alias: {0}purge after
    Output:
        Purges all messages until
        the given message_id.
        Given ID is not deleted
    Examples:
        {0}purge until 810377376269
        {0}prune after 810377376269
    """
    # No message argument: fall back to the project converter (e.g. a reply).
    if message is None:
        message = await converters.DiscordMessage().convert(ctx)
    await self.do_removal(ctx, 100, None, after=message.id)
@purge.command(name="between", brief="Purge messages between 2 messages.")
async def _between(self, ctx, message1: discord.Message, message2: discord.Message):
"""
Usage: {0}purge between <message id> <message id>
Output:
Purges all messages until
the given message_id.
Given ID is | |
# Repository: mikan/racm
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.dataview
import gettext
_ = gettext.gettext
###########################################################################
## Class MainFrame
###########################################################################
class MainFrame ( wx.Frame ):
    """wxFormBuilder-generated main window of the Remote ADB Connection Manager.

    Builds the host list, the Connection/Shell/Manage button panels, the menu
    bar and the status bar, and binds every widget event to a virtual handler.
    Generated code — do not edit by hand; regenerate from the .fbp project and
    override the ``on_*`` handlers in a derived class instead.
    """

    def __init__( self, parent ):
        """Construct the full widget hierarchy and wire up all event bindings."""
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = _(u"Remote ADB Connection Manager"), pos = wx.DefaultPosition, size = wx.Size( 570,520 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHintsSz( wx.Size( 570,480 ), wx.DefaultSize )
        self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_MENU ) )
        # Menu bar: File / Edit / Log / Help
        self.menu_bar = wx.MenuBar( 0 )
        self.file_menu = wx.Menu()
        self.add_menu_item = wx.MenuItem( self.file_menu, wx.ID_ANY, _(u"Add..."), _(u"Add a new target."), wx.ITEM_NORMAL )
        self.file_menu.AppendItem( self.add_menu_item )
        self.refresh_menu_item = wx.MenuItem( self.file_menu, wx.ID_ANY, _(u"Refresh"), _(u"Execute \"kill-server\" and \"start-server\"."), wx.ITEM_NORMAL )
        self.file_menu.AppendItem( self.refresh_menu_item )
        self.file_menu.AppendSeparator()
        self.exit_menu_item = wx.MenuItem( self.file_menu, wx.ID_ANY, _(u"Exit"), _(u"Exit this application."), wx.ITEM_NORMAL )
        self.file_menu.AppendItem( self.exit_menu_item )
        self.menu_bar.Append( self.file_menu, _(u"File") )
        self.edit_menu = wx.Menu()
        self.settings_menu_item = wx.MenuItem( self.edit_menu, wx.ID_ANY, _(u"Settings..."), _(u"Change ADB path, custom buttons, and more."), wx.ITEM_NORMAL )
        self.edit_menu.AppendItem( self.settings_menu_item )
        self.edit_menu.AppendSeparator()
        self.edit_menu_item = wx.MenuItem( self.edit_menu, wx.ID_ANY, _(u"Edit..."), _(u"Edit the selected target."), wx.ITEM_NORMAL )
        self.edit_menu.AppendItem( self.edit_menu_item )
        self.remove_menu_item = wx.MenuItem( self.edit_menu, wx.ID_ANY, _(u"Remove"), _(u"Remove the selected target."), wx.ITEM_NORMAL )
        self.edit_menu.AppendItem( self.remove_menu_item )
        self.menu_bar.Append( self.edit_menu, _(u"Edit") )
        self.log_menu = wx.Menu()
        self.logcat_menu_item = wx.MenuItem( self.log_menu, wx.ID_ANY, _(u"LogCat (EXPERIMENTAL)"), wx.EmptyString, wx.ITEM_NORMAL )
        self.log_menu.AppendItem( self.logcat_menu_item )
        self.menu_bar.Append( self.log_menu, _(u"Log") )
        self.help_menu = wx.Menu()
        self.releases_menu_item = wx.MenuItem( self.help_menu, wx.ID_ANY, _(u"Check releases..."), wx.EmptyString, wx.ITEM_NORMAL )
        self.help_menu.AppendItem( self.releases_menu_item )
        self.issues_menu_item = wx.MenuItem( self.help_menu, wx.ID_ANY, _(u"Check issues..."), wx.EmptyString, wx.ITEM_NORMAL )
        self.help_menu.AppendItem( self.issues_menu_item )
        self.help_menu.AppendSeparator()
        self.about_menu_item = wx.MenuItem( self.help_menu, wx.ID_ANY, _(u"About..."), _(u"About this application."), wx.ITEM_NORMAL )
        self.help_menu.AppendItem( self.about_menu_item )
        self.menu_bar.Append( self.help_menu, _(u"Help") )
        self.SetMenuBar( self.menu_bar )
        # Main layout: host list on the left, button panels on the right
        wrapper = wx.BoxSizer( wx.HORIZONTAL )
        self.host_list = wx.dataview.DataViewListCtrl( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.host_column = self.host_list.AppendTextColumn( _(u"Host") )
        self.name_column = self.host_list.AppendTextColumn( _(u"Name") )
        self.status_column = self.host_list.AppendTextColumn( _(u"Status") )
        wrapper.Add( self.host_list, 1, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 5 )
        list_area_box = wx.BoxSizer( wx.VERTICAL )
        # "Connection" panel: connect / disconnect (disabled until a host is selected)
        connection_box = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, _(u"Connection") ), wx.VERTICAL )
        self.connect_button = wx.Button( connection_box.GetStaticBox(), wx.ID_ANY, _(u"Connect"), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.connect_button.Enable( False )
        connection_box.Add( self.connect_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.disconnect_button = wx.Button( connection_box.GetStaticBox(), wx.ID_ANY, _(u"Disconnect"), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.disconnect_button.Enable( False )
        connection_box.Add( self.disconnect_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        list_area_box.Add( connection_box, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
        # "Shell" panel: three user-configurable custom commands + APK install
        shell_box = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, _(u"Shell") ), wx.VERTICAL )
        self.custom1_button = wx.Button( shell_box.GetStaticBox(), wx.ID_ANY, _(u"Custom 1"), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.custom1_button.Enable( False )
        shell_box.Add( self.custom1_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.custom2_button = wx.Button( shell_box.GetStaticBox(), wx.ID_ANY, _(u"Custom 2"), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.custom2_button.Enable( False )
        shell_box.Add( self.custom2_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.custom3_button = wx.Button( shell_box.GetStaticBox(), wx.ID_ANY, _(u"Custom 3"), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.custom3_button.Enable( False )
        shell_box.Add( self.custom3_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.shell_button = wx.Button( shell_box.GetStaticBox(), wx.ID_ANY, _(u"Shell..."), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.shell_button.Enable( False )
        self.shell_button.Hide()
        shell_box.Add( self.shell_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.apk_install_button = wx.Button( shell_box.GetStaticBox(), wx.ID_ANY, _(u"Install APK..."), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.apk_install_button.Enable( False )
        shell_box.Add( self.apk_install_button, 0, wx.ALL|wx.EXPAND, 5 )
        list_area_box.Add( shell_box, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
        # "Manage" panel: add/remove targets (LogCat button kept but hidden)
        manage_box = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, _(u"Manage") ), wx.VERTICAL )
        self.add_button = wx.Button( manage_box.GetStaticBox(), wx.ID_ANY, _(u"Add..."), wx.DefaultPosition, wx.DefaultSize, 0 )
        manage_box.Add( self.add_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.remove_button = wx.Button( manage_box.GetStaticBox(), wx.ID_ANY, _(u"Remove"), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.remove_button.Enable( False )
        manage_box.Add( self.remove_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
        self.logcat_button = wx.Button( manage_box.GetStaticBox(), wx.ID_ANY, _(u"LogCat..."), wx.DefaultPosition, wx.DefaultSize, 0 )
        self.logcat_button.Enable( False )
        self.logcat_button.Hide()
        manage_box.Add( self.logcat_button, 0, wx.ALL, 5 )
        list_area_box.Add( manage_box, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
        wrapper.Add( list_area_box, 0, wx.EXPAND, 5 )
        self.SetSizer( wrapper )
        self.Layout()
        self.status_bar = self.CreateStatusBar( 1, wx.ST_SIZEGRIP, wx.ID_ANY )
        self.Centre( wx.BOTH )
        # Connect Events
        self.Bind( wx.EVT_CLOSE, self.on_main_closed )
        self.Bind( wx.EVT_MENU, self.on_add_selected, id = self.add_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_refresh_selected, id = self.refresh_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_exit_selected, id = self.exit_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_settings_selected, id = self.settings_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_edit_selected, id = self.edit_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_remove_selected, id = self.remove_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_logcat_selected, id = self.logcat_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_releases_selected, id = self.releases_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_issues_selected, id = self.issues_menu_item.GetId() )
        self.Bind( wx.EVT_MENU, self.on_about_selected, id = self.about_menu_item.GetId() )
        self.Bind( wx.dataview.EVT_DATAVIEW_ITEM_ACTIVATED, self.on_host_selection_item_activated, id = wx.ID_ANY )
        self.Bind( wx.dataview.EVT_DATAVIEW_SELECTION_CHANGED, self.on_host_selection_changed, id = wx.ID_ANY )
        self.connect_button.Bind( wx.EVT_BUTTON, self.on_connect_clicked )
        self.disconnect_button.Bind( wx.EVT_BUTTON, self.on_disconnect_clicked )
        self.custom1_button.Bind( wx.EVT_BUTTON, self.on_custom1_clicked )
        self.custom2_button.Bind( wx.EVT_BUTTON, self.on_custom2_clicked )
        self.custom3_button.Bind( wx.EVT_BUTTON, self.on_custom3_clicked )
        self.shell_button.Bind( wx.EVT_BUTTON, self.on_shell_clicked )
        self.apk_install_button.Bind( wx.EVT_BUTTON, self.on_apk_install_clicked )
        self.add_button.Bind( wx.EVT_BUTTON, self.on_add_clicked )
        self.remove_button.Bind( wx.EVT_BUTTON, self.on_remove_clicked )
        self.logcat_button.Bind( wx.EVT_BUTTON, self.on_logcat_clicked )

    def __del__( self ):
        pass

    # Virtual event handlers, override them in your derived class
    def on_main_closed( self, event ):
        pass

    def on_add_selected( self, event ):
        pass

    def on_refresh_selected( self, event ):
        pass

    def on_exit_selected( self, event ):
        pass

    def on_settings_selected( self, event ):
        pass

    def on_edit_selected( self, event ):
        pass

    def on_remove_selected( self, event ):
        pass

    def on_logcat_selected( self, event ):
        pass

    def on_releases_selected( self, event ):
        pass

    def on_issues_selected( self, event ):
        pass

    def on_about_selected( self, event ):
        pass

    def on_host_selection_item_activated( self, event ):
        pass

    def on_host_selection_changed( self, event ):
        pass

    def on_connect_clicked( self, event ):
        pass

    def on_disconnect_clicked( self, event ):
        pass

    def on_custom1_clicked( self, event ):
        pass

    def on_custom2_clicked( self, event ):
        pass

    def on_custom3_clicked( self, event ):
        pass

    def on_shell_clicked( self, event ):
        pass

    def on_apk_install_clicked( self, event ):
        pass

    def on_add_clicked( self, event ):
        pass

    def on_remove_clicked( self, event ):
        pass

    def on_logcat_clicked( self, event ):
        pass
###########################################################################
## Class SettingsFrame
###########################################################################
class SettingsFrame ( wx.Frame ):
def __init__( self, parent ):
    """Construct the Settings dialog: a notebook with 'Path' and 'Custom Buttons'
    tabs plus Cancel/OK buttons. wxFormBuilder-generated — do not edit by hand;
    regenerate from the .fbp project instead."""
    wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = _(u"Settings"), pos = wx.DefaultPosition, size = wx.Size( 502,420 ), style = wx.DEFAULT_FRAME_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.TAB_TRAVERSAL )
    self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
    self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_MENU ) )
    wrapper = wx.BoxSizer( wx.VERTICAL )
    self.settings_notebook = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
    # "Path" tab: ADB executable location
    self.path_panel = wx.Panel( self.settings_notebook, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
    path_wrapper = wx.BoxSizer( wx.VERTICAL )
    adb_box = wx.StaticBoxSizer( wx.StaticBox( self.path_panel, wx.ID_ANY, _(u"ADB") ), wx.VERTICAL )
    self.adb_desc_text = wx.StaticText( adb_box.GetStaticBox(), wx.ID_ANY, _(u"Path to ADB command:"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.adb_desc_text.Wrap( -1 )
    adb_box.Add( self.adb_desc_text, 0, wx.ALL, 5 )
    self.adb_file_picker = wx.FilePickerCtrl( adb_box.GetStaticBox(), wx.ID_ANY, wx.EmptyString, _(u"Select a file"), u"*.*", wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE )
    adb_box.Add( self.adb_file_picker, 0, wx.ALL|wx.EXPAND, 5 )
    adb_box.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
    self.adb_hint_text = wx.StaticText( adb_box.GetStaticBox(), wx.ID_ANY, _(u"Hint:"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.adb_hint_text.Wrap( -1 )
    adb_box.Add( self.adb_hint_text, 0, wx.ALL|wx.EXPAND, 5 )
    self.adb_hint1_text = wx.StaticText( adb_box.GetStaticBox(), wx.ID_ANY, _(u"ADB command localed in \"platform-tools\" directory in the sdk home path."), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.adb_hint1_text.Wrap( -1 )
    adb_box.Add( self.adb_hint1_text, 0, wx.ALL, 5 )
    path_wrapper.Add( adb_box, 1, wx.EXPAND, 5 )
    self.path_panel.SetSizer( path_wrapper )
    self.path_panel.Layout()
    path_wrapper.Fit( self.path_panel )
    self.settings_notebook.AddPage( self.path_panel, _(u"Path"), False )
    # "Custom Buttons" tab: label / enable / command rows for the three custom buttons
    self.custom_buttons_panel = wx.Panel( self.settings_notebook, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
    custom_buttons_wrapper = wx.BoxSizer( wx.VERTICAL )
    self.buttons_guide_label = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"Label, enable/disable and command:"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.buttons_guide_label.Wrap( -1 )
    custom_buttons_wrapper.Add( self.buttons_guide_label, 0, wx.ALL, 5 )
    custom_buttons_grid = wx.FlexGridSizer( 3, 4, 10, 0 )
    custom_buttons_grid.AddGrowableCol( 3 )
    custom_buttons_grid.SetFlexibleDirection( wx.BOTH )
    custom_buttons_grid.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
    self.custom1_text = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"Custom 1"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.custom1_text.Wrap( -1 )
    custom_buttons_grid.Add( self.custom1_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom1_label = wx.TextCtrl( self.custom_buttons_panel, wx.ID_ANY, _(u"Custom 1"), wx.DefaultPosition, wx.DefaultSize, 0 )
    custom_buttons_grid.Add( self.custom1_label, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom1_enable = wx.CheckBox( self.custom_buttons_panel, wx.ID_ANY, _(u"Enable"), wx.DefaultPosition, wx.DefaultSize, 0 )
    custom_buttons_grid.Add( self.custom1_enable, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom1_command = wx.TextCtrl( self.custom_buttons_panel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
    self.custom1_command.Enable( False )
    custom_buttons_grid.Add( self.custom1_command, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 5 )
    self.custom2_text = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"Custom 2"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.custom2_text.Wrap( -1 )
    custom_buttons_grid.Add( self.custom2_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom2_label = wx.TextCtrl( self.custom_buttons_panel, wx.ID_ANY, _(u"Custom 2"), wx.DefaultPosition, wx.DefaultSize, 0 )
    custom_buttons_grid.Add( self.custom2_label, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom2_enable = wx.CheckBox( self.custom_buttons_panel, wx.ID_ANY, _(u"Enable"), wx.DefaultPosition, wx.DefaultSize, 0 )
    custom_buttons_grid.Add( self.custom2_enable, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom2_command = wx.TextCtrl( self.custom_buttons_panel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
    self.custom2_command.Enable( False )
    custom_buttons_grid.Add( self.custom2_command, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL|wx.EXPAND, 5 )
    self.custom3_text = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"Custom 3"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.custom3_text.Wrap( -1 )
    custom_buttons_grid.Add( self.custom3_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom3_label = wx.TextCtrl( self.custom_buttons_panel, wx.ID_ANY, _(u"Custom 3"), wx.DefaultPosition, wx.DefaultSize, 0 )
    custom_buttons_grid.Add( self.custom3_label, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom3_enable = wx.CheckBox( self.custom_buttons_panel, wx.ID_ANY, _(u"Enable"), wx.DefaultPosition, wx.DefaultSize, 0 )
    custom_buttons_grid.Add( self.custom3_enable, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
    self.custom3_command = wx.TextCtrl( self.custom_buttons_panel, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
    self.custom3_command.Enable( False )
    custom_buttons_grid.Add( self.custom3_command, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 5 )
    custom_buttons_wrapper.Add( custom_buttons_grid, 0, wx.EXPAND, 5 )
    custom_buttons_wrapper.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
    # Static usage hints and documentation link below the grid
    self.com_hint_text = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"Command examples:"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.com_hint_text.Wrap( -1 )
    custom_buttons_wrapper.Add( self.com_hint_text, 0, wx.ALL|wx.EXPAND, 5 )
    self.com_hint1_text = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"uninstall com.foo.app"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.com_hint1_text.Wrap( -1 )
    custom_buttons_wrapper.Add( self.com_hint1_text, 0, wx.ALL|wx.EXPAND, 5 )
    self.com_hint2_text = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"shell chmod 777 /data/foo"), wx.DefaultPosition, wx.DefaultSize, 0 )
    self.com_hint2_text.Wrap( -1 )
    custom_buttons_wrapper.Add( self.com_hint2_text, 0, wx.ALL|wx.EXPAND, 5 )
    self.com_hint3_text = wx.StaticText( self.custom_buttons_panel, wx.ID_ANY, _(u"shell am start -a android.intent.action.VIEW -d http://google.com"), wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
    self.com_hint3_text.Wrap( -1 )
    custom_buttons_wrapper.Add( self.com_hint3_text, 0, wx.ALL|wx.EXPAND, 5 )
    self.adb_command_hyperlink = wx.HyperlinkCtrl( self.custom_buttons_panel, wx.ID_ANY, _(u"ADB Shell Commands | Android Developers"), u"https://developer.android.com/tools/help/shell.html", wx.DefaultPosition, wx.DefaultSize, wx.HL_DEFAULT_STYLE )
    custom_buttons_wrapper.Add( self.adb_command_hyperlink, 0, wx.ALL|wx.EXPAND, 5 )
    self.custom_buttons_panel.SetSizer( custom_buttons_wrapper )
    self.custom_buttons_panel.Layout()
    custom_buttons_wrapper.Fit( self.custom_buttons_panel )
    self.settings_notebook.AddPage( self.custom_buttons_panel, _(u"Custom Buttons"), False )
    wrapper.Add( self.settings_notebook, 1, wx.ALL|wx.EXPAND, 5 )
    # Bottom button row: Cancel / OK, right-aligned via the leading spacer
    button_box = wx.BoxSizer( wx.HORIZONTAL )
    button_box.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
    self.cancel_button = wx.Button( self, wx.ID_ANY, _(u"Cancel"), wx.DefaultPosition, wx.DefaultSize, 0 )
    button_box.Add( self.cancel_button, 0, wx.ALL, 5 )
    self.ok_button = wx.Button( self, wx.ID_ANY, _(u"OK"), wx.DefaultPosition, wx.DefaultSize, 0 )
    button_box.Add( self.ok_button, 0, wx.ALL, 5 )
    wrapper.Add( button_box, 0, wx.EXPAND, 5 )
    self.SetSizer( wrapper )
    self.Layout()
    self.Centre( wx.BOTH )
    # Connect Events
    self.adb_file_picker.Bind( wx.EVT_FILEPICKER_CHANGED, self.on_adb_path_changed )
    self.custom1_enable.Bind( wx.EVT_CHECKBOX, self.on_custom1_enable_changed )
    self.custom2_enable.Bind( wx.EVT_CHECKBOX, self.on_custom2_enable_changed )
    self.custom3_enable.Bind( wx.EVT_CHECKBOX, self.on_custom3_enable_changed )
    self.cancel_button.Bind( wx.EVT_BUTTON, self.on_cancel_clicked )
    self.ok_button.Bind( wx.EVT_BUTTON, self.on_ok_clicked )
def | |
"""
File: gromos++ topo file functions
Warnings: this CLASS IS NOT IMPLEMENTED!
TODO:REWORK
Description:
in this lib, gromos topo file mainpulating functions are gathered
Author: <NAME>, <NAME>
"""
#imports
from copy import deepcopy
from typing import TypeVar, Union
import warnings
import math
from pygromos.utils import bash as bash
from pygromos.files._basics import _general_gromos_file, parser
from pygromos.files.blocks import topology_blocks as blocks
TopType = TypeVar("Top")
#functions
def make_topolog(input_arg, build, param, seq, solve="H2O"):
    """Build a GROMOS topology by shelling out to the ``make_top`` program.

    Parameters
    ----------
    input_arg : str
        input argument(s) passed straight to ``make_top``
    build : str
        build option for ``make_top``
        NOTE(review): accepted but never used — it is not part of the
        assembled command. TODO confirm whether it should be included.
    param : str
        parameter-file argument
    seq : str
        building-block sequence
    solve : str, optional
        solvent building block, by default "H2O"

    Returns
    -------
    str
        the command string that was executed
    """
    # define the shell command
    # NOTE(review): the command is assembled from raw strings and run through
    # a shell — do not pass untrusted input to this function.
    command = f"make_top {input_arg} {param} {seq} {solve} \n"
    # execute command; on failure, wrap and escalate via the bash helper
    try:
        bash.execute(command=command)
    except Exception as err:
        bash.increment_error_level(err_prefix="Could not make_topology due to: ", old_err=err)
    return command
def combine_topologies():
    """Placeholder for combining topologies.

    Raises
    ------
    Exception
        always — this feature is not implemented yet.
    """
    raise Exception('not implemented yet!')
def check_top():
    """Placeholder for topology sanity checking.

    Raises
    ------
    Exception
        always — this feature is not implemented yet.
    """
    raise Exception('not implemented yet!')
#file Classes
class Top(_general_gromos_file._general_gromos_file):
gromos_file_ending:str = "top"
def __init__(self, in_value: Union[str, dict, None, TopType], _future_file: bool = False):
    """Construct a Top from a file path, ``None`` (empty topology), or another Top.

    Parameters
    ----------
    in_value : str | dict | None | Top
        path to a topology file (str), or ``None`` for an empty topology.
        dict / Top inputs are not implemented yet.
        (fixed: the old annotation ``(str or dict or None or TopType)``
        evaluated to just ``str`` at runtime.)
    _future_file : bool, optional
        whether the file does not exist yet, by default False

    Raises
    ------
    Exception
        for the not-yet-implemented input kinds (Top instance, anything else)
    """
    if isinstance(in_value, str):
        super().__init__(in_value=in_value, _future_file=_future_file)
    elif in_value is None:  # fixed: identity check instead of `== None`
        self.path = ""
        self.block_names = {}
        super().__init__(in_value=None)
    elif type(in_value) is __class__:
        # copy-construction from another Top is not supported yet
        raise Exception('not implemented yet!')
    else:
        raise Exception('not implemented yet!')
def __add__(self, top: TopType) -> TopType:
    """Operator alias: ``self + top`` combines two topologies via :meth:`_add_top`."""
    return self._add_top(top=top)
def _add_top(self, top: Union[TopType, None], solvFrom1: bool = True, verbose: bool = False) -> TopType:
    """
    Combines two topologies. Parameters are taken from the initial topology,
    but missing parameters from the second topology will be added.
    Can be used like com_top from Gromos++.
    Parameters
    ----------
    top : TopType
        second topology to add to the first topology
    solvFrom1 : bool, optional
        should the solvent be taken from the first topology? (else second), by default True
    verbose : bool, optional
        extra print statements, by default False
    Returns
    -------
    TopType
        returns a topology made by combining two topologies
    """
    # create the return top as a copy so neither input is mutated
    retTop = deepcopy(self)
    if top is None:
        return retTop
    # add solvent blocks (taken from the second topology on request)
    if not solvFrom1:
        if verbose: print("taking solvent from second topology")
        retTop.SOLVENTATOM = top.SOLVENTATOM
        retTop.SOLVENTCONSTR = top.SOLVENTCONSTR
    # calculate the shift of atom types of the second topology and add new atomtypes;
    # atomTypeShift maps: IAC in `top` -> IAC in `retTop`
    atomTypeShift = {}
    if not (hasattr(retTop, "ATOMTYPENAME") and len(retTop.ATOMTYPENAME.content) >= 2):
        # first topology declares no atom types: adopt the second one's wholesale
        setattr(retTop, "ATOMTYPENAME", deepcopy(top.ATOMTYPENAME))
        setattr(retTop, "LJPARAMETERS", deepcopy(top.LJPARAMETERS))
    for idx, atomT in enumerate(top.ATOMTYPENAME.content[1:]):  # new atomtypes to find names for
        foundAtomType = False
        for mainIdx, mainAtomT in enumerate(retTop.ATOMTYPENAME.content[1:]):  # AtomTypes in self to match against
            if atomT == mainAtomT:
                foundAtomType = True
                atomTypeShift.update({idx + 1: mainIdx + 1})
                break
        if not foundAtomType:
            retTop.ATOMTYPENAME.content[0][0] = str(int(retTop.ATOMTYPENAME.content[0][0]) + 1)
            retTop.ATOMTYPENAME.content.append(atomT)
            # NOTE(review): this branch stores the counter *string* while the
            # match branch above stores an int (mainIdx+1), so atomTypeShift
            # values mix str/int — TODO confirm this is intended downstream.
            atomTypeShift.update({idx + 1: retTop.ATOMTYPENAME.content[0][0]})
            ljType = top.get_LJparameter_from_IAC(IAC=idx + 1)
            retTop.add_new_LJparameter(C6=float(ljType.C6), C12=float(ljType.C12))
    if verbose: print("atomTypeShift: " + str(atomTypeShift))
    # add RESNAME
    for resname in top.RESNAME.content[1:]:
        retTop.add_new_resname(resname[0])
    # add SOLUTEATOM (shift atom/molecule numbers past the first topology's)
    if hasattr(retTop, "SOLUTEATOM"):
        atnmShift = retTop.SOLUTEATOM.content[-1].ATNM  # Number of atoms found in main top. Shift secondary top atoms accordingly
        mresShift = retTop.SOLUTEATOM.content[-1].MRES  # Number of molecules found in main top.
    else:
        atnmShift = 0
        mresShift = 0
    if verbose: print("atom number shift: " + str(atnmShift))
    if verbose: print("molecule number shift: " + str(mresShift))
    for atom in top.SOLUTEATOM.content:
        retTop.add_new_soluteatom(ATNM=atnmShift + atom.ATNM,
                                  MRES=mresShift + atom.MRES,
                                  PANM=atom.PANM,
                                  IAC=atomTypeShift[atom.IAC],
                                  MASS=atom.MASS,
                                  CG=atom.CG,
                                  CGC=atom.CGC,
                                  INE=[str(int(x) + atnmShift) for x in atom.INEvalues],
                                  INE14=[str(int(x) + atnmShift) for x in atom.INE14values])
    # add bonds and bonds with H
    for bond in top.BOND.content:
        bondType = top.BONDSTRETCHTYPE.content[bond.ICB - 1]
        retTop.add_new_bond(k=bondType.CHB,
                            b0=bondType.B0,
                            atomI=bond.IB + atnmShift,
                            atomJ=bond.JB + atnmShift)
    for bond in top.BONDH.content:
        bondType = top.BONDSTRETCHTYPE.content[bond.ICB - 1]
        retTop.add_new_bond(k=bondType.CHB,
                            b0=bondType.B0,
                            atomI=bond.IB + atnmShift,
                            atomJ=bond.JB + atnmShift,
                            includesH=True)
    # add angles and angles with H
    for angle in top.BONDANGLE.content:
        angleType = top.BONDANGLEBENDTYPE.content[angle.ICT - 1]
        retTop.add_new_angle(k=angleType.CB,
                             kh=angleType.CHB,
                             b0=angleType.B0,
                             atomI=angle.IT + atnmShift,
                             atomJ=angle.JT + atnmShift,
                             atomK=angle.KT + atnmShift)
    for angle in top.BONDANGLEH.content:
        angleType = top.BONDANGLEBENDTYPE.content[angle.ICT - 1]
        retTop.add_new_angle(k=angleType.CB,
                             kh=angleType.CHB,
                             b0=angleType.B0,
                             atomI=angle.IT + atnmShift,
                             atomJ=angle.JT + atnmShift,
                             atomK=angle.KT + atnmShift, includesH=True)
    # add dihedrals and dihedrals with H
    for dihdrl in top.DIHEDRAL.content:
        dihdrlType = top.TORSDIHEDRALTYPE.content[dihdrl.ICP - 1]
        retTop.add_new_torsiondihedral(CP=dihdrlType.CP,
                                       PD=dihdrlType.PD,
                                       NP=dihdrlType.NP,
                                       atomI=dihdrl.IP + atnmShift,
                                       atomJ=dihdrl.JP + atnmShift,
                                       atomK=dihdrl.KP + atnmShift,
                                       atomL=dihdrl.LP + atnmShift)
    for dihdrl in top.DIHEDRALH.content:
        dihdrlType = top.TORSDIHEDRALTYPE.content[dihdrl.ICPH - 1]
        retTop.add_new_torsiondihedral(CP=dihdrlType.CP,
                                       PD=dihdrlType.PD,
                                       NP=dihdrlType.NP,
                                       atomI=dihdrl.IPH + atnmShift,
                                       atomJ=dihdrl.JPH + atnmShift,
                                       atomK=dihdrl.KPH + atnmShift,
                                       atomL=dihdrl.LPH + atnmShift,
                                       includesH=True)
    # add impdihedrals with and without H
    for dihdrl in top.IMPDIHEDRAL.content:
        dihdrlType = top.IMPDIHEDRALTYPE.content[dihdrl.ICQ - 1]
        retTop.add_new_impdihedral(CQ=dihdrlType.CQ,
                                   Q0=dihdrlType.Q0,
                                   atomI=dihdrl.IQ + atnmShift,
                                   atomJ=dihdrl.JQ + atnmShift,
                                   atomK=dihdrl.KQ + atnmShift,
                                   atomL=dihdrl.LQ + atnmShift)
    for dihdrl in top.IMPDIHEDRALH.content:
        dihdrlType = top.IMPDIHEDRALTYPE.content[dihdrl.ICQH - 1]
        # fixed: this loop called add_new_torsiondihedral(CQ=..., Q0=...),
        # which would raise TypeError (that method takes CP/PD/NP). Improper
        # dihedrals with H must go through add_new_impdihedral, mirroring the
        # non-H loop above.
        retTop.add_new_impdihedral(CQ=dihdrlType.CQ,
                                   Q0=dihdrlType.Q0,
                                   atomI=dihdrl.IQH + atnmShift,
                                   atomJ=dihdrl.JQH + atnmShift,
                                   atomK=dihdrl.KQH + atnmShift,
                                   atomL=dihdrl.LQH + atnmShift,
                                   includesH=True)
    # add SOLUTEMOLECULES
    for solmol in top.SOLUTEMOLECULES.content[1:]:
        retTop.add_new_SOLUTEMOLECULES(number=str(int(solmol[0]) + atnmShift))
    # add TEMPERATUREGROUPS
    for solmol in top.TEMPERATUREGROUPS.content[1:]:
        retTop.add_new_TEMPERATUREGROUPS(number=str(int(solmol[0]) + atnmShift))
    # add PRESSUREGROUPS
    for solmol in top.PRESSUREGROUPS.content[1:]:
        retTop.add_new_PRESSUREGROUPS(number=str(int(solmol[0]) + atnmShift))
    return retTop
def read_file(self):
    """Parse the topology file at ``self._orig_file_path`` into block objects.

    Each raw block is translated via ``add_block`` and exposed as an
    attribute named after the block title.

    Returns
    -------
    dict
        mapping of block title -> parsed block object
    """
    # Read raw blocks (title -> content) from disk
    data = parser.read_general_gromos_file(self._orig_file_path)
    # Translate the string sub-blocks into attributes. The local was renamed
    # (it used to be `blocks`), so it no longer shadows the module-level
    # `blocks` import from pygromos.files.blocks.
    parsed_blocks = {}
    for block_title in data:
        self.add_block(blocktitle=block_title, content=data[block_title])
        parsed_blocks.update({block_title: self.__getattribute__(block_title)})
    return parsed_blocks
def make_ordered(self, orderList: list = None):
    """Set the order in which blocks are written out.

    Parameters
    ----------
    orderList : list, optional
        custom block-title order; any falsy value selects the standard
        GROMOS block order.
    """
    default_order = ["TITLE", "PHYSICALCONSTANTS","TOPVERSION","ATOMTYPENAME","RESNAME","SOLUTEATOM","BONDSTRETCHTYPE","BONDH","BOND","BONDANGLEBENDTYPE","BONDANGLEH","BONDANGLE","IMPDIHEDRALTYPE","IMPDIHEDRALH","IMPDIHEDRAL","TORSDIHEDRALTYPE","DIHEDRALH","DIHEDRAL","CROSSDIHEDRALH","CROSSDIHEDRAL","LJPARAMETERS","SOLUTEMOLECULES","TEMPERATUREGROUPS","PRESSUREGROUPS","LJEXCEPTIONS","SOLVENTATOM","SOLVENTCONSTR"]
    self._block_order = orderList if orderList else default_order
def get_num_atomtypes(self) -> int:
    """Return the declared number of atom types (0 when no ATOMTYPENAME block exists)."""
    if hasattr(self, "ATOMTYPENAME"):
        # first content row holds the atom-type count as a string
        return int(self.ATOMTYPENAME.content[0][0])
    return 0
def add_new_atomtype(self, name: str, verbose=False):
    """Append an atom type name to ATOMTYPENAME, creating the block if needed."""
    if hasattr(self, "ATOMTYPENAME"):
        # guard: (re)seed the counter row if the block is empty
        if len(self.ATOMTYPENAME.content) < 1:
            self.ATOMTYPENAME.content.append(["0"])
        self.ATOMTYPENAME.content.append([name])
        # bump the stored atom-type count
        self.ATOMTYPENAME.content[0][0] = str(int(self.ATOMTYPENAME.content[0][0]) + 1)
    else:
        # fresh block: seed with a dummy entry, then swap in the real name
        self.add_block(blocktitle="ATOMTYPENAME", content=['0', 'Dummy'], verbose=verbose)
        self.ATOMTYPENAME.content.append([name])
        self.ATOMTYPENAME.content.remove(['Dummy'])
        # NOTE(review): the counter row is not incremented on this path —
        # TODO confirm add_block/parsing accounts for the first entry.
def add_new_resname(self, name: str, verbose=False):
    """Append a residue name to RESNAME, creating the block if needed."""
    if hasattr(self, "RESNAME"):
        # guard: (re)seed the counter row if the block is empty
        if len(self.RESNAME.content) < 1:
            self.RESNAME.content.append(["0"])
        self.RESNAME.content.append([name])
        # bump the stored residue count
        self.RESNAME.content[0][0] = str(int(self.RESNAME.content[0][0]) + 1)
    else:
        # fresh block: seed with a dummy entry, then swap in the real name
        self.add_block(blocktitle="RESNAME", content=['0', 'Dummy'], verbose=verbose)
        self.RESNAME.content.append([name])
        self.RESNAME.content.remove(['Dummy'])
        # NOTE(review): the counter row is not incremented on this path —
        # TODO confirm add_block/parsing accounts for the first entry.
def add_new_soluteatom(self, ATNM: int = 0, MRES: int = 0, PANM: str = "", IAC: int = 0, MASS: float = 0, CG: float = 0, CGC: int = 0, INE: list = None, INE14: list = None, verbose=False):
    """Append one solute atom entry to the SOLUTEATOM block (created on demand).

    Parameters
    ----------
    ATNM : int, optional
        atom number; 0 auto-assigns the next number
    MRES : int, optional
        molecule/residue number; 0 auto-assigns (last entry's MRES + 1, or 1)
    PANM : str, optional
        atom name
    IAC : int, optional
        integer atom code (atom type index)
    MASS : float, optional
        atom mass
    CG : float, optional
        partial charge
    CGC : int, optional
        charge-group flag
    INE, INE14 : list, optional
        exclusion / 1-4 neighbour lists; default to empty lists
        (fixed: mutable default arguments ``[]`` replaced with ``None``
        sentinels so the defaults cannot be shared across calls)
    verbose : bool, optional
        extra print statements in add_block
    """
    INE = [] if INE is None else INE
    INE14 = [] if INE14 is None else INE14
    if not hasattr(self, "SOLUTEATOM"):
        self.add_block(blocktitle="SOLUTEATOM", content=[], verbose=verbose)
        self.SOLUTEATOM.NRP = 0
    # some auto set methods
    if ATNM == 0:
        ATNM = len(self.SOLUTEATOM.content) + 1
    if MRES == 0:
        if len(self.SOLUTEATOM.content) >= 1:
            MRES = self.SOLUTEATOM.content[-1].MRES + 1
        else:
            MRES = 1
    # create new entry and keep the atom counter (NRP) in sync
    entry = blocks.soluteatom_type(ATNM=ATNM, MRES=MRES, PANM=PANM, IAC=IAC, MASS=MASS, CG=CG, CGC=CGC, INE=len(INE), INEvalues=INE, INE14=len(INE14), INE14values=INE14)
    self.SOLUTEATOM.content.append(entry)
    self.SOLUTEATOM.NRP += 1
def add_new_bond(self, k: float, b0: float, atomI: int, atomJ: int, includesH: bool = False, verbose=False):
    """Add a bond (and, if needed, its stretch type) to the topology.

    Looks up an existing BONDSTRETCHTYPE entry matching (k, b0); if none is
    found, a new type is appended. The bond itself goes into BONDH when
    includesH is True, otherwise into BOND.
    """
    # make sure the required blocks exist
    if not hasattr(self, "BONDSTRETCHTYPE"):
        self.add_block(blocktitle="BONDSTRETCHTYPE", content=list(), verbose=verbose)
    if includesH:
        if not hasattr(self, "BONDH"):
            self.add_block(blocktitle="BONDH", content=list(), verbose=verbose)
    else:
        if not hasattr(self, "BOND"):
            self.add_block(blocktitle="BOND", content=list(), verbose=verbose)
    # find the bondstretchtype number or create a new bondstretchtype
    # TODO: add quartic force (CB)
    quartic = k / (2 * (b0 ** 2))
    candidate = blocks.bondstretchtype_type(CB=quartic, CHB=k, B0=b0)
    bond_type_number = None
    for position, existing in enumerate(self.BONDSTRETCHTYPE.content, start=1):
        if existing.CHB == candidate.CHB and existing.B0 == candidate.B0:
            bond_type_number = position  # 1-indexed type id
            break
    if bond_type_number is None:  # bond type was not found -> add new bondtype
        self.BONDSTRETCHTYPE.content.append(candidate)
        self.BONDSTRETCHTYPE.NBTY += 1
        bond_type_number = len(self.BONDSTRETCHTYPE.content)
    # create new bond TODO: maybe check if already exists. But I will assume smart users
    newBond = blocks.top_bond_type(IB=atomI, JB=atomJ, ICB=bond_type_number)
    # append to BOND or BONDH and keep the respective counter in sync
    if includesH:
        self.BONDH.content.append(newBond)
        self.BONDH.NBONH += 1
    else:
        self.BOND.content.append(newBond)
        self.BOND.NBON += 1
def add_new_angle(self, k: float, kh: float, b0: float, atomI: int, atomJ: int, atomK: int, includesH: bool = False, verbose=False):
    """Add a bond angle (and, if needed, its bend type) to the topology.

    Looks up an existing BONDANGLEBENDTYPE entry matching (k, b0); if none
    is found, a new type is appended. The angle itself goes into BONDANGLEH
    when includesH is True, otherwise into BONDANGLE.
    """
    # make sure the required blocks exist
    if not hasattr(self, "BONDANGLEBENDTYPE"):
        self.add_block(blocktitle="BONDANGLEBENDTYPE", content=[], verbose=verbose)
    if includesH:
        if not hasattr(self, "BONDANGLEH"):
            self.add_block(blocktitle="BONDANGLEH", content=[], verbose=verbose)
    else:
        if not hasattr(self, "BONDANGLE"):
            self.add_block(blocktitle="BONDANGLE", content=[], verbose=verbose)
    # find the BONDANGLEBENDTYPE number or create a new BONDANGLEBENDTYPE
    # TODO: add harmonic in the angle cosine force (CT)
    # NOTE(review): matching ignores kh (CHB); two types with equal (k, b0)
    # but different kh collapse to the first — TODO confirm intended.
    angle_type_number = None
    for position, existing in enumerate(self.BONDANGLEBENDTYPE.content, start=1):
        if existing.CB == k and existing.B0 == b0:
            angle_type_number = position  # 1-indexed type id
            break
    if angle_type_number is None:  # angle type was not found -> add new type
        newBONDANGLEBENDTYPE = blocks.bondstretchtype_type(CB=k, CHB=kh, B0=b0)
        self.BONDANGLEBENDTYPE.content.append(newBONDANGLEBENDTYPE)
        self.BONDANGLEBENDTYPE.NBTY += 1
        angle_type_number = len(self.BONDANGLEBENDTYPE.content)
    # create new angle TODO: maybe check if already exists. But I will assume smart users
    newAngle = blocks.bondangle_type(IT=atomI, JT=atomJ, KT=atomK, ICT=angle_type_number)
    # append to BONDANGLE or BONDANGLEH and keep the respective counter in sync
    if includesH:
        self.BONDANGLEH.content.append(newAngle)
        self.BONDANGLEH.NTHEH += 1
    else:
        self.BONDANGLE.content.append(newAngle)
        self.BONDANGLE.NTHE += 1
def add_new_torsiondihedral(self, CP:float, PD:float, NP:int, atomI:int, atomJ:int, atomK:int, atomL:int, includesH:bool = False, verbose=False):
#check if all classes are ready, if not create
if not hasattr(self, "TORSDIHEDRALTYPE"):
self.add_block(blocktitle="TORSDIHEDRALTYPE", content=[], verbose=verbose)
if includesH:
if not hasattr(self, "DIHEDRALH"):
self.add_block(blocktitle="DIHEDRALH", content=[], verbose=verbose)
else:
if not hasattr(self, "DIHEDRAL"):
self.add_block(blocktitle="DIHEDRAL", content=[], verbose=verbose)
# find the TORSDIHEDRALTYPE number or create new TORSDIHEDRALTYPE
torsion_type_number = 0
iterator = 1
for torsion_type in self.TORSDIHEDRALTYPE.content:
if torsion_type.CP == CP and torsion_type.PD == PD | |
'get_qos_policy_group_name_from_info',
return_value=fake.QOS_POLICY_GROUP_NAME)
self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive',
return_value=is_adaptive)
self.library._add_lun_to_table = mock.Mock()
self.zapi_client.move_lun = mock.Mock()
mock_set_lun_qos_policy_group = self.mock_object(
self.zapi_client, 'set_lun_qos_policy_group')
self.library.manage_existing({'name': 'name'}, {'ref': 'ref'})
self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
{'ref': 'ref'})
self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
self.assertEqual(1, self.library._add_lun_to_table.call_count)
self.assertEqual(0, self.zapi_client.move_lun.call_count)
self.assertEqual(1, mock_set_lun_qos_policy_group.call_count)
def test_manage_existing_lun_new_path(self):
    """manage_existing must rename (move) the backing LUN when the managed
    volume's name differs from the existing LUN name."""
    mock_lun = block_base.NetAppLun(
        'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'})
    self.library._get_existing_vol_with_manage_ref = mock.Mock(
        return_value=mock_lun)
    self.mock_object(na_utils, 'get_volume_extra_specs')
    self.mock_object(na_utils, 'log_extra_spec_warnings')
    self.library._check_volume_type_for_lun = mock.Mock()
    self.library._setup_qos_for_volume = mock.Mock()
    self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
                     return_value=None)
    self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive',
                     return_value=False)
    self.library._add_lun_to_table = mock.Mock()
    self.zapi_client.move_lun = mock.Mock()
    # volume name 'volume' != existing LUN name 'name' -> a move is expected
    self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'})
    self.assertEqual(
        2, self.library._get_existing_vol_with_manage_ref.call_count)
    self.assertEqual(1, self.library._check_volume_type_for_lun.call_count)
    self.assertEqual(1, self.library._add_lun_to_table.call_count)
    self.zapi_client.move_lun.assert_called_once_with(
        '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume')
@ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']},
          {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']},
          {'secondary_id': 'dev1', 'configured_targets': []},
          {'secondary_id': None, 'configured_targets': []})
@ddt.unpack
def test_failover_host_invalid_replication_target(self, secondary_id,
                                                  configured_targets):
    """failover_host rejects a target that is the current backend, unknown,
    or missing (exercises a method inherited from DataMotionMixin)."""
    self.library.backend_name = 'dev0'
    self.mock_object(data_motion.DataMotionMixin,
                     'get_replication_backend_names',
                     return_value=configured_targets)
    complete_failover_call = self.mock_object(
        data_motion.DataMotionMixin, '_complete_failover')
    # invalid target -> InvalidReplicationTarget, and failover never starts
    self.assertRaises(exception.InvalidReplicationTarget,
                      self.library.failover_host, 'fake_context', [],
                      secondary_id=secondary_id)
    self.assertFalse(complete_failover_call.called)
def test_failover_host_unable_to_failover(self):
    """If _complete_failover raises a driver error, failover_host surfaces
    UnableToFailOver and does not switch the active ZAPI client
    (exercises a method inherited from DataMotionMixin)."""
    self.library.backend_name = 'dev0'
    self.mock_object(
        data_motion.DataMotionMixin, '_complete_failover',
        side_effect=na_utils.NetAppDriverException)
    self.mock_object(data_motion.DataMotionMixin,
                     'get_replication_backend_names',
                     return_value=['dev1', 'dev2'])
    self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
                     return_value=fake_utils.SSC.keys())
    self.mock_object(self.library, '_update_zapi_client')
    self.assertRaises(exception.UnableToFailOver,
                      self.library.failover_host, 'fake_context', [],
                      secondary_id='dev1')
    data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
        'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [],
        failover_target='dev1')
    self.assertFalse(self.library._update_zapi_client.called)
def test_failover_host(self):
    """Happy-path failover: delegates to _complete_failover, switches the
    ZAPI client to the new target, and records the failed-over backend
    (exercises a method inherited from DataMotionMixin)."""
    self.library.backend_name = 'dev0'
    self.mock_object(data_motion.DataMotionMixin, '_complete_failover',
                     return_value=('dev1', []))
    self.mock_object(data_motion.DataMotionMixin,
                     'get_replication_backend_names',
                     return_value=['dev1', 'dev2'])
    self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
                     return_value=fake_utils.SSC.keys())
    self.mock_object(self.library, '_update_zapi_client')
    actual_active, vol_updates, __ = self.library.failover_host(
        'fake_context', [], secondary_id='dev1', groups=[])
    data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
        'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [],
        failover_target='dev1')
    self.library._update_zapi_client.assert_called_once_with('dev1')
    self.assertTrue(self.library.failed_over)
    self.assertEqual('dev1', self.library.failed_over_backend_name)
    self.assertEqual('dev1', actual_active)
    self.assertEqual([], vol_updates)
def test_add_looping_tasks(self):
    """_add_looping_tasks registers the SSC-update and housekeeping tasks
    with the expected intervals and then chains to the base class."""
    mock_update_ssc = self.mock_object(self.library, '_update_ssc')
    mock_handle_housekeeping = self.mock_object(
        self.library, '_handle_housekeeping_tasks')
    mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task')
    mock_super_add_looping_tasks = self.mock_object(
        block_base.NetAppBlockStorageLibrary, '_add_looping_tasks')
    self.library._add_looping_tasks()
    mock_update_ssc.assert_called_once_with()
    # SSC update every hour (with an hour offset); housekeeping every
    # ten minutes (with a 0 offset)
    mock_add_task.assert_has_calls([
        mock.call(mock_update_ssc,
                  loopingcalls.ONE_HOUR,
                  loopingcalls.ONE_HOUR),
        mock.call(mock_handle_housekeeping,
                  loopingcalls.TEN_MINUTES,
                  0)])
    mock_super_add_looping_tasks.assert_called_once_with()
def test_get_backing_flexvol_names(self):
    """_get_backing_flexvol_names should simply delegate to the SSC library."""
    get_ssc_mock = self.mock_object(self.library.ssc_library, 'get_ssc')
    self.library._get_backing_flexvol_names()
    get_ssc_mock.assert_called_once_with()
def test_create_group(self):
    """create_group should report the new group as available."""
    update = self.library.create_group(fake.VOLUME_GROUP)
    self.assertEqual('available', update['status'])
def test_delete_group_volume_delete_failure(self):
    """A failing LUN delete marks that member volume error_deleting while the
    group itself is still reported deleted, and the exception is logged."""
    self.mock_object(block_cmode, 'LOG')
    self.mock_object(self.library, '_delete_lun', side_effect=Exception)
    model_update, volumes = self.library.delete_group(
        fake.VOLUME_GROUP, [fake.VG_VOLUME])
    self.assertEqual('deleted', model_update['status'])
    self.assertEqual('error_deleting', volumes[0]['status'])
    self.assertEqual(1, block_cmode.LOG.exception.call_count)
def test_update_group(self):
    """update_group is a no-op: all three returned updates are None."""
    group_update, add_update, remove_update = (
        self.library.update_group(fake.VOLUME_GROUP))
    self.assertIsNone(group_update)
    self.assertIsNone(add_update)
    self.assertIsNone(remove_update)
def test_delete_group_not_found(self):
    """Deleting a group whose LUNs are already gone succeeds quietly:
    nothing is logged and both group and volumes report deleted."""
    self.mock_object(block_cmode, 'LOG')
    self.mock_object(self.library, '_get_lun_attr', return_value=None)
    model_update, volumes = self.library.delete_group(
        fake.VOLUME_GROUP, [fake.VG_VOLUME])
    self.assertEqual(0, block_cmode.LOG.error.call_count)
    self.assertEqual(0, block_cmode.LOG.info.call_count)
    self.assertEqual('deleted', model_update['status'])
    self.assertEqual('deleted', volumes[0]['status'])
def test_create_group_snapshot_raise_exception(self):
    """A ZAPI failure while taking a consistency-group snapshot is
    re-raised as NetAppDriverException."""
    self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
                     return_value=True)
    mock_extract_host = self.mock_object(
        volume_utils, 'extract_host', return_value=fake.POOL_NAME)
    self.mock_object(self.zapi_client, 'create_cg_snapshot',
                     side_effect=netapp_api.NaApiError)
    self.assertRaises(na_utils.NetAppDriverException,
                      self.library.create_group_snapshot,
                      fake.VOLUME_GROUP,
                      [fake.VG_SNAPSHOT])
    mock_extract_host.assert_called_once_with(
        fake.VG_SNAPSHOT['volume']['host'], level='pool')
def test_create_group_snapshot(self):
    """Non-consistency-group snapshots are taken by cloning each member
    LUN individually."""
    self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
                     return_value=False)
    fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID,
                                    fake.LUN_SIZE, fake.LUN_METADATA)
    self.mock_object(self.library, '_get_lun_from_table',
                     return_value=fake_lun)
    mock__clone_lun = self.mock_object(self.library, '_clone_lun')
    model_update, snapshots_model_update = (
        self.library.create_group_snapshot(fake.VOLUME_GROUP,
                                           [fake.SNAPSHOT]))
    self.assertIsNone(model_update)
    self.assertIsNone(snapshots_model_update)
    mock__clone_lun.assert_called_once_with(fake_lun.name,
                                            fake.SNAPSHOT['name'],
                                            space_reserved='false',
                                            is_snapshot=True)
def test_create_consistent_group_snapshot(self):
    """Consistency-group snapshots: take a backend cg-snapshot, clone each
    LUN from it, then wait for and delete the temporary backing snapshot."""
    self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
                     return_value=True)
    self.mock_object(volume_utils, 'extract_host',
                     return_value=fake.POOL_NAME)
    mock_create_cg_snapshot = self.mock_object(
        self.zapi_client, 'create_cg_snapshot')
    mock__clone_lun = self.mock_object(self.library, '_clone_lun')
    mock_wait_for_busy_snapshot = self.mock_object(
        self.zapi_client, 'wait_for_busy_snapshot')
    mock_delete_snapshot = self.mock_object(
        self.zapi_client, 'delete_snapshot')
    model_update, snapshots_model_update = (
        self.library.create_group_snapshot(fake.VOLUME_GROUP,
                                           [fake.VG_SNAPSHOT]))
    self.assertIsNone(model_update)
    self.assertIsNone(snapshots_model_update)
    mock_create_cg_snapshot.assert_called_once_with(
        set([fake.POOL_NAME]), fake.VOLUME_GROUP['id'])
    mock__clone_lun.assert_called_once_with(
        fake.VG_SNAPSHOT['volume']['name'],
        fake.VG_SNAPSHOT['name'],
        source_snapshot=fake.VOLUME_GROUP['id'])
    # the temporary backend snapshot is cleaned up after cloning
    mock_wait_for_busy_snapshot.assert_called_once_with(
        fake.POOL_NAME, fake.VOLUME_GROUP['id'])
    mock_delete_snapshot.assert_called_once_with(
        fake.POOL_NAME, fake.VOLUME_GROUP['id'])
@ddt.data(None,
          {'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_group_from_src_snapshot(self, volume_model_update):
    """Creating a group from a group snapshot clones each snapshot into a
    new volume; a per-volume model update, when returned, is propagated."""
    mock_clone_source_to_destination = self.mock_object(
        self.library, '_clone_source_to_destination',
        return_value=volume_model_update)
    actual_return_value = self.library.create_group_from_src(
        fake.VOLUME_GROUP, [fake.VOLUME], group_snapshot=fake.VG_SNAPSHOT,
        snapshots=[fake.VG_VOLUME_SNAPSHOT])
    clone_source_to_destination_args = {
        'name': fake.VG_SNAPSHOT['name'],
        'size': fake.VG_SNAPSHOT['volume_size'],
    }
    mock_clone_source_to_destination.assert_called_once_with(
        clone_source_to_destination_args, fake.VOLUME)
    if volume_model_update:
        volume_model_update['id'] = fake.VOLUME['id']
    expected_return_value = ((None, [volume_model_update])
                             if volume_model_update else (None, []))
    self.assertEqual(expected_return_value, actual_return_value)
@ddt.data(None,
          {'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_group_from_src_group(self, volume_model_update):
    """Creating a group from a source group clones each source volume into
    a new volume; a per-volume model update, when returned, is propagated."""
    lun_name = fake.SOURCE_VG_VOLUME['name']
    mock_lun = block_base.NetAppLun(
        lun_name, lun_name, '3', {'UUID': 'fake_uuid'})
    self.mock_object(self.library, '_get_lun_from_table',
                     return_value=mock_lun)
    mock_clone_source_to_destination = self.mock_object(
        self.library, '_clone_source_to_destination',
        return_value=volume_model_update)
    actual_return_value = self.library.create_group_from_src(
        fake.VOLUME_GROUP, [fake.VOLUME],
        source_group=fake.SOURCE_VOLUME_GROUP,
        source_vols=[fake.SOURCE_VG_VOLUME])
    clone_source_to_destination_args = {
        'name': fake.SOURCE_VG_VOLUME['name'],
        'size': fake.SOURCE_VG_VOLUME['size'],
    }
    if volume_model_update:
        volume_model_update['id'] = fake.VOLUME['id']
    expected_return_value = ((None, [volume_model_update])
                             if volume_model_update else (None, []))
    mock_clone_source_to_destination.assert_called_once_with(
        clone_source_to_destination_args, fake.VOLUME)
    self.assertEqual(expected_return_value, actual_return_value)
def test_delete_group_snapshot(self):
    """delete_group_snapshot deletes each snapshot LUN and returns no updates."""
    delete_lun_mock = self.mock_object(self.library, '_delete_lun')
    group_update, snapshots_update = (
        self.library.delete_group_snapshot(fake.VOLUME_GROUP,
                                           [fake.VG_SNAPSHOT]))
    self.assertIsNone(group_update)
    self.assertIsNone(snapshots_update)
    delete_lun_mock.assert_called_once_with(fake.VG_SNAPSHOT['name'])
def test_move_lun(self):
    """_move_lun starts a move job and polls its status; a 'complete'
    status finishes the move and the method returns None."""
    self.library.configuration.netapp_migrate_volume_timeout = 1
    fake_job_status = {'job-status': 'complete'}
    mock_start_lun_move = self.mock_object(self.zapi_client,
                                           'start_lun_move',
                                           return_value=fake.JOB_UUID)
    mock_get_lun_move_status = self.mock_object(
        self.zapi_client, 'get_lun_move_status',
        return_value=fake_job_status)
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
    result = self.library._move_lun(
        fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME,
        dest_lun_name=fake.VOLUME_NAME)
    mock_start_lun_move.assert_called_with(
        fake_vol.name, fake.DEST_POOL_NAME,
        src_ontap_volume=fake.POOL_NAME,
        dest_lun_name=fake.VOLUME_NAME)
    mock_get_lun_move_status.assert_called_once_with(fake.JOB_UUID)
    self.assertIsNone(result)
@ddt.data(('data', na_utils.NetAppDriverTimeout),
          ('destroyed', na_utils.NetAppDriverException))
@ddt.unpack
def test_move_lun_error(self, status_on_error, move_exception):
    """_move_lun maps a stuck ('data') job to NetAppDriverTimeout and a
    'destroyed' job to NetAppDriverException."""
    self.library.configuration.netapp_migrate_volume_timeout = 1
    fake_job_status = {
        'job-status': status_on_error,
        'last-failure-reason': None
    }
    mock_start_lun_move = self.mock_object(self.zapi_client,
                                           'start_lun_move',
                                           return_value=fake.JOB_UUID)
    mock_get_lun_move_status = self.mock_object(
        self.zapi_client, 'get_lun_move_status',
        return_value=fake_job_status)
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
    self.assertRaises(move_exception,
                      self.library._move_lun,
                      fake_vol,
                      fake.POOL_NAME,
                      fake.DEST_POOL_NAME,
                      dest_lun_name=fake.VOLUME_NAME)
    mock_start_lun_move.assert_called_with(
        fake_vol.name, fake.DEST_POOL_NAME,
        src_ontap_volume=fake.POOL_NAME,
        dest_lun_name=fake.VOLUME_NAME)
    mock_get_lun_move_status.assert_called_with(fake.JOB_UUID)
def test_cancel_lun_copy(self):
    """When cancelling the copy job succeeds, no destination-side cleanup
    (client lookup / LUN destroy) is attempted."""
    mock_cancel_lun_copy = self.mock_object(self.zapi_client,
                                            'cancel_lun_copy')
    mock_get_client_for_backend = self.mock_object(
        dot_utils, 'get_client_for_backend', return_value=self.zapi_client)
    mock_destroy_lun = self.mock_object(self.zapi_client,
                                        'destroy_lun')
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
    result = self.library._cancel_lun_copy(fake.JOB_UUID,
                                           fake_vol,
                                           fake.DEST_POOL_NAME,
                                           fake.DEST_BACKEND_NAME)
    mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID)
    mock_get_client_for_backend.assert_not_called()
    mock_destroy_lun.assert_not_called()
    self.assertIsNone(result)
def test_cancel_lun_copy_force_destroy_lun(self):
    """If cancelling the copy job fails, the destination LUN is force
    destroyed through a client for the destination backend."""
    mock_cancel_lun_copy = self.mock_object(
        self.zapi_client, 'cancel_lun_copy',
        side_effect=na_utils.NetAppDriverException)
    mock_get_client_for_backend = self.mock_object(
        dot_utils, 'get_client_for_backend', return_value=self.zapi_client)
    mock_destroy_lun = self.mock_object(self.zapi_client, 'destroy_lun')
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
    result = self.library._cancel_lun_copy(fake.JOB_UUID,
                                           fake_vol,
                                           fake.DEST_POOL_NAME,
                                           fake.DEST_BACKEND_NAME)
    mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID)
    mock_get_client_for_backend.assert_called_once_with(
        fake.DEST_BACKEND_NAME)
    fake_lun_path = '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name)
    mock_destroy_lun.assert_called_once_with(fake_lun_path)
    self.assertIsNone(result)
def test_cancel_lun_copy_error_on_force_destroy_lun(self):
    """Errors raised during the forced destination-LUN destroy are
    swallowed; _cancel_lun_copy still returns None."""
    mock_cancel_lun_copy = self.mock_object(
        self.zapi_client, 'cancel_lun_copy',
        side_effect=na_utils.NetAppDriverException)
    mock_get_client_for_backend = self.mock_object(
        dot_utils, 'get_client_for_backend', return_value=self.zapi_client)
    mock_destroy_lun = self.mock_object(
        self.zapi_client, 'destroy_lun',
        side_effect=na_utils.NetAppDriverException)
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
    result = self.library._cancel_lun_copy(fake.JOB_UUID,
                                           fake_vol,
                                           fake.DEST_POOL_NAME,
                                           fake.DEST_BACKEND_NAME)
    mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID)
    mock_get_client_for_backend.assert_called_once_with(
        fake.DEST_BACKEND_NAME)
    fake_lun_path = '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name)
    mock_destroy_lun.assert_called_once_with(fake_lun_path)
    self.assertIsNone(result)
def test_copy_lun(self):
    """_copy_lun starts a copy job and polls it to completion; no
    cancellation occurs on the happy path."""
    self.library.configuration.netapp_migrate_volume_timeout = 1
    fake_job_status = {'job-status': 'complete'}
    mock_start_lun_copy = self.mock_object(self.zapi_client,
                                           'start_lun_copy',
                                           return_value=fake.JOB_UUID)
    mock_get_lun_copy_status = self.mock_object(
        self.zapi_client, 'get_lun_copy_status',
        return_value=fake_job_status)
    mock_cancel_lun_copy = self.mock_object(
        self.library, '_cancel_lun_copy')
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
    result = self.library._copy_lun(
        fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
        fake.DEST_VSERVER_NAME, dest_lun_name=fake.VOLUME_NAME,
        dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True)
    mock_start_lun_copy.assert_called_with(
        fake_vol.name, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
        src_ontap_volume=fake.POOL_NAME, src_vserver=fake.VSERVER_NAME,
        dest_lun_name=fake.VOLUME_NAME)
    mock_get_lun_copy_status.assert_called_once_with(fake.JOB_UUID)
    mock_cancel_lun_copy.assert_not_called()
    self.assertIsNone(result)
@ddt.data(('data', na_utils.NetAppDriverTimeout),
          ('destroyed', na_utils.NetAppDriverException))
@ddt.unpack
def test_copy_lun_error(self, status_on_error, copy_exception):
    """_copy_lun raises (timeout for a stuck 'data' job, driver error for a
    'destroyed' job) and cancels the copy when cancel_on_error is set.

    fixed: the ('destroyed', NetAppDriverException) row appeared twice in
    @ddt.data, generating a redundant duplicate test case.
    """
    self.library.configuration.netapp_migrate_volume_timeout = 1
    fake_job_status = {
        'job-status': status_on_error,
        'last-failure-reason': None
    }
    mock_start_lun_copy = self.mock_object(self.zapi_client,
                                           'start_lun_copy',
                                           return_value=fake.JOB_UUID)
    mock_get_lun_copy_status = self.mock_object(
        self.zapi_client, 'get_lun_copy_status',
        return_value=fake_job_status)
    mock_cancel_lun_copy = self.mock_object(
        self.library, '_cancel_lun_copy')
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
    self.assertRaises(copy_exception,
                      self.library._copy_lun,
                      fake_vol,
                      fake.POOL_NAME,
                      fake.VSERVER_NAME,
                      fake.DEST_POOL_NAME,
                      fake.DEST_VSERVER_NAME,
                      dest_lun_name=fake.VOLUME_NAME,
                      dest_backend_name=fake.DEST_BACKEND_NAME,
                      cancel_on_error=True)
    mock_start_lun_copy.assert_called_with(
        fake_vol.name, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
        src_ontap_volume=fake.POOL_NAME, src_vserver=fake.VSERVER_NAME,
        dest_lun_name=fake.VOLUME_NAME)
    mock_get_lun_copy_status.assert_called_with(fake.JOB_UUID)
    mock_cancel_lun_copy.assert_called_once_with(
        fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME,
        dest_backend_name=fake.DEST_BACKEND_NAME)
def test_migrate_volume_to_pool(self):
    """Happy path: the LUN is moved and the migration is finalized."""
    mock_move_lun = self.mock_object(self.library, '_move_lun')
    mock_finish = self.mock_object(
        self.library, '_finish_migrate_volume_to_pool')
    fake_vol = fake_volume.fake_volume_obj(
        mock.Mock(), id=fake.VOLUME_ID, name=fake.VOLUME_NAME)

    updates = self.library._migrate_volume_to_pool(fake_vol,
                                                   fake.POOL_NAME,
                                                   fake.DEST_POOL_NAME,
                                                   fake.VSERVER_NAME,
                                                   fake.DEST_BACKEND_NAME)

    self.assertEqual({}, updates)
    mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME,
                                          fake.DEST_POOL_NAME)
    mock_finish.assert_called_once_with(fake_vol, fake.DEST_POOL_NAME)
def test_migrate_volume_to_pool_lun_move_error(self):
    """A NetAppDriverException from _move_lun propagates and the
    migration is never finalized."""
    mock_move_lun = self.mock_object(
        self.library, '_move_lun',
        side_effect=na_utils.NetAppDriverException)
    mock_finish = self.mock_object(
        self.library, '_finish_migrate_volume_to_pool')
    fake_vol = fake_volume.fake_volume_obj(
        mock.Mock(), id=fake.VOLUME_ID, name=fake.VOLUME_NAME)

    self.assertRaises(na_utils.NetAppDriverException,
                      self.library._migrate_volume_to_pool,
                      fake_vol,
                      fake.POOL_NAME,
                      fake.DEST_POOL_NAME,
                      fake.VSERVER_NAME,
                      fake.DEST_BACKEND_NAME)

    mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME,
                                          fake.DEST_POOL_NAME)
    mock_finish.assert_not_called()
def test_migrate_volume_to_pool_lun_move_timeout(self):
    """On a move timeout the migration still completes, but the volume
    status is flagged as MAINTENANCE in the returned updates."""
    mock_move_lun = self.mock_object(
        self.library, '_move_lun',
        side_effect=na_utils.NetAppDriverTimeout)
    mock_finish = self.mock_object(
        self.library, '_finish_migrate_volume_to_pool')
    fake_vol = fake_volume.fake_volume_obj(
        mock.Mock(), id=fake.VOLUME_ID, name=fake.VOLUME_NAME)

    updates = self.library._migrate_volume_to_pool(fake_vol,
                                                   fake.POOL_NAME,
                                                   fake.DEST_POOL_NAME,
                                                   fake.VSERVER_NAME,
                                                   fake.DEST_BACKEND_NAME)

    self.assertEqual({'status': fields.VolumeStatus.MAINTENANCE}, updates)
    mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME,
                                          fake.DEST_POOL_NAME)
    mock_finish.assert_called_once_with(fake_vol, fake.DEST_POOL_NAME)
def test_finish_migrate_volume_to_pool(self):
    """The cached LUN metadata must be repointed at the destination pool."""
    lun_cache = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME,
                                     fake.SIZE, None)
    mock_get_lun_from_table = self.mock_object(
        self.library, '_get_lun_from_table', return_value=lun_cache)
    fake_vol = fake_volume.fake_volume_obj(
        mock.Mock(), id=fake.VOLUME_ID, name=fake.VOLUME_NAME)

    self.library._finish_migrate_volume_to_pool(fake_vol,
                                                fake.DEST_POOL_NAME)

    mock_get_lun_from_table.assert_called_once_with(fake_vol.name)
    expected_metadata = {
        'Path': '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name),
        'Volume': fake.DEST_POOL_NAME
    }
    self.assertEqual(expected_metadata, lun_cache.metadata)
def test_migrate_volume_to_vserver(self):
    """Happy path: peer the vservers, copy the LUN and finalize."""
    self.library.using_cluster_credentials = True
    self.library.backend_name = fake.BACKEND_NAME
    mock_create_vserver_peer = self.mock_object(
        self.library, 'create_vserver_peer')
    mock_copy_lun = self.mock_object(self.library, '_copy_lun')
    mock_finish = self.mock_object(
        self.library, '_finish_migrate_volume_to_vserver')
    fake_vol = fake_volume.fake_volume_obj(
        mock.Mock(), id=fake.VOLUME_ID, name=fake.VOLUME_NAME,
        status=fields.VolumeStatus.AVAILABLE)

    updates = self.library._migrate_volume_to_vserver(
        fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
        fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME)

    self.assertEqual({}, updates)
    mock_create_vserver_peer.assert_called_once_with(
        fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
        ['lun_copy'])
    mock_copy_lun.assert_called_once_with(
        fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
        fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME,
        cancel_on_error=True)
    mock_finish.assert_called_once_with(fake_vol)
@ddt.data(na_utils.NetAppDriverException, na_utils.NetAppDriverTimeout)
def test_migrate_volume_to_vserver_error_on_copy(self, copy_error):
    """A copy failure (exception or timeout) propagates and the
    migration is never finalized; the peer must already be in place.

    FIX: removed the duplicated ``self.library.backend_name`` assignment
    present in the original (the same line appeared twice in a row).
    """
    self.library.using_cluster_credentials = True
    self.library.backend_name = fake.BACKEND_NAME
    mock_create_vserver_peer = self.mock_object(
        self.library, 'create_vserver_peer')
    mock_copy_lun = self.mock_object(
        self.library, '_copy_lun',
        side_effect=copy_error)
    mock_finish_migrate_volume_to_vserver = self.mock_object(
        self.library, '_finish_migrate_volume_to_vserver')
    ctxt = mock.Mock()
    vol_fields = {
        'id': fake.VOLUME_ID,
        'name': fake.VOLUME_NAME,
        'status': fields.VolumeStatus.AVAILABLE
    }
    fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)

    self.assertRaises(copy_error,
                      self.library._migrate_volume_to_vserver,
                      fake_vol, fake.POOL_NAME, fake.VSERVER_NAME,
                      fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
                      fake.DEST_BACKEND_NAME)

    mock_create_vserver_peer.assert_called_once_with(
        fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
        ['lun_copy'])
    mock_copy_lun.assert_called_once_with(
        fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
        fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME,
        cancel_on_error=True)
    mock_finish_migrate_volume_to_vserver.assert_not_called()
def test_migrate_volume_to_vserver_volume_is_not_available(self):
    """Cross-vserver migration of an in-use volume is rejected up front:
    no peering, no copy, no finalization."""
    self.library.using_cluster_credentials = True
    mock_create_vserver_peer = self.mock_object(
        self.library, 'create_vserver_peer')
    mock_copy_lun = self.mock_object(self.library, '_copy_lun')
    mock_finish = self.mock_object(
        self.library, '_finish_migrate_volume_to_vserver')
    fake_vol = fake_volume.fake_volume_obj(
        mock.Mock(), id=fake.VOLUME_ID, name=fake.VOLUME_NAME,
        status=fields.VolumeStatus.IN_USE)

    self.assertRaises(exception.InvalidVolume,
                      self.library._migrate_volume_to_vserver,
                      fake_vol, fake.POOL_NAME, fake.VSERVER_NAME,
                      fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
                      fake.DEST_BACKEND_NAME)

    mock_create_vserver_peer.assert_not_called()
    mock_copy_lun.assert_not_called()
    mock_finish.assert_not_called()
def test_migrate_volume_to_vserver_invalid_vserver_peer_applications(self):
self.library.using_cluster_credentials = | |
# Source repository: LX-doctorAI/LR1
import numpy as np
from torchvision import datasets, transforms
from tqdm import tqdm
import os
import pickle as pkl
import torch
import pickle
import time
from torch.utils.data import DataLoader, Dataset
from PIL import Image
class FastDataSet(Dataset):
    """In-memory dataset: loads every TinyImageNet image eagerly so that
    __getitem__ is a pure tensor lookup with no disk I/O."""

    def __init__(self, data_path, label_path, transform4img):
        images, labels = loadTinyImageNet(data_path, label_path,
                                          transform4img)
        self.img = images
        self.label = labels
        self.len = len(self.label)

    def __getitem__(self, item):
        return self.img[item], self.label[item]

    def __len__(self):
        return self.len
def loadTinyImageNet(data_path, label_file, transform4img):
    """Load a TinyImageNet-style directory tree into memory.

    ``label_file`` lists one class directory name per line; images of
    class i live under ``data_path/<label_i>/``.

    :param data_path: root directory containing one subdir per class
    :param label_file: text file with one class-directory name per line
    :param transform4img: torchvision transform applied to each PIL image
    :return: (images FloatTensor [N, ...], labels LongTensor [N])

    Fixes vs. the original:
    - PIL images are opened in a ``with`` block so file handles are
      closed promptly instead of waiting for GC.
    - tensors are stacked directly with torch.stack, avoiding the
      tensor -> numpy -> torch.Tensor round trip.
    """
    label_list = []
    train_img = []
    train_label = []
    with open(label_file, 'r') as winds:
        for line in winds:
            label_list.append(line.strip('\n'))
    for index, label in enumerate(label_list):
        train_label_imgs_path = os.path.join(data_path, label)
        for fname in os.listdir(train_label_imgs_path):
            path = os.path.join(train_label_imgs_path, fname)
            # Close each file handle deterministically; the transform
            # (Resize) forces the lazy PIL load inside the with-block.
            with Image.open(path) as pic:
                train_img.append(transform4img(pic))
            train_label.append(index)
    train_img = torch.stack(train_img)
    train_label = torch.tensor(train_label, dtype=torch.long)
    return train_img, train_label
def TinyImageNetLoader(data_path, label_path, transform4img, batch_size, shuffle=True):
    """Build a DataLoader over the eagerly-loaded TinyImageNet dataset."""
    return DataLoader(
        FastDataSet(data_path, label_path, transform4img),
        batch_size, shuffle)
# Fall back to CPU when CUDA is unavailable so the script still runs on
# machines without a GPU (the original hard-coded 'cuda:5' and crashed
# at the first .to(device) call on CUDA-less hosts).
device = torch.device('cuda:5' if torch.cuda.is_available() else 'cpu')
def evalHot(y, pred):
    """Count correct predictions.

    :param y: one-hot ground-truth labels
    :param pred: raw network outputs (same trailing dimension as y)
    :return: number of rows where argmax(pred) equals argmax(y)
    """
    true_idx = torch.argmax(y, dim=-1)
    pred_idx = torch.argmax(pred, dim=-1)
    hits = (true_idx == pred_idx).cpu().numpy()
    return np.sum(hits)
def KMeansRepeatX(X, repeat, train=True):
    """Flatten X, optionally tile it, and append a bias column of ones.

    :param X: raw data of shape (batch_size, ...)
    :param repeat: number of repetitions / noise samples (train mode only)
    :param train: when True the batch is tiled ``repeat`` times
    :return: (batch_size*repeat, n_dim+1) in train mode,
             (batch_size, n_dim+1) otherwise
    """
    X = X.reshape(len(X), -1)
    if not train:
        bias = torch.ones(tuple(X.shape[:-1]) + (1,), dtype=torch.float).to(device)
        return torch.cat([X, bias], dim=-1)
    tiled = torch.cat([X] * repeat, dim=0).to(device)
    bias_shape = tuple(tiled.shape[:-1]) + (1,)
    bias = torch.ones(size=bias_shape, dtype=torch.float).to(device)
    return torch.cat([tiled, bias], dim=-1)
def OneHotLabel(Y, n):
    """One-hot encode a sequence of integer labels.

    :param Y: integer label tensor of length batch_size
    :param n: number of classes
    :return: (len(Y), n) float tensor of one-hot rows
    """
    encoded = torch.zeros([len(Y), n]).to(device)
    encoded[torch.arange(0, len(Y)), Y] = 1
    return encoded
def KMeansRepeatY(Y, repeat):
    """Tile the label tensor ``repeat`` times along the batch dimension
    (companion to KMeansRepeatX's train-mode tiling)."""
    return torch.cat([Y] * repeat, dim=0)
class Activation:
    """Collection of stateless activation functions.

    BUG FIX: softmax now subtracts the row max before exponentiating —
    the original named its temporary ``stable_exps`` but did no max
    subtraction, so large logits overflowed to inf/NaN. Subtracting the
    max is mathematically a no-op for the result.
    """

    @staticmethod
    def logistic(z):
        # Sigmoid: 1 / (1 + e^-z).
        return 1 / (1 + torch.exp(-z))

    @staticmethod
    def softmax(z):
        # Max-shifted exponentials for numerical stability.
        stable_exps = torch.exp(z - z.max(dim=-1, keepdim=True).values)
        return stable_exps / stable_exps.sum(dim=-1, keepdim=True)

    @staticmethod
    def threshold(z):
        # 0/1 step function. NOTE: mutates z in place (as the original did).
        z[z < 0] = 0
        return torch.sign(z)

    @staticmethod
    def relu(z):
        # NOTE: in-place ReLU — mutates the argument (as the original did).
        z[z < 0] = 0
        return z
def CELoss(Y, T):
    """Per-sample cross-entropy loss.

    :param Y: model output probabilities
    :param T: one-hot sample labels
    :return: cross entropy per sample (reduced over the class dimension)
    """
    log_likelihood = T * torch.log(Y)
    return -log_likelihood.sum(dim=-1)
class Layer:
    """One fully-connected layer supporting two training modes:
    standard backprop (BP=True) and a likelihood-ratio (LR) gradient
    estimator that injects Gaussian noise into the pre-activations
    (train=True, BP=False) and correlates it with the loss."""

    def __init__(self, n_input, n_output, sigma, activation):
        """
        :param n_input: input dimension (includes the appended bias column)
        :param n_output: output dimension
        :param sigma: std-dev of the injected noise (LR mode)
        :param activation: activation function (falsy for linear)
        """
        self.w = torch.randn(size=[n_input, n_output]).to(device)  # the extra input dim is the bias
        self.w *= (2 / self.w.shape[0] ** 0.5)  # scale init by input width
        self.sigma = sigma
        self.n_input = n_input
        self.n_output = n_output
        self.input = None       # cached input from the last forward pass
        self.output = None      # cached output from the last forward pass
        self.noise = None       # noise drawn in the last LR forward pass
        self.activation = activation
        print('该层输入:{} 输出:{}\n激活函数:{}'.format(self.n_input, self.n_output, self.activation))
        self.bp_grad = None     # gradient computed by backward() in BP mode
        self.lr_grad = None     # gradient computed by backward() in LR mode
        # Adam optimizer state
        self.adam_beta1 = 0.9
        self.adam_beta2 = 0.999
        self.adam_last_mt = 0
        self.adam_last_vt = 0
        self.t = 0              # update-step counter shared by all methods
        self.epsilon = 1e-8
        # Nadam optimizer state
        self.nadam_alpha = 0.9
        self.nadam_beta = 0.999
        self.nadam_s = 0
        self.nadam_r = 0
        self.batchSize = None
        # Momentum optimizer state
        self.momentum = 0
        self.momentum_beta = 0.9

    def get_params(self):
        """Return the weight matrix (bias row included)."""
        return self.w

    def forward(self, x, train=False, BP=False):
        """Forward pass.

        :param x: input batch (bias column already appended by caller)
        :param train: LR-training mode flag (adds noise when BP is False)
        :param BP: backprop mode (plain affine + activation, no noise)
        :return: layer output
        """
        self.input = x
        self.batchSize = len(x)
        if BP:
            # print(self.input.shape)
            # print(self.w.shape)
            self.output = self.input.matmul(self.w)
            if self.activation:
                self.output = self.activation(self.output)
            return self.output
        else:
            if not train:
                # Evaluation: deterministic affine + activation.
                self.output = self.input.matmul(self.w)
                if self.activation:
                    self.output = self.activation(self.output)
                return self.output
            else:
                # LR training: perturb pre-activations with N(0, sigma^2)
                # and cache the noise for the gradient estimate.
                self.noise = torch.randn([len(self.input), self.n_output], device=device) * self.sigma
                # self.noise = np.random.laplace(loc=0, scale=self.sigma, size=[len(self.input), self.n_output])
                # self.noise = torch.from_numpy(self.noise).to(device)
                self.output = self.input.matmul(self.w) + self.noise
                if self.activation:
                    self.output = self.activation(self.output)
                return self.output

    def backward(self, target, BP=True):
        """
        :param target: in BP mode the residual from the next layer (or the
            one-hot labels for the softmax output layer); in LR mode the
            per-sample loss values
        :param BP: whether to use backprop
        :return: BP mode: residual for the previous layer; LR mode: the
            loss, passed through unchanged
        """
        if BP:
            eta = target
            if self.activation == Activation.softmax:
                # Softmax + cross-entropy: residual is (output - target).
                eta = self.output - eta
            elif self.activation == Activation.logistic:
                # Chain rule through the sigmoid: y * (1 - y) * upstream.
                eta = self.output * (1 - self.output) * eta
            else:
                # Only softmax/logistic derivatives are implemented here.
                print('尚未注册!\n')
                exit()
            batch_size = len(self.input)
            grad = self.input.T.matmul(eta)
            self.bp_grad = grad / batch_size
            # Residual propagated to the previous layer: eta . w^T.
            return torch.einsum('ij,kj->ik', eta, self.w)
        else:
            # LR estimator: grad ~= E[loss * input ⊗ noise] / sigma^2.
            term = self.input * target[:, np.newaxis]
            batch_grad = torch.einsum('ni, nj->ij', term, self.noise)
            batch_grad /= self.sigma ** 2
            batch_grad /= len(self.input)  # torch.mean(batch_grad, dim=0)
            self.lr_grad = batch_grad
            return target

    def update_params(self, learning_rate, BP=True, method='sgd', weight_decay=1e-4):
        """Apply one optimizer step using the gradient stored by backward().

        :param learning_rate: step size
        :param BP: selects bp_grad (True) or lr_grad (False)
        :param method: one of 'sgd', 'adam', 'nadam', 'momentum'
        :param weight_decay: currently unused (the decay line is commented out)
        """
        if BP:
            grad = self.bp_grad
        else:
            grad = self.lr_grad
        # grad += weight_decay * self.w / self.batchSize
        if method == 'sgd':
            self.t += 1
            self.w -= learning_rate * grad
            return
        elif method == 'adam':
            self.t += 1
            # Exponential moving averages of grad and grad^2 ...
            self.adam_last_mt = self.adam_beta1 * self.adam_last_mt + (1 - self.adam_beta1) * grad
            self.adam_last_vt = self.adam_beta2 * self.adam_last_vt + (1 - self.adam_beta2) * grad ** 2
            # ... with bias correction.
            adam_mt_cap = self.adam_last_mt / (1 - self.adam_beta1 ** self.t)
            adam_vt_cap = self.adam_last_vt / (1 - self.adam_beta2 ** self.t)
            self.w -= learning_rate * adam_mt_cap / (torch.sqrt(adam_vt_cap) + self.epsilon)
            return
        elif method == 'nadam':
            self.t += 1
            self.nadam_s = self.nadam_alpha * self.nadam_s + (1 - self.nadam_alpha) * grad
            self.nadam_r = self.nadam_beta * self.nadam_r + (1 - self.nadam_beta) * grad ** 2
            sqrt_term = 1 - self.nadam_beta ** self.t
            sqrt_term = sqrt_term ** 0.5
            lr = learning_rate * sqrt_term / (1 - self.nadam_alpha ** self.t)
            # NOTE(review): unlike the adam branch there is no epsilon in
            # this denominator — division can blow up if nadam_r has zero
            # entries; confirm whether that is intended.
            delta = lr * (self.nadam_alpha * self.nadam_s + (1 - self.nadam_alpha) * grad) / torch.sqrt(self.nadam_r)
            self.w -= delta
        elif method == 'momentum':
            self.t += 1
            # Warm-up: plain SGD for the first 100 steps.
            if self.t < 100:
                self.w -= learning_rate * grad
                return
            self.momentum = self.momentum_beta * self.momentum - learning_rate * grad
            self.w += self.momentum
            return
        else:
            # Unknown optimizer name.
            print('优化方法尚未注册')
            exit()
class Network(object):
    """Stack of Layer objects trainable either by backprop (BP) or by the
    likelihood-ratio (LR) estimator; mirrors the per-layer API."""

    def __init__(self, n_input, units_per_layers: list, activation_per_layers: list, sigma):
        """
        :param n_input: input dimension (bias column included by caller)
        :param units_per_layers: output width of each layer, in order
        :param activation_per_layers: activation per layer (parallel list)
        :param sigma: LR noise std-dev shared by all layers
        """
        assert len(units_per_layers) == len(activation_per_layers)
        self.n_layers = len(units_per_layers)
        self.params = [(n_input, units_per_layers[0], sigma, activation_per_layers[0])]
        for i in range(self.n_layers - 1):
            self.params.append(
                (units_per_layers[i], units_per_layers[i + 1], sigma,
                 activation_per_layers[i + 1]))
        self.layers = [Layer(*self.params[i]) for i in range(self.n_layers)]
        print('模型层数为:{} 各层及对应的激活函数为:{}'.format(len(self.layers),
                                               [(units_per_layers[i], activation_per_layers[i]) for i in
                                                range(self.n_layers)]))

    def forward(self, X, train=True, BP=False):
        """Run X through every layer, forwarding the train/BP flags."""
        z = X
        for layer in self.layers:
            z = layer.forward(z, train, BP)
        return z

    def backward(self, target, BP=True):
        """
        :param target: labels (BP mode) or per-sample losses (LR mode)
        :param BP: whether to use backprop
        :return: None
        """
        if BP:
            # Residual flows backwards, layer by layer.
            for i in range(self.n_layers - 1, -1, -1):
                target = self.layers[i].backward(target, BP)
        else:
            # LR gradients are purely local, so order does not matter.
            for layer in self.layers:
                layer.backward(target, BP)

    def update_params(self, learning_rate, BP=True, method='sgd', weight_decay=1e-4):
        """Apply one optimizer step on every layer."""
        for layer in self.layers:
            layer.update_params(learning_rate, BP, method=method, weight_decay=weight_decay)

    def saveModel(self, path):
        """Pickle the per-layer weight matrices as numpy arrays."""
        weights_list = []
        for layer in self.layers:
            weights_list.append(layer.w.cpu().detach().numpy())
        with open(path, 'wb') as file:
            pkl.dump(weights_list, file)

    def loadModel(self, path):
        """Load weights previously written by saveModel.

        BUG FIX: saveModel stores *numpy* arrays, which have no ``.to``
        method — the original ``weight_list[index].to(device)`` raised
        AttributeError. Convert back to torch tensors first.
        """
        with open(path, 'rb') as file:
            weight_list = pkl.load(file)
        for index, layer in enumerate(self.layers):
            layer.w = torch.from_numpy(weight_list[index]).to(device)
if __name__ == "__main__":
label_file = '../data/IMagenet-master/tiny-imagenet-200/label.txt'
train_path = '../data/IMagenet-master/tiny-imagenet-200/train_LR'
test_path = '../data/IMagenet-master/tiny-imagenet-200/test_LR'
h = 32
transform = transforms.Compose([
transforms.Resize((h, h)),
transforms.Grayscale(),
transforms.ToTensor(),
])
train_loss = []
test_loss = []
acc = []
time_list = []
batch_size = 128
method = 'adam'
n_input = h * h + 1
n_output = 10
sigma = 1.0
epoches = 1000
loss = 0.
reuse = False
BP_train = False
repeat_n = 500
net_arc = [300, 100, 50, n_output]
learning_rate = 1e-3
start_epoch = 0
trainLoader = TinyImageNetLoader(train_path, label_file, transform, batch_size=batch_size)
testLoader = TinyImageNetLoader(test_path, label_file, transform, batch_size=batch_size)
net_act = [Activation.relu for i in range(len(net_arc) - 1)]
net_act.append(Activation.softmax)
assert len(net_arc) == len(net_act)
n_layers = len(net_arc)
net = Network(n_input, net_arc, net_act, sigma)
trainLoss = 0.
testLoss = 0.
print('epoch to run:{} learning rate:{}'.format(epoches, learning_rate))
start = time.time()
print('模型信息:\narc:{}\nact:{}\nK:{}'.format(net_arc, net_act, repeat_n))
best = 0.
for epoch in range(start_epoch, start_epoch + epoches):
loss = 0.
nbatch = 0.
N = 0.
n = 0.
trainLoss = 0.
train_estimation_relative_error = 0
for batch, [trainX, trainY] in enumerate(tqdm(trainLoader, ncols=10)):
# break
nbatch += 1
trainX = trainX.to(device)
trainY = trainY.to(device)
trainX = torch.sin(trainX)
trainY = OneHotLabel(trainY, n_output)
batch_train_repeatX, batch_train_repeatY = KMeansRepeatX(trainX, repeat_n), KMeansRepeatY(trainY,
repeat_n)
pre = net.forward(batch_train_repeatX, train=True, BP=BP_train)
loss = CELoss(pre, batch_train_repeatY)
trainLoss += torch.mean(loss).detach().cpu().numpy()
if BP_train:
net.backward(batch_train_repeatY, BP_train)
net.update_params(learning_rate, BP_train)
else:
net.backward(loss, BP_train)
net.update_params(learning_rate, BP_train, method)
trainLoss /= nbatch
train_loss.append(trainLoss)
print('train epoch:{} loss:{}'.format(epoch, trainLoss))
if ((epoch + 1) % 100 == 0):
learning_rate *= 0.8
print('学习率衰减至{}'.format(learning_rate))
loss = 0.
N = 0.
n = 0.
nbatch = 0.
test_estimation_relative_error = 0
for batch, [testX, testY] in enumerate(tqdm(testLoader, ncols=10)):
nbatch += 1
testX = testX.to(device)
testY = testY.to(device)
testX = torch.sin(testX)
testX = KMeansRepeatX(testX, 1, False)
testY = OneHotLabel(testY, n_output)
pre = net.forward(testX, train=False)
testLoss += torch.mean(CELoss(pre, testY)).detach().cpu().numpy()
N += len(testX)
n += evalHot(testY, pre)
testLoss /= nbatch
test_loss.append(testLoss)
testAcc = n / N
acc.append(testAcc)
print('test epoch:{} loss:{} acc:{}'.format(epoch, testLoss, n / N))
time_list.append(time.time() - start)
# net.save_weights(os.path.join('./models_mnist', model_path), epoch, learning_rate)
print('train_loss:{}\n test_loss:{}\n acc:{}'.format(train_loss, test_loss, acc))
print('time:{}'.format(time_list))
with open('LR_3.pkl'.format(n_layers, repeat_n, method), 'wb') as file:
pkl.dump(
[train_loss, test_loss, acc], file)
net.saveModel('model/LR_3.pkl'.format(n_layers, | |
new values in text boxes
# Format with 8 decimal places.
#--------------------------------
self.map_minlon.value = "{:.8f}".format( minlon )
self.map_maxlon.value = "{:.8f}".format( maxlon )
self.map_maxlat.value = "{:.8f}".format( maxlat )
self.map_minlat.value = "{:.8f}".format( minlat )
# replace_map_bounds()
#--------------------------------------------------------------------
# def replace_map_bounds2(self, event, type=None, coordinates=None):
#
# # events: mouseup, mousedown, mousemove, mouseover,
# # mouseout, click, dblclick, preclick
# event = kwargs.get('type')
# # print('event = ', event)
# if (event == 'mouseup') or (event == 'mousemove') or \
# (event == 'click') or (event == 'dblclick'):
# w1.value = m.west
# w2.value = m.east
# w3.value = m.north
# w4.value = m.south
#
# # status.value = event
#
# # with output2:
# # print( event )
#
#--------------------------------------------------------------------
def update_map_bounds(self, caller_obj=None):
    """Center the map on the bounding box currently typed into the
    lat/lon text boxes (FROM_MAP=False), zoom fully in, and record the
    box in ``map_window.new_bounds`` as the target that a separate
    observer zooms back out to.

    :param caller_obj: widget that triggered the callback (unused).
    """
    [bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
        self.get_map_bounds( FROM_MAP = False )
    bb_midlon = (bb_minlon + bb_maxlon) / 2
    bb_midlat = (bb_minlat + bb_maxlat) / 2
    bb_center = ( bb_midlat, bb_midlon )
    # print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
    # print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
    #----------------------------------------------------------
    # Start fully zoomed in; the bounds observer then steps the
    # zoom back out one level at a time until new_bounds fits.
    #----------------------------------------------------------
    zoom = self.map_window.max_zoom  # (usually 18)
    self.map_window.center = bb_center
    self.map_window.zoom = zoom
    ## print('max_zoom =', self.map_window.max_zoom)
    ## print('map_window.bounds =', self.map_window.bounds )
    #------------------------------------
    # Add "new_bounds" attribute to map
    #------------------------------------
    new_bounds = ((bb_minlat, bb_minlon), (bb_maxlat, bb_maxlon))
    # NOTE(review): assigning Tuple() first looks like a traitlets trait
    # declaration, but it is overwritten on the very next line —
    # presumably dead; confirm before removing.
    self.map_window.new_bounds = Tuple()
    self.map_window.new_bounds = new_bounds

# update_map_bounds()
#--------------------------------------------------------------------
def zoom_out_to_new_bounds(self, change=None):
    """Bounds-change observer: step the map zoom out one level per event
    until the pending target box (map.new_bounds) fits inside the
    visible window, then clear the target."""
    widget_map = change.owner  # the map widget that fired the event
    target = widget_map.new_bounds
    # Nothing to do when fully zoomed out or when no target is pending.
    if not (widget_map.zoom > 1 and target):
        return
    visible = change.new
    fits = (visible[0][0] < target[0][0] and
            visible[0][1] < target[0][1] and
            visible[1][0] > target[1][0] and
            visible[1][1] > target[1][1])
    if fits:
        # Target box is now inside the window; drop the target.
        widget_map.new_bounds = None
    else:
        widget_map.zoom = widget_map.zoom - 1

# zoom_out_to_new_bounds()
#--------------------------------------------------------------------
# def zoom_out_to_new_bounds_v0(self, caller_obj=None):
#
# [bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
# self.get_map_bounds( FROM_MAP = False )
# bb_midlon = (bb_minlon + bb_maxlon) / 2
# bb_midlat = (bb_minlat + bb_maxlat) / 2
# bb_center = ( bb_midlat, bb_midlon )
# print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
# print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
# zoom = self.map_window.max_zoom # (usually 18)
# zoom = zoom - 1
# ## print('max_zoom =', self.map_window.max_zoom)
#
# self.map_window.center = bb_center
# self.map_window.zoom = zoom
# print('map_window.bounds =', self.map_window.bounds )
# # bounds is read-only
# ## self.map_window.bounds = ((bb_midlat,bb_midlon),(bb_midlat,bb_midlon))
# while (True):
# # time.sleep(0.5) ######
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# # zoom_out_to_new_bounds_v0
#--------------------------------------------------------------------
def get_url_dir_filenames(self):
    """Scrape the OpenDAP directory page and return the data filenames
    it links to.

    Relies on the '"sameAs": "http://...' lines the server embeds for
    each file; lines containing 'www' are navigation links, not data."""
    response = requests.get( self.data_url_dir.value )
    filenames = list()
    for line in response.text.splitlines():
        if ('"sameAs": "http://' not in line) or ('www' in line):
            continue
        cleaned = line.replace('.html"', '')
        filenames.append( cleaned.split("/")[-1] )
    return filenames

# get_url_dir_filenames()
# get_url_dir_filenames()
#--------------------------------------------------------------------
def update_filename_list(self, caller_obj=None):
    """Populate the filename Dropdown from the current OpenDAP URL dir.

    Called by the "Go" button's on_click handler; ``caller_obj`` is the
    Button widget and is unused."""
    self.data_status.value = 'Retrieving filenames in URL dir...'
    filenames = self.get_url_dir_filenames()
    if not filenames:
        # Keep the directory the user typed but clear the rest.
        self.reset_data_panel( KEEP_DIR=True )
        self.data_status.value = 'Error: No data files found in URL dir.'
        return
    #-----------------------------------
    # Update filename list & selection
    #-----------------------------------
    self.data_filename.options = filenames
    self.data_filename.value = filenames[0]
    self.data_status.value = 'Ready.'

# update_filename_list()
#--------------------------------------------------------------------
def get_opendap_file_url(self):
    """Join the URL directory and the selected filename (with exactly
    one slash between them) into self.opendap_file_url."""
    directory = self.data_url_dir.value
    if (directory[-1] != '/'):
        directory += '/'
    self.opendap_file_url = directory + self.data_filename.value

# get_opendap_file_url()
#--------------------------------------------------------------------
def open_dataset(self):
    """Open the current OpenDAP URL with pydap and cache the handle in
    self.dataset."""
    self.dataset = pydap.client.open_url(
        self.opendap_file_url, timeout=self.timeout_secs )

# open_dataset()
#--------------------------------------------------------------------
def update_data_panel(self, change=None):
    """Refresh the whole data panel after a new file is selected: open
    the dataset, rebuild the variable name/units lists and lookup maps,
    update the variable-info widgets, clear the download log, and
    refresh the datetime panel.

    Note: when registered via a widget's "observe" method, a traitlets
    Bunch is passed as ``change``; the ``change=None`` default keeps
    direct calls working.
    """
    # print('type(change) =', type(change))
    if (self.data_filename.value == ''):
        ## self.update_filename_list()   # (try this?)
        return
    self.get_opendap_file_url()
    self.open_dataset()
    self.get_all_var_shortnames()
    self.get_all_var_longnames()
    self.get_all_var_units()
    #------------------------------------------
    # Create map between long and short names
    #------------------------------------------
    long_names = self.var_long_names
    short_names = self.var_short_names
    units_names = self.var_units_names
    self.short_name_map = dict(zip(long_names, short_names ))
    self.units_map = dict(zip(long_names, units_names ))
    #-------------------------------------------
    # Update variable list and selected value.
    #-------------------------------------------
    self.data_var_name.options = short_names
    self.data_var_name.value = short_names[0]
    #------------------------------------
    # Show other info for this variable
    #------------------------------------
    self.update_var_info()
    self.clear_download_log()  #####
    #-------------------------------------------
    # Try to show map extent in map panel
    #-------------------------------------------
    #### self.update_map_panel()
    #-------------------------------------------
    # Try to show date range in datetime panel
    #-------------------------------------------
    self.update_datetime_panel()  # clears notes, too

# update_data_panel()
#--------------------------------------------------------------------
def update_var_info(self, change=None):
    """Show metadata (long name, units, shape, dims, dtype, attributes)
    for the currently selected variable in the data-panel widgets.

    ``change`` is the traitlets Bunch passed by observe(); it is unused
    but required for the callback signature (default None keeps direct
    calls working)."""
    short_name = self.get_var_shortname()
    if (short_name == ''):
        return
    # Gather all metadata first, then assign to the widgets in one shot
    # (the tuple RHS is fully evaluated before any widget changes).
    (self.data_var_long_name.value,
     self.data_var_units.value,
     self.data_var_shape.value,
     self.data_var_dims.value,
     self.data_var_type.value,
     self.data_var_atts.options) = (
        self.get_var_longname( short_name ),
        self.get_var_units( short_name ),
        self.get_var_shape( short_name ),
        self.get_var_dimensions( short_name ),
        self.get_var_dtype( short_name ),
        self.get_var_attributes( short_name ))

# update_var_info()
#--------------------------------------------------------------------
def get_all_var_shortnames(self):
    """Cache all variable short names (the dataset's keys) on self."""
    self.var_short_names = list( self.dataset.keys() )

# get_all_var_shortnames()
#--------------------------------------------------------------------
def get_all_var_longnames(self):
    """Build self.var_long_names, parallel to self.var_short_names,
    falling back to the short name when a variable has no long_name.

    BUG FIX: the original called the bare name ``get_var_longname(name)``
    (missing ``self.``), which raised NameError on every iteration and
    was swallowed by the bare except — so long names were never actually
    retrieved. The except is also narrowed to Exception.
    """
    if not(hasattr(self, 'var_short_names')):
        self.get_all_var_shortnames()
    long_names = list()
    for name in self.var_short_names:
        try:
            long_names.append( self.get_var_longname( name ) )
        except Exception:
            # Use short name if there is no long_name.
            long_names.append( name )
            # print('No long name found for:', name)
    self.var_long_names = long_names

# get_all_var_longnames()
#--------------------------------------------------------------------
def get_all_var_units(self):
    """Build self.var_units_names, parallel to self.var_short_names,
    using 'unknown' for any variable whose units cannot be read."""
    if not(hasattr(self, 'var_short_names')):
        self.get_all_var_shortnames()
    units_names = list()
    for name in self.var_short_names:
        try:
            units_names.append( self.get_var_units( name ) )
        except:
            # No units attribute (or lookup failed) for this variable.
            units_names.append( 'unknown' )
    self.var_units_names = units_names

# get_all_var_units()
#--------------------------------------------------------------------
def get_var_shortname(self):
    """Return the short name currently selected in the variable
    Dropdown ('' when nothing is selected yet)."""
    return self.data_var_name.value

# get_var_shortname()
#--------------------------------------------------------------------
def get_var_longname( self, short_name ):
    """Return the variable's long_name attribute, or a placeholder when
    the dataset variable has none."""
    var = self.dataset[ short_name ]
    return var.long_name if hasattr(var, 'long_name') else 'Long name not found.'

# get_var_longname()
#--------------------------------------------------------------------
def get_var_units( self, short_name ):
    """Return the variable's units attribute, or 'unknown' when the
    dataset variable has none."""
    var = self.dataset[ short_name ]
    return var.units if hasattr(var, 'units') else 'unknown'

# get_var_units()
#--------------------------------------------------------------------
def get_var_shape( self, short_name ):
    """Return the variable's shape as a display string, e.g. '(12, 180)'."""
    return str( self.dataset[ short_name ].shape )

# get_var_shape()
#--------------------------------------------------------------------
def get_var_dimensions( self, short_name ):
    """Return the variable's dimension names as a display string, or a
    placeholder when the variable has no dimensions attribute."""
    var = self.dataset[ short_name ]
    return str(var.dimensions) if hasattr(var, 'dimensions') else 'No dimensions found.'

# get_var_dimensions()
#--------------------------------------------------------------------
def get_var_dtype( self, short_name ):
# The old Numeric single-character typecodes:
# ('f','d','h', 's','b','B','c','i','l'),
# corresponding to:
# ('f4','f8','i2','i2','i1','i1','S1','i4','i4'),
# are not yet supported.
type_map = {
'i1' : '1-byte signed integer',
'i2' : '2-byte signed integer',
'i4' : '4-byte signed integer',
'i8' : '8-byte signed integer',
'f4' : '4-byte floating point',
'f8' : '8-byte floating point',
'u1' : '1-byte unsigned integer',
| |
# Source repository: hisiter97/TextRecognitionDataGenerator_Full
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def concatenate_dict(main_dict, new_dict):
    """Append new_dict[key] onto the list main_dict[key], for every key
    that main_dict already has (extra keys in new_dict are ignored).
    Mutates main_dict's lists in place."""
    for key in main_dict.keys():
        main_dict[key].append(new_dict[key])
def mixture_of_bivariate_normal_nll(
    data, log_pi, mu, log_sigma, rho, eps=1e-6
):
    """Negative log-likelihood of 2-D points under a mixture of
    bivariate Gaussians (the pen-offset output density of Graves,
    "Generating Sequences With RNNs", 2013).

    :param data: (..., 2) observed (x, y) points
        # assumes the last dim is exactly (x, y) — see unbind(-1) below
    :param log_pi: (..., K) log mixture weights
    :param mu: (..., K, 2) component means
    :param log_sigma: (..., K, 2) log standard deviations
    :param rho: (..., K) per-component correlation coefficient
    :param eps: numerical-stability constant
    :return: (...) per-point negative log-likelihood
    """
    # unsqueeze(-2) broadcasts each data point against all K components.
    x, y = data.unsqueeze(-2).unbind(-1)
    mu_1, mu_2 = mu.unbind(-1)
    log_sigma_1, log_sigma_2 = log_sigma.unbind(-1)
    sigma_1 = log_sigma_1.exp() + eps
    sigma_2 = log_sigma_2.exp() + eps

    # Compute log prob of bivariate normal distribution
    Z = torch.pow((x - mu_1) / sigma_1, 2) + torch.pow((y - mu_2) / sigma_2, 2)
    Z -= 2 * rho * ((x - mu_1) * (y - mu_2)) / (sigma_1 * sigma_2)
    log_N = -Z / (2 * (1 - rho ** 2) + eps)
    log_N -= np.log(2 * np.pi) + log_sigma_1 + log_sigma_2
    log_N -= .5 * torch.log(1 - rho ** 2 + eps)

    # Use log_sum_exp to accurately compute log prob of mixture distribution
    nll = -torch.logsumexp(log_pi + log_N, dim=-1)
    return nll
def mixture_of_bivariate_normal_sample(
    log_pi, mu, log_sigma, rho, eps=1e-6, bias=0.
):
    """Draw one (x, y) sample per position from the mixture of bivariate
    Gaussians. ``bias`` sharpens sampling: mixture probabilities are
    scaled up and log-variances reduced (Graves 2013-style biased
    sampling).

    :param log_pi: (B, K) or (B, T, K) log mixture weights
    :param mu: matching (..., K, 2) component means
    :param log_sigma: matching (..., K, 2) log std-devs
    :param rho: matching (..., K) correlations
    :param eps: unused here; kept for signature symmetry with the NLL
    :param bias: sampling bias >= 0 (0 = unbiased)
    :return: (B, 2) or (B, T, 2) samples
    """
    batch_size = log_pi.shape[0]
    ndims = log_pi.dim()
    if ndims > 2:
        # Collapse batch and seq_len dimensions
        log_pi, mu, log_sigma, rho = [
            x.reshape(-1, *x.shape[2:])
            for x in [log_pi, mu, log_sigma, rho]
        ]

    # Sample mixture index using mixture probabilities pi
    pi = log_pi.exp() * (1 + bias)
    mixture_idx = pi.multinomial(1).squeeze(1)

    # Index the correct mixture for mu, log_sigma and rho
    mu, log_sigma, rho = [
        x[torch.arange(mixture_idx.shape[0]), mixture_idx]
        for x in [mu, log_sigma, rho]
    ]

    # Calculate biased variances
    sigma = (log_sigma - bias).exp()

    # Sample from the bivariate normal distribution:
    # z_2 is correlated with z_1 through rho (2-D reparameterization).
    mu_1, mu_2 = mu.unbind(-1)
    sigma_1, sigma_2 = sigma.unbind(-1)
    z_1 = torch.randn_like(mu_1)
    z_2 = torch.randn_like(mu_2)
    x = mu_1 + sigma_1 * z_1
    y = mu_2 + sigma_2 * (z_2 * ((1 - rho ** 2) ** .5) + z_1 * rho)

    # Uncollapse the matrix to a tensor (if necessary)
    sample = torch.stack([x, y], 1)
    if ndims > 2:
        sample = sample.view(batch_size, -1, 2)
    return sample
class OneHotEncoder(nn.Module):
    """Encode integer token ids as masked one-hot float vectors."""

    def __init__(self, vocab_size, device):
        super().__init__()
        self.vocab_size = vocab_size
        self.device = device

    def forward(self, arr, mask):
        """Return a tensor of shape arr.shape + (vocab_size,); rows where
        mask == 0 are zeroed out entirely."""
        target_shape = arr.size() + (self.vocab_size,)
        encoded = torch.zeros(target_shape).float().to(self.device)
        encoded.scatter_(-1, arr.unsqueeze(-1), 1)
        return encoded * mask.unsqueeze(-1)
class GaussianAttention(nn.Module):
    """Gaussian window attention over an encoded character sequence."""

    def __init__(self, hidden_size, n_mixtures, attention_multiplier=.05):
        super().__init__()
        # One linear head emits (alpha, beta, kappa) for every window mixture.
        self.linear = nn.Linear(hidden_size, 3 * n_mixtures)
        self.n_mixtures = n_mixtures
        self.attention_multiplier = attention_multiplier

    def forward(self, h_t, k_tm1, ctx, ctx_mask):
        """One attention step.

        h_t: (B, H) hidden state; k_tm1: (B, K) previous window centre;
        ctx: (B, T, V) encoded characters; ctx_mask: (B, T).
        Returns the context window (B, V) and a dict of window variables.
        """
        batch, seq_len, _ = ctx.shape
        # exp keeps all three window parameters strictly positive.
        alpha, beta, kappa = torch.exp(self.linear(h_t))[:, None].chunk(3, dim=-1)
        # The window centre only ever moves forward from its previous position.
        kappa = kappa * self.attention_multiplier + k_tm1.unsqueeze(1)
        positions = torch.arange(seq_len, dtype=torch.float32).to(ctx.device)
        positions = positions[None, :, None].repeat(batch, 1, 1)  # (B, T, 1)
        phi = alpha * torch.exp(-beta * torch.pow(kappa - positions, 2))  # (B, T, K)
        phi = phi.sum(-1) * ctx_mask  # collapse mixtures, zero out padding
        w = (phi.unsqueeze(-1) * ctx).sum(1)
        window_vars = {
            'alpha': alpha.squeeze(1),
            'beta': beta.squeeze(1),
            'kappa': kappa.squeeze(1),
            'phi': phi,
        }
        return w, window_vars
class HandwritingSynthesisNetwork(nn.Module):
    """Conditional handwriting synthesis network (Graves-style).

    An LSTM cell attends over the character sequence through a Gaussian
    window; two further LSTM layers consume the strokes, the attention
    context and the previous layer's output (skip connections), and a
    linear head emits bivariate-Gaussian-mixture parameters plus an
    end-of-stroke probability.
    """

    def __init__(
        self, vocab_size, hidden_size, n_layers,
        n_mixtures_attention, n_mixtures_output, device
    ):
        # NOTE(review): n_layers is accepted but unused — the recurrent depth
        # is hard-wired to 3 below; confirm before removing.
        super().__init__()
        self.device = device
        self.encoder = OneHotEncoder(vocab_size, device)
        self.lstm_0 = nn.LSTMCell(3 + vocab_size, hidden_size)
        self.lstm_1 = nn.LSTM(3 + vocab_size + hidden_size, hidden_size, batch_first=True)
        self.lstm_2 = nn.LSTM(3 + vocab_size + hidden_size, hidden_size, batch_first=True)
        self.attention = GaussianAttention(hidden_size, n_mixtures_attention)
        # 6 parameters per output mixture (pi, rho, 2x mu, 2x log_sigma) + 1 eos logit.
        self.fc = nn.Linear(
            hidden_size * 3, n_mixtures_output * 6 + 1
        )
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.n_mixtures_output = n_mixtures_output

    def __init_hidden(self, bsz):
        """Zero-initialised recurrent states, attention window w_0 and kappa k_0."""
        hid_0 = torch.zeros(bsz, self.hidden_size * 2).float().to(self.device)
        hid_0 = hid_0.chunk(2, dim=-1)  # (h, c) pair for the LSTMCell
        hid_1, hid_2 = None, None  # nn.LSTM treats None as zero state
        w_0 = torch.zeros(bsz, self.vocab_size).float().to(self.device)
        k_0 = torch.zeros(bsz, 1).float().to(self.device)
        return hid_0, hid_1, hid_2, w_0, k_0

    def __parse_outputs(self, out):
        """Split the fc output and constrain each parameter to its valid range."""
        K = self.n_mixtures_output
        mu, log_sigma, pi, rho, eos = out.split([2 * K, 2 * K, K, K, 1], -1)
        rho = torch.tanh(rho)  # correlation in (-1, 1)
        log_pi = F.log_softmax(pi, dim=-1)  # log mixture weights
        # Graves parameterisation: e = sigmoid(-e_hat).
        eos = torch.sigmoid(-eos)
        mu = mu.view(mu.shape[:-1] + (K, 2))
        log_sigma = log_sigma.view(log_sigma.shape[:-1] + (K, 2))
        return log_pi, mu, log_sigma, rho, eos

    def forward(self, chars, chars_mask, strokes, strokes_mask, prev_states=None):
        """Teacher-forced pass over a stroke sequence.

        Returns (output_params, monitor_vars, states) where output_params is
        (log_pi, mu, log_sigma, rho, eos) and states can be fed back in as
        prev_states for truncated BPTT.
        """
        chars = self.encoder(chars, chars_mask)
        if prev_states is None:
            hid_0, hid_1, hid_2, w_t, k_t = self.__init_hidden(chars.size(0))
        else:
            hid_0, hid_1, hid_2, w_t, k_t = prev_states
        lstm_0_out = []
        attention_out = []
        monitor_vars = {'phi': [], 'alpha': [], 'beta': [], 'kappa': []}
        # lstm_0 is unrolled manually because the attention window w_t feeds
        # back into its input at every step.
        for x_t in strokes.unbind(1):
            hid_0 = self.lstm_0(
                torch.cat([x_t, w_t], -1),
                hid_0
            )
            w_t, vars_t = self.attention(hid_0[0], k_t, chars, chars_mask)
            k_t = vars_t['kappa']
            concatenate_dict(monitor_vars, vars_t)
            lstm_0_out.append(hid_0[0])
            attention_out.append(w_t)
        lstm_0_out = torch.stack(lstm_0_out, 1)
        attention_out = torch.stack(attention_out, 1)
        # Layers 1 and 2 run over the whole sequence at once, with skip
        # connections from the raw strokes and the attention context.
        lstm_1_out, hid_1 = self.lstm_1(
            torch.cat([strokes, attention_out, lstm_0_out], -1),
            hid_1
        )
        lstm_2_out, hid_2 = self.lstm_2(
            torch.cat([strokes, attention_out, lstm_1_out], -1),
            hid_2
        )
        last_out = self.fc(
            torch.cat([lstm_0_out, lstm_1_out, lstm_2_out], -1)
        )
        output_params = self.__parse_outputs(last_out)
        monitor_vars = {x: torch.stack(y, 1) for x, y in monitor_vars.items()}
        return output_params, monitor_vars, (hid_0, hid_1, hid_2, w_t, k_t)

    def sample(self, chars, chars_mask, maxlen=1000, bias=5.0):
        """Autoregressively generate strokes conditioned on a character string.

        Generation stops early once, for every batch row, the attention
        window has moved past the last character and carries ~no mass.
        Returns (batch, T, 3) strokes laid out as [eos, x, y].
        """
        chars = self.encoder(chars, chars_mask)
        last_idx = (chars_mask.sum(-1) - 2).long()  # index of last real char
        hid_0, hid_1, hid_2, w_t, k_t = self.__init_hidden(chars.size(0))
        x_t = torch.zeros(chars.size(0), 3).float().to(self.device)
        strokes = []
        monitor_vars = {'phi': [], 'kappa': [], 'alpha': [], 'beta': []}
        for i in range(maxlen):
            hid_0 = self.lstm_0(
                torch.cat([x_t, w_t], -1),
                hid_0
            )
            w_t, vars_t = self.attention(hid_0[0], k_t, chars, chars_mask)
            k_t = vars_t['kappa']
            concatenate_dict(monitor_vars, vars_t)
            _, hid_1 = self.lstm_1(
                torch.cat([x_t, w_t, hid_0[0]], 1).unsqueeze(1),
                hid_1
            )  # hid_1 - tuple of (1, batch_size, hidden_size)
            _, hid_2 = self.lstm_2(
                torch.cat([x_t, w_t, hid_1[0].squeeze(0)], 1).unsqueeze(1),
                hid_2
            )  # hid_2 - tuple of (1, batch_size, hidden_size)
            last_out = self.fc(
                torch.cat([hid_0[0], hid_1[0].squeeze(0), hid_2[0].squeeze(0)], 1)
            )
            output_params = self.__parse_outputs(last_out)
            x_t = torch.cat([
                output_params[-1].bernoulli(),
                mixture_of_bivariate_normal_sample(*output_params[:-1], bias=bias)
            ], dim=1)
            ################################################
            #                Exit Condition                #
            ################################################
            # BUGFIX: use the per-timestep attention weights phi (B, T), not
            # kappa (B, K) — argmax over kappa's mixture axis is meaningless
            # when compared against a character index.
            phi_t = vars_t['phi']
            check_1 = ~torch.gt(phi_t.max(1)[1], last_idx)  # peak not yet past last char
            check_2 = torch.sign(phi_t.sum(1)).bool()       # attention still carries mass
            is_incomplete = check_1 | check_2
            if is_incomplete.sum().item() == 0:
                break
            # Zero out rows that already finished so they stop moving.
            x_t = x_t * is_incomplete.float().unsqueeze(-1)
            strokes.append(x_t)
        return torch.stack(strokes, 1)  # [e, x, y]

    def compute_loss(self, chars, chars_mask, strokes, strokes_mask,
                     prev_states=None, bias=0.):
        """Teacher-forced losses for one batch.

        Returns (stroke_loss, eos_loss, monitor_vars, prev_states,
        teacher_forced_sample). BUGFIX: the teacher-forced sample previously
        referenced `pr.bias`, an undefined name (NameError at runtime);
        `bias` is now an explicit parameter defaulting to 0.
        """
        input_strokes = strokes[:, :-1]
        input_strokes_mask = strokes_mask[:, :-1]
        output_strokes = strokes[:, 1:]  # targets are the next-step strokes
        output_params, monitor_vars, prev_states = self.forward(
            chars, chars_mask, input_strokes, input_strokes_mask,
            prev_states
        )
        stroke_loss = mixture_of_bivariate_normal_nll(
            output_strokes[:, :, 1:],
            *output_params[:-1]  # passing everything except eos param
        )
        stroke_loss = (stroke_loss * input_strokes_mask).sum(-1).mean()
        eos_loss = F.binary_cross_entropy(
            output_params[-1].squeeze(-1),
            output_strokes[:, :, 0],
            reduction='none'
        )
        eos_loss = (eos_loss * input_strokes_mask).sum(-1).mean()
        teacher_forced_sample = torch.cat([
            output_params[-1].bernoulli(),
            mixture_of_bivariate_normal_sample(*output_params[:-1], bias=bias)
        ], dim=-1)
        return stroke_loss, eos_loss, monitor_vars, prev_states, teacher_forced_sample
class HandwritingPredictionNetwork(nn.Module):
def __init__(
    self, hidden_size, n_layers, n_mixtures_output, device
):
    # Unconditional handwriting prediction network: three stacked LSTMs
    # with skip connections from the 3-dim stroke input, feeding one
    # linear mixture-density head.
    # NOTE(review): n_layers is accepted but unused — depth is fixed at 3.
    super().__init__()
    self.device = device
    self.lstm_0 = nn.LSTM(3, hidden_size, batch_first=True)
    self.lstm_1 = nn.LSTM(3 + hidden_size, hidden_size, batch_first=True)
    self.lstm_2 = nn.LSTM(3 + hidden_size, hidden_size, batch_first=True)
    # 6 parameters per output mixture + 1 end-of-stroke logit.
    self.fc = nn.Linear(
        hidden_size * 3, n_mixtures_output * 6 + 1
    )
    self.hidden_size = hidden_size
    self.n_mixtures_output = n_mixtures_output
def __parse_outputs(self, out):
    """Split the fc output into (log_pi, mu, log_sigma, rho, eos), each
    squashed into its valid range."""
    n_mix = self.n_mixtures_output
    splits = [2 * n_mix, 2 * n_mix, n_mix, n_mix, 1]
    mu, log_sigma, pi, rho, eos = out.split(splits, -1)
    # Constrain each parameter group.
    rho = torch.tanh(rho)  # correlation in (-1, 1)
    log_pi = F.log_softmax(pi, dim=-1)  # log mixture weights
    eos = torch.sigmoid(-eos)  # Graves parameterisation: e = sigmoid(-e_hat)
    mu = mu.view(mu.shape[:-1] + (n_mix, 2))
    log_sigma = log_sigma.view(log_sigma.shape[:-1] + (n_mix, 2))
    return log_pi, mu, log_sigma, rho, eos
def forward(self, strokes, strokes_mask, prev_states=None):
    """Run the three-layer stack over a stroke sequence.

    strokes_mask is accepted for API symmetry but not used here.
    Returns (output_params, (hid_0, hid_1, hid_2)).
    """
    if prev_states is None:
        hid_0 = hid_1 = hid_2 = None  # nn.LSTM zero-initialises None states
    else:
        hid_0, hid_1, hid_2 = prev_states
    out_0, hid_0 = self.lstm_0(strokes, hid_0)
    # Skip connections: each deeper layer also sees the raw strokes.
    out_1, hid_1 = self.lstm_1(torch.cat([strokes, out_0], -1), hid_1)
    out_2, hid_2 = self.lstm_2(torch.cat([strokes, out_1], -1), hid_2)
    features = torch.cat([out_0, out_1, out_2], -1)
    output_params = self.__parse_outputs(self.fc(features))
    return output_params, (hid_0, hid_1, hid_2)
def sample(self, batch_size=1, maxlen=1000):
    """Autoregressively draw `maxlen` stroke points starting from zeros.

    NOTE(review): the torch.cat of x_t (B, 1, 3) with hidden states shaped
    (1, B, H) only lines up when batch_size == 1 — confirm before batching.
    """
    hid_0 = hid_1 = hid_2 = None
    x_t = torch.zeros(batch_size, 1, 3).float().to(self.device)
    generated = []
    for _ in range(maxlen):
        _, hid_0 = self.lstm_0(x_t, hid_0)
        _, hid_1 = self.lstm_1(
            torch.cat([x_t, hid_0[0]], -1),
            hid_1
        )  # hid_1 - tuple of (1, batch_size, hidden_size)
        _, hid_2 = self.lstm_2(
            torch.cat([x_t, hid_1[0]], -1),
            hid_2
        )  # hid_2 - tuple of (1, batch_size, hidden_size)
        features = torch.cat([hid_0[0], hid_1[0], hid_2[0]], -1)
        params = self.__parse_outputs(self.fc(features).squeeze(1))
        # New point: sampled end-of-stroke bit + biased (x, y) sample.
        x_t = torch.cat([
            params[-1].bernoulli(),
            mixture_of_bivariate_normal_sample(*params[:-1], bias=3.)
        ], dim=1).unsqueeze(1)
        generated.append(x_t)
    return torch.cat(generated, 1)
def compute_loss(self, strokes, strokes_mask, prev_states=None):
input_strokes = strokes[:, :-1]
input_strokes_mask = strokes_mask[:, :-1]
output_strokes = strokes[:, 1:]
output_params, prev_states = self.forward(
input_strokes, input_strokes_mask,
prev_states
)
stroke_loss = mixture_of_bivariate_normal_nll(
output_strokes[:, :, 1:],
*output_params[:-1] # passing everything except eos param
)
stroke_loss = (stroke_loss * input_strokes_mask).sum(-1).mean()
eos_loss = | |
<gh_stars>1-10
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import math
import time
import traceback
import pyroslib
# --- Debug verbosity levels --------------------------------------------------
DEBUG_LEVEL_OFF = 0
DEBUG_LEVEL_INFO = 1
DEBUG_LEVEL_DEBUG = 2
DEBUG_LEVEL_ALL = 3
DEBUG_LEVEL = DEBUG_LEVEL_ALL  # current verbosity

# --- Initial controller tuning -----------------------------------------------
INITIAL_SPEED = 40
INITIAL_SIDE_GAIN = 0.4
INITIAL_FORWARD_GAIN = 1.0
INITIAL_DISTANCE_GAIN = 1.7
INITIAL_CORNER_GAIN = 1.3

MAX_TIMEOUT = 5
MAX_WALL_END_WAIT_TIMEOUT = 2

MAX_ROTATE_DISTANCE = 500
MIN_ROTATE_DISTANCE = 0
MIN_DISTANCE = 100

SQRT2 = math.sqrt(2)

# --- Run state ---------------------------------------------------------------
speed = INITIAL_SPEED
driveAngle = 0

run = True
received = True
turned = False         # set once the robot has turned around
justScanWidth = False  # when True, only measure the corridor width

leftDistance = 0
rightDistance = 0

# Target wall distance as a fraction of half the corridor width.
IDEAL_DISTANCE_FACTOR = 0.7
corridorWidth = 400
idealDistance = (corridorWidth / 2) * IDEAL_DISTANCE_FACTOR
distanceGain = INITIAL_DISTANCE_GAIN
lastWallDistance = 0

scanTime = 0
SCAN_TIME = 1.2  # seconds to dwell on each pre-start scan

continuousCounter = 0
wallEndWaitTimeout = 0

# --- Distance sensor bookkeeping ---------------------------------------------
DISTANCE_AVG_TIME = 0.6  # sliding-average window, seconds
distances = {}
distanceTimestamp = 0
distanceDeg1 = -1
distanceDeg2 = -1
distance1 = -1
distance2 = -1
avgDistance1 = -1
avgDistance2 = -1
deltaDistance1 = -1
deltaDistance2 = -1
historyDistancesDeg1 = -1
historyDistancesDeg2 = -1
historyDistances1 = []
historyDistanceTimes1 = []
historyDistances2 = []
historyDistanceTimes2 = []
lastDistanceReceivedTime = 0

# --- Gyro bookkeeping ---------------------------------------------------------
gyroAngle = 0
gyroDeltaAngle = 0
gyroStartAngle = 0
gyroIntegral = 0
EPSILON_ANGLE = 2

renewContinuous = time.time()
digestTime = time.time()
readingDistanceContinuous = True
readingGyroContinuous = True

# --- Indices into the gain vectors and PID state lists -------------------------
KGAIN_INDEX = 0  # overall gain
KpI = 1          # proportional coefficient
KiI = 2          # integral coefficient
KdI = 3          # derivative coefficient

ERROR_INDEX = 0
PREVIOUS_ERROR_INDEX = 1
INTEGRAL_INDEX = 2
DERIVATIVE_INDEX = 3
DELTA_TIME_INDEX = 4

MINIMUM_FORWARD_SPEED = 20
MAX_ANGLE = 45

ACTION_NONE = 0
ACTION_TURN = 1
ACTION_DRIVE = 2

lastActionTime = 0

accumSideDeltas = []
accumForwardDeltas = []
sideAngleAccums = []
ACCUM_SIDE_DETALS_SIZE = 4  # NOTE(review): 'DETALS' typo kept — identifier is referenced elsewhere

# [overall, Kp, Ki, Kd] per controller.
forwardGains = [INITIAL_FORWARD_GAIN, 0.8, 0.0, 0.2]
cornerGains = [INITIAL_CORNER_GAIN, 0.7, 0.0, 0.3]
sideGains = [INITIAL_SIDE_GAIN, 0.88, 0.0, 0.12]
def doNothing():
    """No-op placeholder used as the idle algorithm / sensor callback."""
# Callback slots swapped at runtime: distance/gyro handlers and the
# currently running algorithm step.
doDistance = doNothing
doGyro = doNothing
algorithm = doNothing
algorithmIndex = 0
algorithmsList = []
def log(level, what):
    """Print `what` unless `level` exceeds the configured DEBUG_LEVEL."""
    if level > DEBUG_LEVEL:
        return
    print(what)
def logArgs(*msg):
    """Emit a DEBUG-level line: ms-resolution timestamp (mod 100000) + joined args.

    Fix: the original computed a `dt` string from distanceTimestamp that was
    never used — dead code removed.
    """
    tnow = time.time()
    logMsg = formatArgR("", int(tnow * 1000) % 100000, 7) + " " + " ".join(msg)
    log(DEBUG_LEVEL_DEBUG, logMsg)
def formatArgL(label, value, fieldSize):
    """Left-justify value to fieldSize chars; prefix with 'label:' when label is non-empty."""
    text = str(value).ljust(fieldSize)
    return label + ":" + text if label else text
def formatArgR(label, value, fieldSize):
    """Right-justify value to fieldSize chars; prefix with 'label:' when label is non-empty."""
    text = str(value).rjust(fieldSize)
    return label + ":" + text if label else text
def setAlgorithm(alg):
    """Install `alg` as the currently running algorithm callback."""
    global algorithm
    algorithm = alg
def setAlgorithms(*algs):
    """Replace the algorithm queue with `algs` (in place) and start on the first."""
    global algorithmIndex, algorithmsList
    algorithmIndex = 0
    algorithmsList[:] = list(algs)
    setAlgorithm(algorithmsList[0])
def handleDistances(topic, message, groups):
    """MQTT handler for distance-sensor messages.

    Parses a payload of "deg:dist,deg:dist,...,timestamp:t" pairs, updates
    the per-angle sliding histories/averages and finally invokes the
    doDistance callback.
    """
    global historyDistancesDeg1, historyDistancesDeg2, historyDistances1, historyDistances2, historyDistanceTimes1, historyDistanceTimes2
    global distanceDeg1, distanceDeg2, distance1, distance2, avgDistance1, avgDistance2, distanceTimestamp, deltaDistance1, deltaDistance2
    global deltaTime, lastDistanceReceivedTime
    global received

    def addToHistoryWithTime(value, valueTime, history, historyTimes, maxTime):
        # Push the new reading and drop entries older than maxTime seconds.
        history.append(value)
        historyTimes.append(valueTime)
        while len(historyTimes) > 0 and historyTimes[0] < valueTime - maxTime:
            del history[0]
            del historyTimes[0]
        if len(history) > 1:
            # (delta from previous reading, window average)
            return value - history[len(history) - 2], sum(history) / len(history)
        else:
            return 0, 0

    def sanitise(distance):
        # distance -= 100
        # Clamp implausibly small readings to a 2mm floor.
        if distance < 2:
            distance = 2
        return distance

    def toFloatString(f):
        # Normalise a float key to one decimal place, always with a ".0" suffix.
        r = str(round(f, 1))
        if "." not in r:
            return r + ".0"
        return r

    receivedTime = time.time()

    split = message.split(",")
    deg1 = -1
    val1 = -1
    deg2 = -1
    val2 = -1
    i = 0
    # Up to two angle:distance pairs plus the trailing timestamp are expected.
    for s in split:
        kv = s.split(":")
        if kv[0] == "timestamp":
            distanceTimestamp = float(kv[1])
        else:
            distances[toFloatString(float(kv[0]))] = sanitise(float(kv[1]))
            deg = int(float(kv[0]))
            val = int(float(kv[1]))
            if i == 0:
                deg1 = deg
                val1 = val
            elif i == 1:
                deg2 = deg
                val2 = val
            i += 1
    distanceDeg1 = deg1
    distance1 = val1
    distanceDeg2 = deg2
    distance2 = val2
    # Keep deg1 <= deg2.
    # NOTE(review): only the degree locals/globals are swapped here —
    # distance1/distance2 keep their original order, so distanceN may no
    # longer correspond to distanceDegN after a swap; confirm intent.
    if deg1 > deg2:
        tmp = deg2
        deg2 = deg1
        deg1 = tmp
        tmp = distanceDeg2
        distanceDeg2 = distanceDeg1
        distanceDeg1 = tmp
    # Reset the histories whenever the scanned angle pair changes.
    if historyDistancesDeg1 != deg1 or historyDistancesDeg2 != deg2:
        historyDistances1 = []
        historyDistanceTimes1 = []
        historyDistancesDeg1 = deg1
        historyDistances2 = []
        historyDistanceTimes2 = []
        historyDistancesDeg2 = deg2
    deltaDistance1, avgDistance1 = addToHistoryWithTime(distance1, receivedTime, historyDistances1, historyDistanceTimes1, DISTANCE_AVG_TIME)
    deltaDistance2, avgDistance2 = addToHistoryWithTime(distance2, receivedTime, historyDistances2, historyDistanceTimes2, DISTANCE_AVG_TIME)
    deltaTime = receivedTime - lastDistanceReceivedTime
    lastDistanceReceivedTime = receivedTime
    received = True
    # if nextState is not None:
    #     nextState()
    doDistance()
def handleGyroData(topic, message, groups):
    """Accumulate the gyro angle from a CSV message and invoke the gyro callback.

    Field 2 of the payload is the angle change, field 3 its time delta.
    """
    global gyroAngle, gyroDeltaAngle, gyroDeltaTime, lastGyroReceivedTime
    fields = message.split(",")
    delta = float(fields[2])
    gyroDeltaAngle = delta
    gyroAngle += delta
    gyroDeltaTime = float(fields[3])
    lastGyroReceivedTime = time.time()
    doGyro()
def stop():
    """Disable sensor callbacks and the running algorithm, then halt motion."""
    global run, doDistance, doGyro
    doDistance = doNothing
    doGyro = doNothing
    print("Stoping...")
    setAlgorithm(doNothing)
    run = False
    pyroslib.publish("move/stop", "stop")
    print("Stopped.")
def start():
    """Begin a full run: scan both walls first, then follow the wall."""
    global run, turned, justScanWidth
    print("Starting...")
    run, turned, justScanWidth = True, False, False
    preStartInitiateLeftScan()
def quickstart():
    """Skip the wall scans: schedule one scan window and go straight to preStart."""
    global run, turned, justScanWidth, scanTime
    print("Quick Starting...")
    run, turned, justScanWidth = True, False, False
    scanTime = time.time() + SCAN_TIME
    pyroslib.publish("sensor/distance/read", str(0))
    setAlgorithm(preStart)
def scanWidth():
    """Measure the corridor width only; do not drive afterwards."""
    global run, turned, justScanWidth
    print("Scanning width...")
    run, turned, justScanWidth = True, False, True
    preStartInitiateLeftScan()
def steer(steerDistance, speed):
    """Publish a steer command as '<distance> <speed>' (both truncated to int)."""
    pyroslib.publish("move/steer", f"{int(steerDistance)} {int(speed)}")
def drive(angle, speed):
    """Publish a drive command as '<angle> <speed>' (both truncated to int)."""
    pyroslib.publish("move/drive", f"{int(angle)} {int(speed)}")
def requestDistanceAtAngle(angle):
    # Ask the distance-sensor service for a one-off reading at the given angle.
    pyroslib.publish("sensor/distance/deg", str(angle))
def normalise(value, maxValue):
    """Clamp value to [-maxValue, maxValue] and scale it into [-1, 1]."""
    clamped = max(-maxValue, min(maxValue, value))
    return clamped / maxValue
def sign(x):
    """Return 1, -1 or 0 matching the sign of x."""
    return (x > 0) - (x < 0)
def resetPid(pidValues):
    """Zero a PID state vector in place (error, previous error, derivative,
    integral and delta-time slots)."""
    for idx in (ERROR_INDEX, PREVIOUS_ERROR_INDEX, DERIVATIVE_INDEX,
                INTEGRAL_INDEX, DELTA_TIME_INDEX):
        pidValues[idx] = 0
def preStartInitiateLeftScan():
    # Aim the distance sensor at 0 deg (left wall) and let preStartLeftScan
    # collect readings for SCAN_TIME seconds.
    global scanTime
    scanTime = time.time() + SCAN_TIME
    setAlgorithm(preStartLeftScan)
    pyroslib.publish("sensor/distance/read", str(0))
def preStartLeftScan():
    """Latch the averaged left-wall distance once the scan window has elapsed."""
    global leftDistance
    if time.time() <= scanTime:
        return
    leftDistance = avgDistance2
    log(DEBUG_LEVEL_INFO, "LeftDistance = " + str(leftDistance))
    preStartInitiateRightScan()
def preStartInitiateRightScan():
    # Aim the distance sensor at 90 deg (right wall) and let
    # preStartRightScan collect readings for SCAN_TIME seconds.
    global scanTime
    scanTime = time.time() + SCAN_TIME
    setAlgorithm(preStartRightScan)
    pyroslib.publish("sensor/distance/read", str(90))
def preStartRightScan():
    """Latch the averaged right-wall distance once the scan window has elapsed,
    then move on to the warm-up phase."""
    global rightDistance
    if time.time() <= scanTime:
        return
    rightDistance = avgDistance1
    log(DEBUG_LEVEL_INFO, "RightDistance = " + str(rightDistance))
    preStartWarmUp()
def preStartWarmUp():
    # NOTE(review): DEAD CODE — this definition is immediately shadowed by a
    # second preStartWarmUp defined right below it (which drops the
    # IDEAL_DISTANCE_FACTOR scaling and does not start the continuous
    # distance read). Confirm which variant is intended and delete the other.
    global corridorWidth, idealDistance, scanTime
    scanTime = time.time() + SCAN_TIME
    if justScanWidth:
        pyroslib.publish("sensor/distance/read", str(0))
        setAlgorithm(doNothing)
    else:
        pyroslib.publish("sensor/distance/read", str(45))
        setAlgorithm(preStart)
    corridorWidth = leftDistance + rightDistance
    idealDistance = (corridorWidth / 2) * IDEAL_DISTANCE_FACTOR  # * SQRT2
    log(DEBUG_LEVEL_INFO, "Corridor is " + str(corridorWidth) + "mm wide. Ideal distance=" + str(idealDistance))
    pyroslib.publish("maze/data/corridor", str(corridorWidth))
    pyroslib.publish("maze/data/idealDistance", str(idealDistance))
    pyroslib.publish("sensor/distance/continuous", "start")
def preStartWarmUp():
    # Compute the corridor width from the two wall scans, publish it, and aim
    # the sensor at 45 deg for the warm-up phase (or finish if justScanWidth).
    # NOTE(review): this redefinition shadows the earlier preStartWarmUp.
    # Unlike it, idealDistance here ignores IDEAL_DISTANCE_FACTOR and the
    # continuous distance read stays commented out — confirm this is the
    # intended variant.
    global corridorWidth, idealDistance, scanTime
    scanTime = time.time() + SCAN_TIME
    if justScanWidth:
        pyroslib.publish("sensor/distance/read", str(0))
        setAlgorithm(doNothing)
    else:
        pyroslib.publish("sensor/distance/read", str(45))
        setAlgorithm(preStart)
    corridorWidth = leftDistance + rightDistance
    idealDistance = (corridorWidth / 2)  # * SQRT2
    log(DEBUG_LEVEL_INFO, "Corridor is " + str(corridorWidth) + "mm wide. Ideal distance=" + str(idealDistance))
    pyroslib.publish("maze/data/corridor", str(corridorWidth))
    pyroslib.publish("maze/data/idealDistance", str(idealDistance))
    # pyroslib.publish("sensor/distance/continuous", "start")
def preStart():
    """Idle until the warm-up scan window passes, then start wall following."""
    if time.time() <= scanTime:
        return
    setAlgorithm(doNothing)
    followLeftWall()
def followSide(forwardDistance, forwardDelta, sideDistance, sideDelta, direction, dt):
global lastActionTime
global sideAngleAccums, accumSideDeltas, accumForwardDeltas
global turned
def log1(*msg):
logArgs(*((formatArgL(" dt", round(dt, 3), 5),
formatArgR(" fd", forwardDistance, 5), formatArgR(" fdd", forwardDelta, 5),
formatArgR(" sd", sideDistance, 5), formatArgR(" sdd", sideDelta, 5)) + msg))
localSpeed = speed
if forwardDistance > 700 and abs(sideDistance - idealDistance) < 50 and sideDelta < 10:
localSpeed = 250
if forwardDistance > 1000:
forwardDelta = - int(localSpeed / 2)
if abs(forwardDelta) > (localSpeed / 2) * 1.5:
forwardDelta = sign(forwardDelta) * int(localSpeed / 2)
if abs(forwardDelta) < 0.1:
forwardDelta = -int(localSpeed / 2)
accumSideDeltas.append(sideDelta)
while len(accumSideDeltas) > ACCUM_SIDE_DETALS_SIZE:
del accumSideDeltas[0]
accumSideDelta = sum(accumSideDeltas) # / len(accumSideDeltas)
accumForwardDeltas.append(forwardDelta)
while len(accumForwardDeltas) > ACCUM_SIDE_DETALS_SIZE:
del accumForwardDeltas[0]
accumForwardDelta = sum(accumForwardDeltas) / len(accumForwardDeltas)
forwardError = forwardDistance
forwardControl = forwardGains[KGAIN_INDEX] * (forwardError * forwardGains[KpI] + (forwardDelta / dt) * forwardGains[KdI])
angle = sideGains[KGAIN_INDEX] * ((sideDistance - idealDistance) * sideGains[KpI] + (sideDelta / dt) * sideGains[KdI])
angle = - direction * normalise(angle, MAX_ANGLE) * MAX_ANGLE
if not turned and (sideDistance > corridorWidth or sideDelta > 150): # and forwardDistance > corridorWidth:
turned = True
log1(" T180 ", formatArgR("cw", round(corridorWidth, 1), 5))
pauseBeforeRightWall()
elif sideDistance < corridorWidth and forwardControl < idealDistance * distanceGain and forwardDelta <= 0:
steerDistance = cornerGains[KGAIN_INDEX] * (forwardError * cornerGains[KpI] + (forwardDelta / dt) * cornerGains[KdI])
# if steerDistance < 50:
# steerDistance = 50
if turned:
steerDistance = -steerDistance
log1(" CORNER ", formatArgR("s", round(speed, 1), 6), formatArgR("sd", round(steerDistance, 1), 5), formatArgR("fwe", round(forwardError), 6), formatArgR("fc", round(forwardControl), 6), formatArgR("a", round(angle), 6))
steer(steerDistance, speed)
sideAngleAccums = []
accumSideDeltas = []
accumForwardDeltas = []
else:
if len(sideAngleAccums) > 0:
sideAngleAccum = sum(sideAngleAccums) / len(sideAngleAccums)
else:
sideAngleAccum = 0
if len(sideAngleAccums) > 2 and (sign(sideAngleAccum) != sign(accumSideDelta) or abs(accumSideDelta) < 5) and abs(sideAngleAccum) > 9:
nextAction = ACTION_TURN
sideAngleAccums = []
else:
sideAngleAccums.append(angle)
while len(sideAngleAccums) > ACCUM_SIDE_DETALS_SIZE:
del sideAngleAccums[0]
nextAction = ACTION_DRIVE
if nextAction == ACTION_DRIVE:
log1(" DRIV ", formatArgR("i", round(forwardIntegral, 1), 6), formatArgR("s", round(localSpeed, 1), 6), formatArgR("a", round(angle, 1), 5), formatArgR("saa", round(sideAngleAccum), 6), formatArgR("fc", round(forwardControl), 6))
drive(angle, localSpeed)
else:
turnDirection = 1
dmsg = "turn to wall td:" + str(turnDirection)
if sideAngleAccum < 0:
turnDirection = -turnDirection
dmsg = "turn away the wall td:" + str(turnDirection)
angleR = sideAngleAccum / 180
fudgeFactor = 1
steerDistance = fudgeFactor * turnDirection * abs(accumForwardDelta) / abs(angleR)
log1(" TURN ", formatArgR("s", round(speed, 1), 6), | |
self.__total
def _set_total(self, v, load=False):
    """
    Setter method for total, mapped from YANG variable /system/cpus/cpu/state/total (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_total is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_total() directly.
    YANG Description: Total CPU utilization.
    """
    # Appears to be pyangbind-generated accessor code — avoid hand-editing.
    if hasattr(v, "_utype"):
        # Unwrap union-typed wrapper values before validation.
        v = v._utype(v)
    try:
        # YANGDynClass validates/coerces v against the generated container type.
        t = YANGDynClass(v,base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """total must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__total = t
    if hasattr(self, '_set'):
        # Notify the parent data tree that a child changed.
        self._set()
def _unset_total(self):
    # Restore the generated default (empty container) for 'total'.
    self.__total = YANGDynClass(base=total.total, is_container='container', yang_name="total", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_user(self):
    """
    Getter method for user, mapped from YANG variable /system/cpus/cpu/state/user (container)
    YANG Description: Percentage of CPU time spent running in user space.
    """
    # Returns the private backing attribute maintained by _set_user/_unset_user.
    return self.__user
def _set_user(self, v, load=False):
    """
    Setter method for user, mapped from YANG variable /system/cpus/cpu/state/user (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_user is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_user() directly.
    YANG Description: Percentage of CPU time spent running in user space.
    """
    # Appears to be pyangbind-generated accessor code — avoid hand-editing.
    if hasattr(v, "_utype"):
        # Unwrap union-typed wrapper values before validation.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """user must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__user = t
    if hasattr(self, '_set'):
        # Notify the parent data tree that a child changed.
        self._set()
def _unset_user(self):
    # Restore the generated default (empty container) for 'user'.
    self.__user = YANGDynClass(base=user.user, is_container='container', yang_name="user", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_kernel(self):
    """
    Getter method for kernel, mapped from YANG variable /system/cpus/cpu/state/kernel (container)
    YANG Description: Percentage of CPU time spent running in kernel space.
    """
    # Returns the private backing attribute maintained by _set_kernel/_unset_kernel.
    return self.__kernel
def _set_kernel(self, v, load=False):
    """
    Setter method for kernel, mapped from YANG variable /system/cpus/cpu/state/kernel (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_kernel is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_kernel() directly.
    YANG Description: Percentage of CPU time spent running in kernel space.
    """
    # Appears to be pyangbind-generated accessor code — avoid hand-editing.
    if hasattr(v, "_utype"):
        # Unwrap union-typed wrapper values before validation.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """kernel must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__kernel = t
    if hasattr(self, '_set'):
        # Notify the parent data tree that a child changed.
        self._set()
def _unset_kernel(self):
    # Restore the generated default (empty container) for 'kernel'.
    self.__kernel = YANGDynClass(base=kernel.kernel, is_container='container', yang_name="kernel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_nice(self):
    """
    Getter method for nice, mapped from YANG variable /system/cpus/cpu/state/nice (container)
    YANG Description: Percentage of CPU time spent running low-priority (niced)
    user processes.
    """
    # Returns the private backing attribute maintained by _set_nice/_unset_nice.
    return self.__nice
def _set_nice(self, v, load=False):
    """
    Setter method for nice, mapped from YANG variable /system/cpus/cpu/state/nice (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_nice is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_nice() directly.
    YANG Description: Percentage of CPU time spent running low-priority (niced)
    user processes.
    """
    # Appears to be pyangbind-generated accessor code — avoid hand-editing.
    if hasattr(v, "_utype"):
        # Unwrap union-typed wrapper values before validation.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """nice must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__nice = t
    if hasattr(self, '_set'):
        # Notify the parent data tree that a child changed.
        self._set()
def _unset_nice(self):
    # Restore the generated default (empty container) for 'nice'.
    self.__nice = YANGDynClass(base=nice.nice, is_container='container', yang_name="nice", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_idle(self):
    """
    Getter method for idle, mapped from YANG variable /system/cpus/cpu/state/idle (container)
    YANG Description: Percentage of CPU time spent idle.
    """
    # Returns the private backing attribute maintained by _set_idle/_unset_idle.
    return self.__idle
def _set_idle(self, v, load=False):
    """
    Setter method for idle, mapped from YANG variable /system/cpus/cpu/state/idle (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_idle is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_idle() directly.
    YANG Description: Percentage of CPU time spent idle.
    """
    # Appears to be pyangbind-generated accessor code — avoid hand-editing.
    if hasattr(v, "_utype"):
        # Unwrap union-typed wrapper values before validation.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """idle must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__idle = t
    if hasattr(self, '_set'):
        # Notify the parent data tree that a child changed.
        self._set()
def _unset_idle(self):
    # Restore the generated default (empty container) for 'idle'.
    self.__idle = YANGDynClass(base=idle.idle, is_container='container', yang_name="idle", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_wait(self):
    """
    Getter method for wait, mapped from YANG variable /system/cpus/cpu/state/wait (container)
    YANG Description: Percentage of CPU time spent waiting for I/O.
    """
    # Returns the private backing attribute maintained by _set_wait/_unset_wait.
    return self.__wait
def _set_wait(self, v, load=False):
    """
    Setter method for wait, mapped from YANG variable /system/cpus/cpu/state/wait (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_wait is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_wait() directly.
    YANG Description: Percentage of CPU time spent waiting for I/O.
    """
    # Appears to be pyangbind-generated accessor code — avoid hand-editing.
    if hasattr(v, "_utype"):
        # Unwrap union-typed wrapper values before validation.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """wait must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__wait = t
    if hasattr(self, '_set'):
        # Notify the parent data tree that a child changed.
        self._set()
def _unset_wait(self):
    # Restore the generated default (empty container) for 'wait'.
    self.__wait = YANGDynClass(base=wait.wait, is_container='container', yang_name="wait", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_hardware_interrupt(self):
    """
    Getter method for hardware_interrupt, mapped from YANG variable /system/cpus/cpu/state/hardware_interrupt (container)
    YANG Description: Percentage of CPU time spent servicing hardware interrupts.
    """
    # Name-mangled attribute maintained by _set_hardware_interrupt/_unset_hardware_interrupt.
    return self.__hardware_interrupt
def _set_hardware_interrupt(self, v, load=False):
    """
    Setter method for hardware_interrupt, mapped from YANG variable /system/cpus/cpu/state/hardware_interrupt (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_hardware_interrupt is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_hardware_interrupt() directly.
    YANG Description: Percentage of CPU time spent servicing hardware interrupts.
    """
    # Unwrap values that carry their underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce/validate the value into the generated container type.
        t = YANGDynClass(v,base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """hardware_interrupt must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__hardware_interrupt = t
    # Notify the parent object (if any) that a child value changed.
    if hasattr(self, '_set'):
        self._set()
def _unset_hardware_interrupt(self):
    # Reset hardware_interrupt to a fresh, empty generated container instance.
    self.__hardware_interrupt = YANGDynClass(base=hardware_interrupt.hardware_interrupt, is_container='container', yang_name="hardware-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
def _get_software_interrupt(self):
    """
    Getter method for software_interrupt, mapped from YANG variable /system/cpus/cpu/state/software_interrupt (container)
    YANG Description: Percentage of CPU time spent servicing software interrupts
    """
    # Name-mangled attribute maintained by _set_software_interrupt/_unset_software_interrupt.
    return self.__software_interrupt
def _set_software_interrupt(self, v, load=False):
    """
    Setter method for software_interrupt, mapped from YANG variable /system/cpus/cpu/state/software_interrupt (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_software_interrupt is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_software_interrupt() directly.
    YANG Description: Percentage of CPU time spent servicing software interrupts
    """
    # Unwrap values that carry their underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce/validate the value into the generated container type.
        t = YANGDynClass(v,base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """software_interrupt must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)""",
        })
    self.__software_interrupt = t
    # Notify the parent object (if any) that a child value changed.
    if hasattr(self, '_set'):
        self._set()
def _unset_software_interrupt(self):
    # Reset software_interrupt to a fresh, empty generated container instance.
    self.__software_interrupt = YANGDynClass(base=software_interrupt.software_interrupt, is_container='container', yang_name="software-interrupt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='container', is_config=False)
# Read-only class properties exposing the YANG state leaves.
# These leaves are config: false, so no public setters are attached.
index = __builtin__.property(_get_index)
total = __builtin__.property(_get_total)
user = __builtin__.property(_get_user)
kernel = __builtin__.property(_get_kernel)
nice = __builtin__.property(_get_nice)
idle = __builtin__.property(_get_idle)
wait = | |
# model.py — MetaFun (source repository: jinxu06/MetaFun-Tensorflow)
from six.moves import zip
import math
from absl import flags
import numpy as np
import tensorflow as tf
import sonnet as snt
from tensorflow.contrib.layers import layer_norm
import data.classification as cls_data
FLAGS = flags.FLAGS
# Command-line hyperparameters; values are read via FLAGS when the model is constructed.
# Model Specification
flags.DEFINE_integer("num_iters", 1, "Number of iterations (T).")
flags.DEFINE_integer("dim_reprs", 64, "Dimension of the functional representation outputs (dim(r(x))).")
flags.DEFINE_integer("nn_size", 64, "Size of hidden layers in neural modules.")
flags.DEFINE_integer("nn_layers", 3, "Number of MLP layers in neural modules.")
flags.DEFINE_integer("embedding_layers", 1, "Num of embedding mlp layers.")
flags.DEFINE_float(
    "initial_inner_lr", 1.0, "The initial learning rate for functional updates.")
flags.DEFINE_boolean("use_kernel", False, "If True, use kernel; If False, use attention.")
flags.DEFINE_boolean("use_gradient", False, "If True, use gradient-based local updater; "
                     "If False, use neural local updater.")
flags.DEFINE_boolean("no_decoder", False, "Whether to remove decoder and directly use the functional "
                     "representation as the predictor .")
flags.DEFINE_string("initial_state_type", "zero", "Type of initial state (zero/constant/parametric)")
flags.DEFINE_string("attention_type", "dot_product", "Type of attention (only dot_product is supported now)")
flags.DEFINE_string("kernel_type", "se", "Type of kernel functions (se/deep_se)")
flags.DEFINE_boolean("repr_as_inputs", False, "If true, use reprs as inputs to the decoder; "
                     "If false, use reprs to generate weights of the predictor.")
# Regularisation
flags.DEFINE_float("dropout_rate", 0.0, "Rate of dropout.")
flags.DEFINE_float("l2_penalty_weight", 1e-8, "The weight measuring the "
                   "importance of the l2 regularization in the final loss. See λ₁ "
                   "in LEO paper.")
flags.DEFINE_float("orthogonality_penalty_weight", 1e-3, "The weight measuring "
                   "the importance of the decoder orthogonality regularization "
                   "in the final loss. See λ₂ in LEO paper.")
flags.DEFINE_float("label_smoothing", 0.0, "Label smoothing for classification tasks.")
class MetaFunClassifier(snt.AbstractModule):
def __init__(self, name="MetaFunClassifier"):
    """Construct a MetaFun classifier; all hyperparameters are read from FLAGS.

    :param name: sonnet module name.
    """
    super(MetaFunClassifier, self).__init__(name=name)
    self._float_dtype = tf.float32
    self._int_dtype = tf.int32
    # Components configurations
    self._use_kernel = FLAGS.use_kernel
    self._use_gradient = FLAGS.use_gradient
    self._attention_type = FLAGS.attention_type
    self._kernel_type = FLAGS.kernel_type
    self._no_decoder = FLAGS.no_decoder
    self._initial_state_type = FLAGS.initial_state_type
    # Architecture configurations
    self._nn_size = FLAGS.nn_size
    self._nn_layers = FLAGS.nn_layers
    self._dim_reprs = FLAGS.dim_reprs
    self._num_iters = FLAGS.num_iters
    self._embedding_layers = FLAGS.embedding_layers
    # BUG FIX: the no_decoder override must run *after* the FLAGS.dim_reprs
    # assignment above. Previously it ran before and was immediately
    # clobbered, so the no-decoder ablation used the full representation
    # width instead of 1.
    if self._no_decoder:
        self._dim_reprs = 1
    # Regularisation configurations
    self._l2_penalty_weight = FLAGS.l2_penalty_weight
    self._dropout_rate = FLAGS.dropout_rate
    self._label_smoothing = FLAGS.label_smoothing
    self._orthogonality_penalty_weight = FLAGS.orthogonality_penalty_weight
    # Data configurations
    self._num_classes = FLAGS.num_classes
    self._num_tr_examples_per_class = FLAGS.num_tr_examples_per_class
    self._num_val_examples_per_class = FLAGS.num_val_examples_per_class
    # Other configurations
    self._initial_inner_lr = FLAGS.initial_inner_lr
    self._nonlinearity = tf.nn.relu
def _build(self, data, is_training=True):
    """Run T iterations of functional updating and return loss/metrics.

    :param data: tuple convertible to cls_data.ClassificationDescription
        (tr_input, tr_output, val_input, val_output).
    :param is_training: enables dropout when True.
    :return: (batch validation loss + regularisation penalty,
        batch train accuracy, batch validation accuracy).
    """
    data = cls_data.ClassificationDescription(*data)
    self.is_training = is_training
    self.embedding_dim = data.tr_input.get_shape()[-1].value
    # initial states r_0 for both splits
    tr_reprs = self.forward_initialiser(data.tr_input)
    val_reprs = self.forward_initialiser(data.val_input)
    # learned inner-loop learning rate
    alpha = tf.compat.v1.get_variable("alpha", [1, 1], dtype=self._float_dtype,
        initializer=tf.constant_initializer(self._initial_inner_lr), trainable=True)
    # iterative functional updating: local updates are computed on the train
    # split and propagated to both splits via the kernel/attention
    for k in range(self._num_iters):
        updates = self.forward_local_updater(tr_reprs, data.tr_output, data.tr_input)
        tr_updates = alpha * self.forward_kernel_or_attention(querys=data.tr_input, keys=data.tr_input, values=updates)
        val_updates = alpha * self.forward_kernel_or_attention(querys=data.val_input, keys=data.tr_input, values=updates)
        tr_reprs += tr_updates
        val_reprs += val_updates
    # decode functional representations into classifier weights
    classifier_weights = self.forward_decoder(tr_reprs)
    tr_loss, tr_metric = self.calculate_loss_and_acc(
        data.tr_input, data.tr_output, classifier_weights)
    classifier_weights = self.forward_decoder(val_reprs)
    val_loss, val_metric = self.calculate_loss_and_acc(
        data.val_input, data.val_output, classifier_weights)
    # aggregate loss and metrics over the batch
    # BUG FIX: batch_tr_loss previously averaged val_loss instead of tr_loss.
    batch_tr_loss = tf.reduce_mean(tr_loss)
    batch_tr_metric = tf.reduce_mean(tr_metric)
    batch_val_loss = tf.reduce_mean(val_loss)
    batch_val_metric = tf.reduce_mean(val_metric)
    # final objective: validation loss plus l2/orthogonality penalties
    regularization_penalty = (
        self._l2_regularization + self._decoder_orthogonality_reg)
    return batch_val_loss + regularization_penalty, batch_tr_metric, batch_val_metric
### Initialiser r_0(x) ###
@snt.reuse_variables
def forward_initialiser(self, x):
    """Compute the initial functional representation r_0(x) for inputs x."""
    num_points = tf.shape(x)[0]
    state_type = self._initial_state_type
    if state_type == "parametric":
        reprs = self.parametric_initialiser(x)
    elif state_type == "constant":
        reprs = self.constant_initialiser(num_points, trainable=True)
    elif state_type == 'zero':
        reprs = self.constant_initialiser(num_points, trainable=False)
    else:
        raise NameError("Unknown initial state type")
    tf.compat.v1.logging.info("forwarded {0} initialiser".format(state_type))
    return reprs
# r_0(x) = c
@snt.reuse_variables
def constant_initialiser(self, num_points, trainable=False):
    """Constant initial state r_0(x) = c: zeros, or a learned vector when trainable."""
    with tf.compat.v1.variable_scope("constant_initialiser"):
        if trainable:
            base = tf.compat.v1.get_variable(
                "initial_state", [1, self._dim_reprs],
                dtype=self._float_dtype,
                initializer=tf.constant_initializer(0.0), trainable=True)
        else:
            base = tf.zeros([1, self._dim_reprs])
        # broadcast the single state to every point, then replicate per class
        tiled = tf.tile(base, [num_points, 1])
        return tf.concat([tiled] * self._num_classes, axis=-1)
# r_0(x) = MLP(x)
@snt.reuse_variables
def parametric_initialiser(self, x):
    """Parametric initial state r_0(x) = MLP(x), replicated once per class."""
    with tf.compat.v1.variable_scope("parametric_initialiser"):
        dropped = tf.nn.dropout(x, rate=self.dropout_rate)
        mlp = snt.nets.MLP(
            [self._nn_size] * self._nn_layers + [self._dim_reprs],
            activation=self._nonlinearity,
            use_bias=True,
            regularizers={"w": tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)},
            initializers={"w": tf.initializers.glorot_uniform(dtype=self._float_dtype)},
        )
        hidden = snt.BatchApply(mlp, n_dims=1)(dropped)
        # one copy of the representation per class
        return tf.concat([hidden] * self._num_classes, axis=-1)
### Local Updater u ###
@snt.reuse_variables
def forward_local_updater(self, r, y, x=None, iter=""):
    """Apply the local updater u to representations r, given labels y (and inputs x)."""
    if self._use_gradient:
        updates = self.gradient_local_updater(r=r, y=y, x=x, iter=iter)
        tf.compat.v1.logging.info("forwarded gradient local updater")
        return updates
    # the neural updater works per class: unfold the class axis first
    flat_shape = r.shape.as_list()
    per_class = tf.reshape(
        r, flat_shape[:-1] + [self._num_classes, flat_shape[-1] // self._num_classes])
    updates = self.neural_local_updater(r=per_class, y=y, x=x, iter=iter)
    updates = tf.reshape(updates, shape=flat_shape)
    tf.compat.v1.logging.info("forwarded neural local updater")
    return updates
#
@snt.reuse_variables
def neural_local_updater(self, r, y, x=None, iter=""):
    """Neural local updater u: maps per-class representations and labels to updates.

    :param r: representations with an explicit class axis,
        [..., num_classes, dim_reprs] (reshaped by the caller).
    :param y: integer class labels; one-hot encoded below.
    :param x: unused here (kept for signature parity with the gradient updater).
    :param iter: optional scope suffix (separate weights per iteration).
    :return: update tensor with the same shape as r.
    """
    with tf.compat.v1.variable_scope("neural_local_updater{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
        y = tf.one_hot(y, self._num_classes)
        # align the one-hot class axis with r's class axis
        # (assumes y is rank 3 after one_hot — TODO confirm against caller)
        y = tf.transpose(y, perm=[0, 2, 1])
        # reprs = tf.nn.dropout(reprs, rate=self.dropout_rate)
        regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
        initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
        # MLP m: per-class feature extractor
        module1 = snt.nets.MLP(
            [self._nn_size] * self._nn_layers,
            activation=self._nonlinearity,
            use_bias=True,
            regularizers={"w": regularizer},
            initializers={"w": initializer},
        )
        outputs = snt.BatchApply(module1, n_dims=2)(r)
        # mean over the class axis, broadcast back so every class also sees
        # a summary of all classes
        agg_outputs = tf.reduce_mean(outputs, axis=-2, keepdims=True)
        outputs = tf.concat([outputs, tf.tile(agg_outputs, [1,self._num_classes,1])], axis=-1)
        # MLP u+: update used where the label matches the class slot
        module2 = snt.nets.MLP(
            [self._nn_size] * self._nn_layers + [self._dim_reprs],
            activation=self._nonlinearity,
            use_bias=True,
            regularizers={"w": regularizer},
            initializers={"w": initializer},
            name="true",
        )
        outputs_t = snt.BatchApply(module2, n_dims=2)(outputs)
        # MLP u-: update used where the label does not match the class slot
        module3 = snt.nets.MLP(
            [self._nn_size] * self._nn_layers + [self._dim_reprs],
            activation=self._nonlinearity,
            use_bias=True,
            regularizers={"w": regularizer},
            initializers={"w": initializer},
            name="false",
        )
        outputs_f = snt.BatchApply(module3, n_dims=2)(outputs)
        # select u+ on the true class and u- elsewhere via the one-hot mask
        outputs = outputs_t * y + outputs_f * (1-y)
        return outputs
# gradient-based local updater, used in ablation study
@snt.reuse_variables
def gradient_local_updater(self, r, y, x=None, iter=""):
    """Gradient-based local updater (ablation): u = -lr * d(inner loss)/d(r).

    :param r: functional representations for the train inputs.
    :param y: train labels.
    :param x: train inputs, required to evaluate the inner loss.
    :param iter: optional scope suffix (separate lr variable per iteration).
    :return: additive updates with the same shape as r.
    """
    with tf.compat.v1.variable_scope("gradient_local_updater{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
        # learned per-dimension learning rate
        lr = tf.compat.v1.get_variable(
            "lr", [1, self._num_classes * self._dim_reprs],
            dtype=self._float_dtype,
            initializer=tf.constant_initializer(1.0), trainable=True)
        classifier_weights = self.forward_decoder(r)
        tr_loss, _= self.calculate_loss_and_acc(
            x, y, classifier_weights)
        batch_tr_loss = tf.reduce_mean(tr_loss)
        # backprop the inner loss to the representations only
        loss_grad = tf.gradients(batch_tr_loss, r)[0]
        updates = - lr * loss_grad
        return updates
### Kernel and Attention ###
@snt.reuse_variables
def forward_kernel_or_attention(self, querys, keys, values, iter=""):
    """Propagate local updates (values at keys) to querys via kernel or attention."""
    if not self._use_kernel:
        out = self.attention_block(querys, keys, values, iter=iter)
        tf.compat.v1.logging.info("forwarded {0} attention".format(self._attention_type))
        return out
    if self._kernel_type == "se":
        out = self.squared_exponential_kernel(querys, keys, values, iter=iter)
    elif self._kernel_type == 'deep_se':
        out = self.deep_se_kernel(querys, keys, values, iter=iter)
    else:
        raise NameError("Unknown kernel type")
    tf.compat.v1.logging.info("forwarded {0} kernel".format(self._kernel_type))
    return out
@snt.reuse_variables
def squared_exponential_kernel(self, querys, keys, values, iter=""):
    """SE-kernel aggregation: v_q = sum_k sigma^2 exp(-||k - q||^2 / (2 l^2)) values_k.

    sigma and lengthscale are learned scalars, created under a scope
    suffixed by `iter` (separate hyperparameters per iteration).
    """
    num_keys = tf.shape(keys)[0]
    num_querys = tf.shape(querys)[0]
    with tf.compat.v1.variable_scope("squared_exponential_kernel{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
        sigma = tf.compat.v1.get_variable("sigma", shape=(), dtype=self._float_dtype,
            initializer=tf.constant_initializer(1.0), trainable=True)
        lengthscale = tf.compat.v1.get_variable("lengthscale", shape=(), dtype=self._float_dtype,
            initializer=tf.constant_initializer(1.0), trainable=True)
        # pairwise squared distances between every key and every query
        _keys = tf.tile(tf.expand_dims(keys, axis=1), [1, num_querys, 1])
        _querys = tf.tile(tf.expand_dims(querys, axis=0), [num_keys, 1, 1])
        sq_norm = tf.reduce_sum((_keys - _querys)**2, axis=-1)
        # kernel matrix, indexed [key, query]
        kernel_qk = sigma**2 * tf.exp(- sq_norm / (2.*lengthscale**2))
        k = kernel_qk
        # kernel-weighted sum of values over keys -> one row per query
        v = tf.einsum('kq,kv->qv', k, values)
        return v
@snt.reuse_variables
def deep_se_kernel(self, querys, keys, values, iter=""):
    """SE kernel on deep embeddings: keys/querys first pass through a shared MLP.

    The embedding MLP lives in a scope suffixed by `iter`; sigma and
    lengthscale live in a separate "deep_se_kernel" scope.
    """
    with tf.compat.v1.variable_scope("deep_se_kernel{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
        # deep embedding of keys and querys (same MLP applied to both)
        regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
        initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
        module = snt.nets.MLP(
            [self.embedding_dim] * self._embedding_layers,
            activation=self._nonlinearity,
            use_bias=True,
            regularizers={"w": regularizer},
            initializers={"w": initializer},
        )
        keys = snt.BatchApply(module, n_dims=1)(keys)
        querys = snt.BatchApply(module, n_dims=1)(querys)
    num_keys = tf.shape(keys)[0]
    num_querys = tf.shape(querys)[0]
    with tf.compat.v1.variable_scope("deep_se_kernel"):
        sigma = tf.compat.v1.get_variable("sigma", shape=(), dtype=self._float_dtype,
            initializer=tf.constant_initializer(1.0), trainable=True)
        lengthscale = tf.compat.v1.get_variable("lengthscale", shape=(), dtype=self._float_dtype,
            initializer=tf.constant_initializer(1.0), trainable=True)
    # compute \sum_i k(x, x_i)u_i
    _keys = tf.tile(tf.expand_dims(keys, axis=1), [1, num_querys, 1])
    _querys = tf.tile(tf.expand_dims(querys, axis=0), [num_keys, 1, 1])
    sq_norm = tf.reduce_sum((_keys - _querys)**2, axis=-1)
    # kernel matrix, indexed [key, query]
    kernel_qk = sigma**2 * tf.exp(- sq_norm / (2.*lengthscale**2))
    k = kernel_qk
    # kernel-weighted sum of values over keys -> one row per query
    v = tf.einsum('kq,kv->qv', k, values)
    return v
@snt.reuse_variables
def attention_block(self, querys, keys, values, iter=""):
    """Aggregate values at keys for each query with (dot-product) attention."""
    config = {
        "rep": "mlp",
        "output_sizes": [self.embedding_dim] * self._embedding_layers,
        "att_type": self._attention_type,
        "normalise": True,
        "scale": 1.0,
        "l2_penalty_weight": self._l2_penalty_weight,
        "nonlinearity": self._nonlinearity,
    }
    with tf.compat.v1.variable_scope("attention_block{}".format(iter), reuse=tf.compat.v1.AUTO_REUSE):
        # Attention is a project-level module (defined elsewhere in this file)
        return Attention(config=config)(keys, querys, values)
### Decoder ###
@snt.reuse_variables
def forward_decoder(self, cls_reprs):
    """Decode functional representations into per-class classifier weights."""
    if self._no_decoder:
        # ablation: the representation itself acts as the predictor
        tf.compat.v1.logging.info("no decoder used")
        return cls_reprs
    shape = cls_reprs.shape.as_list()
    per_class = tf.reshape(cls_reprs, shape[:-1] + [self._num_classes, self._dim_reprs])
    dist_params = self.decoder(per_class)
    # glorot-style stddev offset for sampling the softmax weights
    fan_in, fan_out = self.embedding_dim, self._num_classes
    offset = np.sqrt(2. / (fan_out + fan_in))
    return self.sample(dist_params, stddev_offset=offset)
# this decoder generates weights of softmax
@snt.reuse_variables
def decoder(self, inputs):
    """Linear decoder producing the weight-distribution parameters.

    Output width is 2 * embedding_dim: means and unnormalised stddevs,
    split later in sample(). Side effect: stores the orthogonality
    penalty on the decoder weight matrix in self._orthogonality_reg.
    """
    with tf.compat.v1.variable_scope("decoder"):
        l2_regularizer = tf.contrib.layers.l2_regularizer(self._l2_penalty_weight)
        # get_orthogonality_regularizer is a module-level helper (defined elsewhere)
        orthogonality_reg = get_orthogonality_regularizer(
            self._orthogonality_penalty_weight)
        initializer = tf.initializers.glorot_uniform(dtype=self._float_dtype)
        # 2 * embedding_dim, because we are returning means and variances
        decoder_module = snt.Linear(
            self.embedding_dim * 2,
            use_bias=True,
            regularizers={"w": l2_regularizer},
            initializers={"w": initializer},
        )
        outputs = snt.BatchApply(decoder_module, n_dims=2)(inputs)
        # stash the orthogonality regulariser for the final loss
        self._orthogonality_reg = orthogonality_reg(decoder_module.w)
        return outputs
### Other ###
@property
def dropout_rate(self):
    """Dropout rate in effect: the configured rate while training, else 0."""
    if self.is_training:
        return self._dropout_rate
    return 0.0
@property
def _l2_regularization(self):
    """Sum of all collected l2 regularisation losses, cast to the model float dtype."""
    losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
    return tf.cast(tf.reduce_sum(losses), dtype=self._float_dtype)
def loss_fn(self, model_outputs, original_classes):
    """Per-example softmax cross-entropy with optional label smoothing."""
    labels = tf.squeeze(original_classes, axis=-1)
    onehot = tf.one_hot(labels, depth=self._num_classes)
    return tf.compat.v1.losses.softmax_cross_entropy(
        onehot_labels=onehot, logits=model_outputs,
        label_smoothing=self._label_smoothing,
        reduction=tf.compat.v1.losses.Reduction.NONE)
def predict(self, inputs, weights):
    """Compute logits for inputs given per-example classifier weights."""
    if self._no_decoder:
        return weights
    dropped = tf.nn.dropout(inputs, rate=self.dropout_rate)
    return tf.einsum("ik,imk->im", dropped, weights)
def calculate_loss_and_acc(self, inputs, true_outputs, classifier_weights):
    """Return (per-example loss, accuracy) for the given classifier weights."""
    logits = self.predict(inputs, classifier_weights)
    predictions = tf.argmax(logits, -1, output_type=self._int_dtype)
    accuracy = tf.contrib.metrics.accuracy(
        predictions, tf.squeeze(true_outputs, axis=-1))
    loss = self.loss_fn(logits, true_outputs)
    return loss, accuracy
def sample(self, distribution_params, stddev_offset=0.):
means, unnormalized_stddev = tf.split(distribution_params, 2, axis=-1)
stddev = tf.exp(unnormalized_stddev)
stddev -= (1. - stddev_offset)
stddev = tf.maximum(stddev, 1e-10)
distribution = tf.distributions.Normal(loc=means, scale=stddev)
if not self.is_training:
return | |
of
each sequence must be equal to the length of each sequence of `entity_spans`. If you specify
`entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences
is automatically constructed by filling it with the [MASK] entity.
entities_pair (`List[str]`, `List[List[str]]`, *optional*):
The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings
representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los
Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of
each sequence must be equal to the length of each sequence of `entity_spans_pair`. If you specify
`entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity
sequences is automatically constructed by filling it with the [MASK] entity.
max_mention_length (`int`):
The entity_position_ids's length.
max_seq_len (int, optional):
If set to a number, will limit the total sequence returned so
that it has a maximum length. If there are overflowing tokens,
those overflowing tokens will be added to the returned dictionary
when `return_overflowing_tokens` is `True`. Defaults to `None`.
stride (int, optional):
Only available for batch input of sequence pair and mainly for
question answering usage. When for QA, `text` represents questions
and `text_pair` represents contexts. If `stride` is set to a
positive number, the context will be split into multiple spans
where `stride` defines the number of (tokenized) tokens to skip
from the start of one span to get the next span, thus will produce
a bigger batch than inputs to include all spans. Moreover, 'overflow_to_sample'
and 'offset_mapping' preserving the original example and position
information will be added to the returned dictionary. Defaults to 0.
add_prefix_space (bool, optional):
The tokenizer will add a space at the beginning of the sentence when it set to `True`.
Defaults to `False`.
pad_to_max_seq_len (bool, optional):
If set to `True`, the returned sequences would be padded up to
`max_seq_len` specified length according to padding side
(`self.padding_side`) and padding token id. Defaults to `False`.
truncation_strategy (str, optional):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence
until the input is under `max_seq_len` starting from the longest
one at each token (when there is a pair of input sequences).
- 'only_first': Only truncate the first sequence.
- 'only_second': Only truncate the second sequence.
- 'do_not_truncate': Do not truncate (raise an error if the input
sequence is longer than `max_seq_len`).
Defaults to 'longest_first'.
return_position_ids (bool, optional):
Whether to include tokens position ids in the returned dictionary.
Defaults to `False`.
return_token_type_ids (bool, optional):
Whether to include token type ids in the returned dictionary.
Defaults to `True`.
return_attention_mask (bool, optional):
Whether to include the attention mask in the returned dictionary.
Defaults to `False`.
return_length (bool, optional):
Whether to include the length of each encoded inputs in the
returned dictionary. Defaults to `False`.
return_overflowing_tokens (bool, optional):
Whether to include overflowing token information in the returned
dictionary. Defaults to `False`.
return_special_tokens_mask (bool, optional):
Whether to include special tokens mask information in the returned
dictionary. Defaults to `False`.
Returns:
dict or list[dict] (for batch input):
The dict has the following optional items:
- **input_ids** (list[int]): List of token ids to be fed to a model.
- **position_ids** (list[int], optional): List of token position ids to be
fed to a model. Included when `return_position_ids` is `True`
- **token_type_ids** (list[int], optional): List of token type ids to be
fed to a model. Included when `return_token_type_ids` is `True`.
- **attention_mask** (list[int], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `return_attention_mask` is `True`.
- **entity_ids** (list[int]): List of token ids to be fed to a model. Included when
`entity_spans` is not `None`.
- **entity_position_ids** (list[int], optional): List of token position ids to be
fed to a model. Included when `entity_spans` is not `None`.
- **entity_segment_ids** (list[int], optional): List of token type ids to be
fed to a model. Included when `entity_spans` is not `None`.
- **entity_attention_mask** (list[int], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `entity_spans` is not `None`.
- **seq_len** (int, optional): The input_ids length. Included when `return_length`
is `True`.
- **overflowing_tokens** (list[int], optional): List of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **num_truncated_tokens** (int, optional): The number of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **special_tokens_mask** (list[int], optional): List of integers valued 0 or 1,
with 0 specifying special added tokens and 1 specifying sequence tokens.
Included when `return_special_tokens_mask` is `True`.
- **offset_mapping** (list[int], optional): list of pair preserving the
index of start and end char in original input for each token.
For a special token, the index pair is `(0, 0)`. Included when
`stride` works.
- **overflow_to_sample** (int, optional): Index of example from which this
feature is generated. Included when `stride` works.
"""
global _add_prefix_space
if add_prefix_space:
_add_prefix_space = True
encode_output = super(LukeTokenizer, self).__call__(
text,
text_pair=text_pair,
max_seq_len=max_seq_len,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_max_seq_len=pad_to_max_seq_len,
truncation_strategy=truncation_strategy,
return_position_ids=return_position_ids,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_length=return_length,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask)
if not entity_spans:
return encode_output
is_batched = bool(
(not is_split_into_words and isinstance(text, (list, tuple))) or
(is_split_into_words and isinstance(text, (list, tuple)) and
text and isinstance(text[0], (list, tuple))))
if is_batched:
if entities is None:
entities = [None] * len(entity_spans)
for i, ent in enumerate(zip(entities, entity_spans, text)):
entity_encode = self.entity_encode(ent[2], ent[0],
max_mention_length, ent[1])
encode_output[i].update(entity_encode)
if entity_spans_pair:
if entities_pair is None:
entities_pair = [None] * len(entity_spans_pair)
for i, ent in enumerate(
zip(entities_pair, entity_spans_pair, text_pair)):
entity_encode = self.entity_encode(
ent[2], ent[0], max_mention_length, ent[1], 1,
encode_output[i]['input_ids'].index(self.sep_token_id) +
2)
for k in entity_encode.keys():
encode_output[i][k] = encode_output[i][
k] + entity_encode[k]
else:
entity_encode = self.entity_encode(text, entities,
max_mention_length, entity_spans)
encode_output.update(entity_encode)
if entity_spans_pair:
entity_encode = self.entity_encode(
text_pair, entities_pair, max_mention_length,
entity_spans_pair, 1,
encode_output['input_ids'].index(self.sep_token_id) + 2)
for k in entity_encode.keys():
encode_output[k] = encode_output[k] + entity_encode[k]
return encode_output
def tokenize(self, text, add_prefix_space=False):
    """
    Tokenize a string.
    Args:
        text (str):
            The sentence to be tokenized.
        add_prefix_space (boolean, default False):
            Begin the sentence with at least one space to get invariance
            to word order in GPT-2 (and Luke) tokenizers.
    """
    # module-level switch (set by __call__); forces the prefix space globally
    if _add_prefix_space:
        add_prefix_space = True

    def split_on_token(tok, text):
        # Split `text` on the special token `tok`, re-inserting `tok`
        # between the pieces so it survives as a standalone token.
        result = []
        split_text = text.split(tok)
        for i, sub_text in enumerate(split_text):
            sub_text = sub_text.strip()
            if i == 0 and not sub_text:
                # text started with the token itself
                result += [tok]
            elif i == len(split_text) - 1:
                # last piece: no trailing token to re-insert
                if sub_text:
                    result += [sub_text]
                else:
                    pass
            else:
                if sub_text:
                    result += [sub_text]
                result += [tok]
        return result

    def split_on_tokens(tok_list, text):
        # Iteratively split on every added/special token, then BPE-tokenize
        # only the ordinary text pieces; special tokens pass through intact.
        if not text.strip():
            return []
        if not tok_list:
            return self._tokenize(text, add_prefix_space)
        tokenized_text = []
        text_list = [text]
        for tok in tok_list:
            tokenized_text = []
            for sub_text in text_list:
                if sub_text not in self.added_tokens_encoder \
                        and sub_text not in self._all_special_tokens:
                    tokenized_text += split_on_token(tok, sub_text)
                else:
                    tokenized_text += [sub_text]
            text_list = tokenized_text
        return list(itertools.chain.from_iterable((self._tokenize(token, add_prefix_space) if token not \
            in self.added_tokens_encoder and token not in self._all_special_tokens \
            else [token] for token in tokenized_text)))

    added_tokens = list(self.added_tokens_encoder.keys(
    )) + self._all_special_tokens
    tokenized_text = split_on_tokens(added_tokens, text)
    return tokenized_text
def bpe(self, token):
    """Apply byte-pair-encoding merges to a single pre-token.

    Repeatedly merges the highest-ranked adjacent symbol pair (per
    self.bpe_ranks) until no mergeable pair remains; results are cached
    in self.cache. Returns the merged symbols joined by single spaces.
    """
    if token in self.cache:
        return self.cache[token]
    word = tuple(token)
    pairs = get_pairs(word)
    if not pairs:
        # single-symbol token: nothing to merge
        return token
    while True:
        # best (lowest-rank) candidate pair; unknown pairs rank as +inf
        bigram = min(
            pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
        if bigram not in self.bpe_ranks:
            break
        first, second = bigram
        new_word = []
        i = 0
        while i < len(word):
            try:
                j = word.index(first, i)
            except ValueError:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; only ValueError ("not
                # found" from word.index) is expected here.
                new_word.extend(word[i:])
                break
            new_word.extend(word[i:j])
            i = j
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = get_pairs(word)
    word = ' '.join(word)
    self.cache[token] = word
    return word
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
text = ''.join(tokens)
text = | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Parser
------
Parser for extracting relevant information from Texas Dallas
County Felony and Misdemeanor Courts using regular expressions.
"""
from dallasparser.regex import *
from dallasparser.utils import *
from openpyxl import load_workbook
from dateutil.parser import parse
from bs4 import BeautifulSoup
from tqdm import tqdm
import unicodedata
import pandas as pd
import sys
import csv
import re
import os
class TXDallasParser:
"""
Main parser class.
:param input_path: Absolute path of HTML folder.
:type input_path: str
:param output_path: Absolute folder path of XLSX output files.
:type output_path: str
"""
# Maps each output table name to its ordered header list (header constants
# come from the dallasparser star imports).
COLUMN_ORDER = {
    'judicial_information': JUDICIAL_HEADERS,
    'sets_and_passes': SETS_HEADERS,
    'names': NAMES_HEADERS,
    'bonds': BONDS_HEADERS,
    'charges': CHARGES_HEADERS,
    'dispositions': DISPOSITIONS_HEADERS,
    'reduced_enhanced_charges': RED_ENH_HEADERS,
    'general_comments': GC_HEADERS,
    'general_comments_ws_date': GC_WS_DATE_HEADERS,
    'motions': MOTIONS_HEADERS,
    'probation_revocation': PROB_REVOC_HEADERS,
    'appeals': APPEALS_HEADERS,
    'competency_data': COMPETENCY_HEADERS,
    'payments': PAYMENTS_HEADERS,
    'bond_comments': BOND_COMMENTS_HEADERS
}
"""
Column order for final exported XLSX files for each table. To modify, see `utils.py`.
"""
def __init__(self, input_path=None, output_path=None):
    """Store the input (HTML) and output (XLSX) folder paths.

    :param input_path: Absolute path of the folder containing the HTML files.
    :param output_path: Absolute folder path for the XLSX output files.
    """
    self.input_path = input_path
    self.output_path = output_path
def extract_tables(self, trs):
"""
Method for separating all of the HTML elements to appropriate list for a given table.
:param trs: List of <tr> objects excluding table header elements
:type trs: list
:rtype: tuple
"""
IS_JUDICIAL = False
IS_BOND = False
IS_SETS = False
IS_NAMES = False
IS_CHARGE = False
IS_DISP = False
IS_RED_ENH = False
IS_COMMENT = False
IS_COMMENT_WS = False
IS_MOTION = False
IS_PROB_REVOC = False
IS_APPEAL = False
IS_COMP = False
IS_PAYMENT = False
IS_BOND_COMMENT = False
JUDICIAL_TRS = list()
BONDS_TRS = list()
SETS_TRS = list()
NAMES_TRS = list()
CHARGE_TRS = list()
DISP_TRS = list()
RED_ENH_TRS = list()
COMMENT_TRS = list()
COMMENT_WS_TRS = list()
MOTION_TRS = list()
PROB_REVOC_TRS = list()
APPEAL_TRS = list()
COMP_TRS = list()
PAYMENT_TRS = list()
BOND_COMMENT_TRS = list()
for tr in trs:
if tr.td.b is not None:
header = re.sub(r'\s+', '', tr.text.strip())
if header=='JUDICIALINFORMATION': IS_JUDICIAL = True
else: IS_JUDICIAL = False
if header=='BONDS': IS_BOND = True
else: IS_BOND = False
if header=='SETSANDPASSES': IS_SETS = True
else: IS_SETS = False
if header=='NAMES': IS_NAMES = True
else: IS_NAMES = False
if header=='CHARGE': IS_CHARGE = True
else: IS_CHARGE = False
if header=='DISPOSITION': IS_DISP = True
else: IS_DISP = False
if header=='REDUCED/ENHANCEDCHARGE': IS_RED_ENH = True
else: IS_RED_ENH = False
if header=='GENERALCOMMENTS': IS_COMMENT = True
else: IS_COMMENT = False
if header=='GENERALCOMMENTSWSDATE': IS_COMMENT_WS = True
else: IS_COMMENT_WS = False
if header=='MOTIONS': IS_MOTION = True
else: IS_MOTION = False
if header=='PROBATIONREVOCATION': IS_PROB_REVOC = True
else: IS_PROB_REVOC = False
if header=='APPEALS': IS_APPEAL = True
else: IS_APPEAL = False
if header=='COMPETENCYDATA' or header=='BAILBONDHEARING/COMPETENCYDATA': IS_COMP = True
else: IS_COMP = False
if header=='PAYMENTS': IS_PAYMENT = True
else: IS_PAYMENT = False
if header=='BONDCOMMENTS': IS_BOND_COMMENT = True
else: IS_BOND_COMMENT = False
else:
content = unicodedata.normalize('NFKD', tr.text.strip())
if IS_JUDICIAL: JUDICIAL_TRS.append(content)
elif IS_SETS: SETS_TRS.append(content)
elif IS_NAMES: NAMES_TRS.append(content)
elif IS_BOND: BONDS_TRS.append(content)
elif IS_CHARGE: CHARGE_TRS.append(content)
elif IS_DISP: DISP_TRS.append(content)
elif IS_RED_ENH: RED_ENH_TRS.append(content)
elif IS_COMMENT: COMMENT_TRS.append(content)
elif IS_COMMENT_WS: COMMENT_WS_TRS.append(content)
elif IS_MOTION: MOTION_TRS.append(content)
elif IS_PROB_REVOC: PROB_REVOC_TRS.append(content)
elif IS_APPEAL: APPEAL_TRS.append(content)
elif IS_COMP: COMP_TRS.append(content)
elif IS_PAYMENT: PAYMENT_TRS.append(content)
elif IS_BOND_COMMENT: BOND_COMMENT_TRS.append(content)
return (JUDICIAL_TRS, SETS_TRS, NAMES_TRS, BONDS_TRS, CHARGE_TRS,
DISP_TRS, RED_ENH_TRS, COMMENT_TRS, COMMENT_WS_TRS,
MOTION_TRS, PROB_REVOC_TRS, APPEAL_TRS, COMP_TRS,
PAYMENT_TRS, BOND_COMMENT_TRS)
def get_judicial_information(self, judicial_trs, da_case_id, jd_case_id):
"""
Extract `Judicial Information` Table.
:param judicial_trs: List of <tr> elements in `Judicial Information` section.
:type judical_trs: list
:param da_case_id: DA Case ID used for linkage.
:type da_case_id: str
:param jd_case_id: Judicial Case ID used for linkage.
:type jd_case_id: str
:rtype: dict
"""
DATA_DICT = dict()
DATA_DICT['da_case_id'] = da_case_id
DATA_DICT['jd_case_id'] = jd_case_id
### JUDICIAL_TRS[0]
DEF_NAME = DEF_NAME_REGEX.search(judicial_trs[0]).group().strip()
DEF_RACE = DEF_RACE_REGEX.search(judicial_trs[0]).group().strip()
DEF_SEX = DEF_SEX_REGEX.search(judicial_trs[0]).group().strip()
DEF_DOB = DEF_DOB_REGEX.search(judicial_trs[0]).group().strip()
DEF_AGE = DEF_AGE_REGEX.search(judicial_trs[0]).group().strip()
name = clean_val(DEF_NAME)
DATA_DICT['name_raw'] = name
DATA_DICT['race'] = clean_val(DEF_RACE)
DATA_DICT['sex'] = clean_val(DEF_SEX)
DATA_DICT['dob'] = clean_val(DEF_DOB)
DATA_DICT['age'] = clean_val(DEF_AGE)
### JUDICIAL_TRS[1]
DEF_ADDR = DEF_ADDR_REGEX.search(judicial_trs[1]).group().strip()
DEF_AC = DEF_AC_REGEX.search(judicial_trs[1]).group().strip()
DEF_PH = DEF_PH_REGEX.search(judicial_trs[1]).group().strip()
DEF_SS = DEF_SS_REGEX.search(judicial_trs[1]).group().strip()
DATA_DICT['def_adr1'] = clean_val(DEF_ADDR)
DATA_DICT['ac'] = clean_val(DEF_AC)
DATA_DICT['ph'] = clean_val(DEF_PH)
DATA_DICT['ss'] = clean_val(DEF_SS)
### JUDICIAL_TRS[2]
DEF_CITY = DEF_CITY_REGEX.search(judicial_trs[2]).group().strip()
DEF_STATE = DEF_STATE_REGEX.search(judicial_trs[2]).group().strip()
DEF_ZIP = DEF_ZIP_REGEX.search(judicial_trs[2]).group().strip()
DEF_DL_NUM = DEF_DL_NUM_REGEX.search(judicial_trs[2]).group().strip()
DEF_DL_STATE = DEF_DL_STATE_REGEX.search(judicial_trs[2]).group().strip()
DATA_DICT['city'] = clean_val(DEF_CITY)
DATA_DICT['state'] = clean_val(DEF_STATE)
DATA_DICT['zip'] = clean_val(DEF_ZIP)
DATA_DICT['dl_number'] = clean_val(DEF_DL_NUM)
DATA_DICT['dl_state'] = clean_val(DEF_DL_STATE)
### JUDICIAL_TRS[3]
DEF_OFF = DEF_OFF_REGEX.search(judicial_trs[3]).group().strip()
DEF_OFF_DT = DEF_OFF_DT_REGEX.search(judicial_trs[3]).group().strip()
DEF_OFF_TYP_CLS = DEF_OFF_TYP_CLS_REGEX.search(judicial_trs[3]).group().strip().split()
DEF_OFF_TYPE = DEF_OFF_TYP_CLS[0].strip()
DEF_OFF_CLS = DEF_OFF_TYP_CLS[1].strip()
DEF_OFF_GOC_CAT = DEF_OFF_GOC_CAT_REGEX.search(judicial_trs[3]).group().strip().split()
DEF_OFF_GOC = DEF_OFF_GOC_CAT[0].strip()
DEF_OFF_CAT = DEF_OFF_GOC_CAT[1].strip()
DEF_OFF_CODE = DEF_OFF_CODE_REGEX.search(judicial_trs[3]).group().strip()
DATA_DICT['offense'] = clean_val(DEF_OFF)
DATA_DICT['offense_date'] = clean_val(DEF_OFF_DT)
DATA_DICT['offense_type'] = clean_val(DEF_OFF_TYPE)
DATA_DICT['offense_class'] = clean_val(DEF_OFF_CLS)
DATA_DICT['goc'] = clean_val(DEF_OFF_GOC)
DATA_DICT['cat'] = clean_val(DEF_OFF_CAT)
DATA_DICT['offense_code'] = clean_val(DEF_OFF_CODE)
### JUDICIAL_TRS[4]
DEF_COMT = DEF_COMT_REGEX.search(judicial_trs[4]).group().strip()
DEF_SID_NUM = DEF_SID_NUM_REGEX.search(judicial_trs[4]).group().strip()
try:
DEF_OF_AMT = DEF_OF_AMT_REGEX.search(judicial_trs[4]).group().strip()
except AttributeError:
DEF_OF_AMT = ''
DATA_DICT['comt'] = clean_val(DEF_COMT)
DATA_DICT['sid_number'] = clean_val(DEF_SID_NUM)
DATA_DICT['of_amt'] = clean_val(DEF_OF_AMT)
### JUDICIAL_TRS[5]
DEF_COMPLAINANT = DEF_COMPLAINANT_REGEX.search(judicial_trs[5]).group().strip()
DEF_TAPE_NUM = DEF_TAPE_NUM_REGEX.search(judicial_trs[5]).group().strip()
DEF_ARREST_DATE = DEF_ARREST_DATE_REGEX.search(judicial_trs[5]).group().strip()
DATA_DICT['complainant'] = clean_val(DEF_COMPLAINANT)
DATA_DICT['tape_number'] = clean_val(DEF_TAPE_NUM)
DATA_DICT['arrest_date'] = clean_val(DEF_ARREST_DATE)
### JUDICIAL_TRS[6]
DEF_JUV_STAT = DEF_JUV_STAT_REGEX.search(judicial_trs[6]).group().strip()
DEF_REPEAT_STAT = DEF_REPEAT_STAT_REGEX.search(judicial_trs[6]).group().strip()
DEF_CAREER_STAT = DEF_CAREER_STAT_REGEX.search(judicial_trs[6]).group().strip()
DEF_ORIG_LOC = DEF_ORIG_LOC_REGEX.search(judicial_trs[6]).group().strip()
DEF_CURR_LOC = DEF_CURR_LOC_REGEX.search(judicial_trs[6]).group().strip()
DATA_DICT['juvenile_status'] = clean_val(DEF_JUV_STAT)
DATA_DICT['repeat_offender'] = clean_val(DEF_REPEAT_STAT)
DATA_DICT['career_offender'] = clean_val(DEF_CAREER_STAT)
DATA_DICT['orig_loc'] = clean_val(DEF_ORIG_LOC)
DATA_DICT['curr_loc'] = clean_val(DEF_CURR_LOC)
### JUDICIAL_TRS[7]
DEF_FILING_AGENCY = DEF_FILING_AGENCY_REGEX.search(judicial_trs[7]).group().strip()
DEF_SER_CASE_NUM = DEF_SER_CASE_NUM_REGEX.search(judicial_trs[7]).group().strip()
DEF_ARREST_NUM = DEF_ARREST_NUM_REGEX.search(judicial_trs[7]).group().strip()
DATA_DICT['filing_agency'] = clean_val(DEF_FILING_AGENCY)
DATA_DICT['ser_cas_number'] = clean_val(DEF_SER_CASE_NUM)
DATA_DICT['arrest_number'] = clean_val(DEF_ARREST_NUM)
### JUDICIAL_TRS[8]
DEF_LAI_NUM = DEF_LAI_NUM_REGEX.search(judicial_trs[8]).group().strip()
DEF_AIS_DSO_NUM = DEF_AIS_DSO_NUM_REGEX.search(judicial_trs[8]).group().strip()
DEF_BOOKING_NUM = DEF_BOOKING_NUM_REGEX.search(judicial_trs[8]).group().strip()
DATA_DICT['lai_number'] = clean_val(DEF_LAI_NUM)
DATA_DICT['ais_dso_number'] = clean_val(DEF_AIS_DSO_NUM)
DATA_DICT['booking_number'] = clean_val(DEF_BOOKING_NUM)
### JUDICIAL_TRS[9]
DEF_JP_FILE_DATE = DEF_JP_FILE_DATE_REGEX.search(judicial_trs[9]).group().strip()
DEF_JP_CASE_ID = DEF_JP_CASE_ID_REGEX.search(judicial_trs[9]).group().strip()
DEF_JP_COURT_ID = DEF_JP_COURT_ID_REGEX.search(judicial_trs[9]).group().strip()
DEF_FED = DEF_FED_REGEX.search(judicial_trs[9]).group().strip()
DEF_EVH = DEF_EVH_REGEX.search(judicial_trs[9]).group().strip()
DEF_AFF = DEF_AFF_REGEX.search(judicial_trs[9]).group().strip()
DATA_DICT['jp_file_date'] = clean_val(DEF_JP_FILE_DATE)
DATA_DICT['jp_case_id'] = clean_val(DEF_JP_CASE_ID)
DATA_DICT['jp_court_id'] = clean_val(DEF_JP_COURT_ID)
DATA_DICT['fed'] = clean_val(DEF_FED)
DATA_DICT['evh'] = clean_val(DEF_EVH)
DATA_DICT['aff'] = clean_val(DEF_AFF)
### JUDICIAL_TRS[10]
DEF_MAGIS_DATE = DEF_MAGIS_DATE_REGEX.search(judicial_trs[10]).group().strip()
DEF_MAGIS_COURT = DEF_MAGIS_COURT_REGEX.search(judicial_trs[10]).group().strip()
DEF_MAGIS_JUDGE = DEF_MAGIS_JUDGE_REGEX.search(judicial_trs[10]).group().strip()
DEF_BOUND_OVER = DEF_BOUND_OVER_REGEX.search(judicial_trs[10]).group().strip()
DATA_DICT['magistrate_date'] = clean_val(DEF_MAGIS_DATE)
DATA_DICT['magis_court'] = clean_val(DEF_MAGIS_COURT)
DATA_DICT['magis_judge'] = clean_val(DEF_MAGIS_JUDGE)
DATA_DICT['bound_over'] = clean_val(DEF_BOUND_OVER)
### JUDICIAL_TRS[11]
DEF_EXAM_TRIAL_DATE = DEF_EXAM_TRIAL_DATE_REGEX.search(judicial_trs[11]).group().strip()
DEF_EXAM_COURT = DEF_EXAM_COURT_REGEX.search(judicial_trs[11]).group().strip()
DEF_EXAM_JUDGE = DEF_EXAM_JUDGE_REGEX.search(judicial_trs[11]).group().strip()
DEF_IND_METHOD = DEF_IND_METHOD_REGEX.search(judicial_trs[11]).group().strip()
DATA_DICT['exam_trial_date'] = clean_val(DEF_EXAM_TRIAL_DATE)
DATA_DICT['exam_court'] = clean_val(DEF_EXAM_COURT)
DATA_DICT['exam_judge'] = clean_val(DEF_EXAM_JUDGE)
DATA_DICT['ind_meth'] = clean_val(DEF_IND_METHOD)
### JUDICIAL_TRS[12]
DEF_GJ_H_R_DATE = DEF_GJ_H_R_DATE_REGEX.search(judicial_trs[12]).group().strip()
DEF_GJ_NUM = DEF_GJ_NUM_REGEX.search(judicial_trs[12]).group().strip()
DEF_GJ_W_FILE_DATE = DEF_GJ_W_FILE_DATE_REGEX.search(judicial_trs[12]).group().strip()
DEF_GJ_DS = DEF_GJ_DS_REGEX.search(judicial_trs[12]).group().strip()
DEF_DA_DSP = DEF_DA_DSP_REGEX.search(judicial_trs[12]).group().strip()
DEF_ACC = DEF_ACC_REGEX.search(judicial_trs[12]).group().strip()
DEF_REAS = DEF_REAS_REGEX.search(judicial_trs[12]).group().strip()
DATA_DICT['gj_h_r_date'] = clean_val(DEF_GJ_H_R_DATE)
DATA_DICT['gj_number'] = clean_val(DEF_GJ_NUM)
DATA_DICT['gj_w_file'] = clean_val(DEF_GJ_W_FILE_DATE)
DATA_DICT['gj_ds'] = clean_val(DEF_GJ_DS)
DATA_DICT['da_dsp'] = clean_val(DEF_DA_DSP)
DATA_DICT['acc'] = clean_val(DEF_ACC)
DATA_DICT['reas'] = clean_val(DEF_REAS)
### JUDICIAL_TRS[13]
DEF_DA_DISP_DATE = DEF_DA_DISP_DATE_REGEX.search(judicial_trs[13]).group().strip()
DEF_MISD_REDUC = DEF_MISD_REDUC_REGEX.search(judicial_trs[13]).group().strip()
DEF_SENT_PROB = DEF_SENT_PROB_REGEX.search(judicial_trs[13]).group().strip()
DATA_DICT['da_dispos_date'] = clean_val(DEF_DA_DISP_DATE)
DATA_DICT['misdemeanor_reduction'] = clean_val(DEF_MISD_REDUC)
DATA_DICT['sentence_probated'] = clean_val(DEF_SENT_PROB)
### JUDICIAL_TRS[14]
DEF_JUDCL_CASE_ID = DEF_JUDCL_CASE_ID_REGEX.search(judicial_trs[14]).group().strip()
DEF_GJ_CT = DEF_GJ_CT_REGEX.search(judicial_trs[14]).group().strip()
DEF_PROS_STATE = DEF_PROS_STAT_REGEX.search(judicial_trs[14]).group().strip()
DEF_PROS_NAME = DEF_PROS_NAME_REGEX.search(judicial_trs[14]).group().strip()
DATA_DICT['judcl_case_id'] = clean_val(DEF_JUDCL_CASE_ID)
DATA_DICT['gj_ct'] = clean_val(DEF_GJ_CT)
DATA_DICT['pros_stat'] = clean_val(DEF_PROS_STATE)
DATA_DICT['pros_name'] = clean_val(DEF_PROS_NAME)
### JUDICIAL_TRS[15]
DEF_COURT_ASSIGNED = DEF_COURT_ASSIGNED_TO_REGEX.search(judicial_trs[15]).group().strip()
DEF_DATE_ASSIGNED = DEF_DATE_ASSIGNED_REGEX.search(judicial_trs[15]).group().strip()
DEF_ASSIGNED_BY = DEF_ASSIGNED_BY_REGEX.search(judicial_trs[15]).group().strip()
DEF_REASON = DEF_REASON_REGEX.search(judicial_trs[15]).group().strip()
DATA_DICT['court_assigned_to'] = clean_val(DEF_COURT_ASSIGNED)
DATA_DICT['date_assigned'] = clean_val(DEF_DATE_ASSIGNED)
DATA_DICT['assigned_by'] = clean_val(DEF_ASSIGNED_BY)
DATA_DICT['reason'] = clean_val(DEF_REASON)
### JUDICIAL_TRS[16]
DEF_PRE_CASE_ID = DEF_PRE_CASE_ID_REGEX.search(judicial_trs[16]).group().strip()
DEF_SUC_CASE_ID = DEF_SUC_CASE_ID_REGEX.search(judicial_trs[16]).group().strip()
DATA_DICT['preceeding_da_case_id'] = clean_val(DEF_PRE_CASE_ID)
DATA_DICT['succeeding_da_case_id'] = clean_val(DEF_SUC_CASE_ID)
### JUDICIAL_TRS[17]
DEF_TRN = DEF_TRN_REGEX.search(judicial_trs[17]).group().strip()
DEF_TRS = DEF_TRS_REGEX.search(judicial_trs[17]).group().strip()
DEF_WARR_STAT = DEF_WARR_STAT_REGEX.search(judicial_trs[17]).group().strip()
DEF_STATE_OFF_CD = DEF_STATE_OFF_CD_REGEX.search(judicial_trs[17]).group().strip()
DATA_DICT['trn'] = clean_val(DEF_TRN)
DATA_DICT['trs'] = clean_val(DEF_TRS)
DATA_DICT['warrant_status'] = clean_val(DEF_WARR_STAT)
DATA_DICT['state_offense_code'] = clean_val(DEF_STATE_OFF_CD)
return DATA_DICT
def get_sets_and_passes(self, sets_trs, da_case_id, jd_case_id):
"""
Extract `Sets and Passes` Table.
:param sets_trs: List of <tr> elements in `Sets and Passes` section.
:type sets_trs: list
:param da_case_id: DA Case ID used for linkage.
:type da_case_id: str
:param jd_case_id: Judicial Case ID used for linkage.
:type jd_case_id: str
:rtype: list
"""
sp_id = 0
DATA_LIST = list()
DATA_DICT = dict()
for content in sets_trs:
if SET_DATE_REGEX.search(content):
if len(DATA_DICT.keys())>0:
DATA_LIST.append(DATA_DICT)
DATA_DICT = dict()
sp_id += 1
DATA_DICT['sp_id'] = sp_id
DATA_DICT['da_case_id'] = da_case_id
DATA_DICT['jd_case_id'] = jd_case_id
SET_DATE = SET_DATE_REGEX.search(content).group().strip()
SET_TIME = SET_TIME_REGEX.search(content).group().strip()
SET_TYPE = SET_TYPE_REGEX.search(content).group().strip()
PASSED_DT = SET_PASSED_DATE_REGEX.search(content).group().strip()
DATA_DICT['set_for_date'] = clean_val(SET_DATE)
DATA_DICT['set_for_time'] = clean_val(SET_TIME)
DATA_DICT['set_type'] = clean_val(SET_TYPE)
DATA_DICT['passed_to_date'] = clean_val(PASSED_DT)
elif SET_DISP_CODE_REGEX.search(content):
DISP_CODE = SET_DISP_CODE_REGEX.search(content).group().strip()
PASS_GEN = SET_PASS_GEN_REGEX.search(content).group().strip()
COMMENTS = SET_COMMENTS_REGEX.search(content).group().strip()
DATA_DICT['set_disposition_code'] = clean_val(DISP_CODE)
DATA_DICT['passed_generally'] = clean_val(PASS_GEN)
DATA_DICT['comments'] = clean_val(COMMENTS)
elif SET_STATES_REC_REGEX.search(content):
STATE_REC = SET_STATES_REC_REGEX.search(content).group().strip()
REC_NO = SET_REC_NUM_REGEX.search(content).group().strip()
DATA_DICT['states_recommendation'] = clean_val(STATE_REC)
DATA_DICT['rec_no'] = clean_val(REC_NO)
DATA_LIST.append(DATA_DICT)
return DATA_LIST
def get_names(self, names_trs, da_case_id, jd_case_id):
"""
Extract `Names` Table.
:param names_trs: List of <tr> elements in `Names` section.
:type names_trs: list
:param da_case_id: DA Case ID used for linkage.
:type da_case_id: str
:param jd_case_id: Judicial Case ID used for linkage.
:type jd_case_id: str
:rtype: list
"""
name_id = 0
DATA_LIST = list()
DATA_DICT = dict()
for content in names_trs:
if NAME_ASSOC_REGEX.search(content):
if len(DATA_DICT.keys())>0:
DATA_LIST.append(DATA_DICT)
DATA_DICT = dict()
name_id += 1
DATA_DICT['name_id'] = name_id
DATA_DICT['da_case_id'] = da_case_id
DATA_DICT['jd_case_id'] = jd_case_id
ASSOC_NAME = NAME_ASSOC_REGEX.search(content).group().strip()
NAME_CODE = NAME_REF_CODE_REGEX.search(content).group().strip()
DATA_DICT['associated_name'] = clean_val(ASSOC_NAME)
DATA_DICT['name_ref_code'] = clean_val(NAME_CODE)
else:
CT_APPT = NAME_CT_APPT_REGEX.search(content).group().strip()
BAR_NO = NAME_BAR_NO_REGEX.search(content).group().strip()
BONDER = NAME_BOND_MAKER_REGEX.search(content).group().strip()
BOND = NAME_BOND_MADE_REGEX.search(content).group().strip()
DATA_DICT['ct_appointed'] = clean_val(CT_APPT)
DATA_DICT['bar_no'] = clean_val(BAR_NO)
DATA_DICT['bond_maker'] = clean_val(BONDER)
DATA_DICT['dt_bond_made'] = clean_val(BOND)
DATA_LIST.append(DATA_DICT)
return DATA_LIST
def get_bonds(self, bonds_trs, da_case_id, jd_case_id):
"""
Extract `Bonds` Table.
:param bonds_trs: List of <tr> elements in `Bonds` section.
:type bonds_trs: list
:param da_case_id: DA Case ID used for linkage.
:type da_case_id: str
:param jd_case_id: Judicial Case ID used for linkage.
:type jd_case_id: str
:rtype: list
"""
bond_id = 0
DATA_LIST = list()
DATA_DICT = dict()
for content in bonds_trs:
if BOND_DATE_SET_REGEX.search(content):
if len(DATA_DICT.keys())>0:
DATA_LIST.append(DATA_DICT)
DATA_DICT = dict()
bond_id += 1
DATA_DICT['bond_id'] = bond_id
DATA_DICT['da_case_id'] = da_case_id
DATA_DICT['jd_case_id'] = jd_case_id
DATE_SET = BOND_DATE_SET_REGEX.search(content).group().strip()
AMT = BOND_AMT_REGEX.search(content).group().strip()
TYPE = BOND_TYPE_REGEX.search(content).group().strip()
SET_BY = BOND_SET_BY_REGEX.search(content).group().strip()
JUDGE = BOND_JUDGE_REGEX.search(content).group().strip()
DATA_DICT['date_bond_set'] = clean_val(DATE_SET)
DATA_DICT['amt'] = clean_val(AMT)
DATA_DICT['type'] = clean_val(TYPE)
DATA_DICT['set_by_court'] = clean_val(SET_BY)
DATA_DICT['judge'] = clean_val(JUDGE)
elif BOND_REC_NO_REGEX.search(content):
REC_NO = BOND_REC_NO_REGEX.search(content).group().strip()
DATA_DICT['rec_no'] = clean_val(REC_NO)
DATA_LIST.append(DATA_DICT)
return DATA_LIST
def get_charges(self, charge_trs, da_case_id, jd_case_id):
"""
Extract `Charges` Table.
:param charges_trs: List of <tr> elements in `Charges` section.
:type charges_trs: list
:param da_case_id: DA Case ID used for linkage.
:type da_case_id: str
:param jd_case_id: Judicial Case ID used for linkage.
:type jd_case_id: str
:rtype: list
"""
DATA_LIST = list()
DATA_DICT = dict()
charge_id = 0
for content in charge_trs:
if CHRG_NAME_REGEX.search(content):
if len(DATA_DICT.keys())>0:
DATA_LIST.append(DATA_DICT)
DATA_DICT = dict()
charge_id += 1
DATA_DICT['charge_id'] = charge_id
DATA_DICT['da_case_id'] = da_case_id
DATA_DICT['jd_case_id'] = jd_case_id
CHRG_NAME = CHRG_NAME_REGEX.search(content).group().strip()
OFF_CD = CHRG_OFF_CD_REGEX.search(content).group().strip()
STATE_CD = CHRG_STATE_CD_REGEX.search(content).group().strip()
DATA_DICT['name_raw'] = clean_val(CHRG_NAME)
DATA_DICT['offense_cd'] = clean_val(OFF_CD)
DATA_DICT['state_cd'] = clean_val(STATE_CD)
elif CHRG_DESC_REGEX.search(content):
DESC = CHRG_DESC_REGEX.search(content).group().strip()
COMT = CHRG_COMT_REGEX.search(content).group().strip()
TYPE_CL = CHRG_TYPE_CL_REGEX.search(content).group().strip().split()
try:
GOC = CHRG_GOC_REGEX.search(content).group().strip()
except AttributeError:
GOC = ''
DATA_DICT['offense_desc'] = clean_val(DESC)
DATA_DICT['comt'] = clean_val(COMT)
if len(TYPE_CL)==2:
DATA_DICT['offense_type'] = clean_val(TYPE_CL[0]).strip()
DATA_DICT['offense_class'] = clean_val(TYPE_CL[1]).strip()
else:
DATA_DICT['offense_type'] = clean_val(TYPE_CL[0]).strip()
DATA_DICT['goc'] = clean_val(GOC)
elif CHRG_GJ_CT_REGEX.search(content):
GJ_CT = CHRG_GJ_CT_REGEX.search(content).group().strip()
CURR_CT = CHRG_CURR_CT_REGEX.search(content).group().strip()
PREV_CT = CHRG_PREV_CT_REGEX.search(content).group().strip()
try:
CHOV_DT = CHRG_CHOV_DT_REGEX.search(content).group().strip()
except AttributeError:
CHOV_DT = ''
DATA_DICT['gj_court'] = clean_val(GJ_CT)
DATA_DICT['current_court'] = clean_val(CURR_CT)
DATA_DICT['previous_courts'] = clean_val(PREV_CT)
DATA_DICT['chov_dt'] = clean_val(CHOV_DT)
DATA_LIST.append(DATA_DICT)
return DATA_LIST
def get_dispositions(self, disp_trs, da_case_id, jd_case_id):
"""
Extract `Dispositions` Table.
:param disp_trs: List of <tr> elements in `Dispositions` section.
:type disp_trs: list
:param da_case_id: DA Case ID used for linkage.
:type da_case_id: str
:param jd_case_id: Judicial Case ID used for linkage.
:type jd_case_id: str
:rtype: list
"""
disp_id = 0
DATA_LIST = list()
DATA_DICT = dict()
for content in disp_trs:
if DISP_NO_REGEX.search(content):
if len(DATA_DICT.keys())>0:
DATA_LIST.append(DATA_DICT)
DATA_DICT = dict()
disp_id += 1
DISP_NO = DISP_NO_REGEX.search(content).group().strip()
DATA_DICT['ct_disp_no'] = clean_val(DISP_NO)
DATA_DICT['da_case_id'] = da_case_id
DATA_DICT['jd_case_id'] = jd_case_id
DATA_DICT['disp_id'] = disp_id
elif DISP_VERDICT_DATE_REGEX.search(content):
VERDICT_DATE = DISP_VERDICT_DATE_REGEX.search(content).group().strip()
VERDICT_BY = DISP_VERDICT_BY_REGEX.search(content).group().strip()
VERDICT_JG = DISP_VERDICT_JG_REGEX.search(content).group().strip()
TC = DISP_TC_REGEX.search(content).group().strip()
DISM_TYPE = DISP_DISM_TYP_REGEX.search(content).group().strip()
VERDICT_VOL = DISP_VERDICT_VOL_REGEX.search(content).group().strip()
VERDICT_PAGE = DISP_VERDICT_PAGE_REGEX.search(content).group().strip()
DATA_DICT['verdict_date'] = clean_val(VERDICT_DATE)
DATA_DICT['verdict_by'] = clean_val(VERDICT_BY)
DATA_DICT['verdict_jg'] = clean_val(VERDICT_JG)
DATA_DICT['verdict_tc'] = clean_val(TC)
DATA_DICT['dism_type'] = clean_val(DISM_TYPE)
DATA_DICT['verdict_vol'] = clean_val(VERDICT_VOL)
DATA_DICT['verdict_page'] = clean_val(VERDICT_PAGE)
elif DISP_SENT_DATE_REGEX.search(content):
SENT_DATE = DISP_SENT_DATE_REGEX.search(content).group().strip()
SENT_BY = DISP_SENT_BY_REGEX.search(content).group().strip()
SENT_TO = DISP_SENT_TO_REGEX.search(content).group().strip()
SENT_YEARS = DISP_SENT_YEARS_REGEX.search(content).group().strip()
SENT_MONTHS = DISP_SENT_MONTHS_REGEX.search(content).group().strip()
SENT_HOURS = DISP_SENT_HOURS_REGEX.search(content).group().strip()
DATA_DICT['sentence_date'] = clean_val(SENT_DATE)
DATA_DICT['sentence_by'] = clean_val(SENT_BY)
DATA_DICT['sentence_to'] = clean_val(SENT_TO)
DATA_DICT['sentence_years'] = clean_val(SENT_YEARS)
DATA_DICT['sentence_months'] = clean_val(SENT_MONTHS)
DATA_DICT['sentence_hours'] = clean_val(SENT_HOURS)
elif DISP_SENT_BEGIN_REGEX.search(content):
SENT_BGN = DISP_SENT_BEGIN_REGEX.search(content).group().strip()
SENT_VOL = DISP_SENT_VOL_REGEX.search(content).group().strip()
SENT_PAGE = DISP_SENT_PAGE_REGEX.search(content).group().strip()
DISCH = DISP_DISCHARGE_REGEX.search(content).group().strip()
DISCH_TYPE = DISP_DISCH_TYPE_REGEX.search(content).group().strip()
DISP_NUM = DISP_NUM_REGEX.search(content).group().strip()
DATA_DICT['sentence_to_begin'] = clean_val(SENT_BGN)
DATA_DICT['sentence_vol'] = clean_val(SENT_VOL)
DATA_DICT['sentence_page'] = clean_val(SENT_PAGE)
DATA_DICT['discharge'] = clean_val(DISCH)
DATA_DICT['discharge_type'] = clean_val(DISCH_TYPE)
DATA_DICT['discharge_number'] = clean_val(DISP_NUM)
elif DISP_PROB_SENT_TO_REGEX.search(content):
PROB_SENT = DISP_PROB_SENT_TO_REGEX.search(content).group().strip()
PROB_SENT_YEARS = DISP_PROB_SENT_YEARS_REGEX.search(content).group().strip()
PROB_SENT_MONTHS = DISP_PROB_SENT_MONTHS_REGEX.search(content).group().strip()
PROB_SENT_DAYS = DISP_PROB_SENT_DAYS_REGEX.search(content).group().strip()
MULT_SENT = DISP_MULT_SENT_REGEX.search(content).group().strip()
DATA_DICT['probated_sentence_to'] = clean_val(PROB_SENT)
DATA_DICT['probation_sentence_years'] = clean_val(PROB_SENT_YEARS)
DATA_DICT['probation_sentence_months'] = clean_val(PROB_SENT_MONTHS)
DATA_DICT['probation_sentence_days'] = clean_val(PROB_SENT_DAYS)
DATA_DICT['mult_sent'] = clean_val(MULT_SENT)
elif DISP_PROB_YEARS_REGEX.search(content):
PROB_YEARS = DISP_PROB_YEARS_REGEX.search(content).group().strip()
PROB_MONTHS = DISP_PROB_MONTHS_REGEX.search(content).group().strip()
PROB_DAYS = DISP_PROB_DAYS_REGEX.search(content).group().strip()
PROB_START = DISP_PROB_START_DATE_REGEX.search(content).group().strip()
DATA_DICT['probated_for_years'] = clean_val(PROB_YEARS)
DATA_DICT['probated_for_months'] = clean_val(PROB_MONTHS)
DATA_DICT['probated_for_days'] = | |
import numpy as np
from itertools import cycle
from numba import jit
from functools import lru_cache
from hierarch.internal_functions import (
nb_unique,
id_cluster_counts,
msp,
set_numba_random_state,
_repeat,
nb_fast_shuffle,
nb_strat_shuffle,
weights_to_index,
)
class Bootstrapper:
"""Bootstrapper(random_state=None, kind="weights")
This transformer performs a nested bootstrap on the target data.
Undefined behavior if the target data is not lexicographically
sorted.
Parameters
----------
random_state : int or numpy.random.Generator instance, optional
Seeds the Bootstrapper for reproducibility, by default None
kind : { "weights", "bayesian", "indexes" }
Specifies the bootstrapping algorithm.
"weights" generates a set of new integer weights for
each datapoint.
"bayesian" generates a set of new real weights for
each datapoint.
"indexes" generates a set of new indexes for the dataset.
Mathematically, this is equivalent to demanding integer weights.
Notes
-----
These approaches have different outputs - "weights" and "bayesian"
output arrays the same size of the original array, but with
every y-value multiplied by generated weight. "indexes" will
output an array that is not necessarily the same size as the
original array, but the weight of each y-value is 1, so certain
metrics are easier to compute. Assuming both algorithms generated
the "same" sample in terms of reweights, the arrays will be
equivalent after the groupby and aggregate step.
"bayesian" has no reindexing equivalent.
Examples
--------
Generate a simple design matrix with dependent variable always equal to 1.
>>> from hierarch.power import DataSimulator
>>> paramlist = [[1]*2, [0]*6, [0]*18]
>>> hierarchy = [2, 3, 3]
>>> datagen = DataSimulator(paramlist)
>>> datagen.fit(hierarchy)
>>> data = datagen.generate()
>>> data
array([[1., 1., 1., 1.],
[1., 1., 2., 1.],
[1., 1., 3., 1.],
[1., 2., 1., 1.],
[1., 2., 2., 1.],
[1., 2., 3., 1.],
[1., 3., 1., 1.],
[1., 3., 2., 1.],
[1., 3., 3., 1.],
[2., 1., 1., 1.],
[2., 1., 2., 1.],
[2., 1., 3., 1.],
[2., 2., 1., 1.],
[2., 2., 2., 1.],
[2., 2., 3., 1.],
[2., 3., 1., 1.],
[2., 3., 2., 1.],
[2., 3., 3., 1.]])
Generate a bootstrapped sample by resampling column 1, then column 2. The "weights"
algorithm multiplies all of the dependent variable values by the resampled weights.
Starting at column 1 means that some column 2 clusters might be zero-weighted.
>>> boot = Bootstrapper(random_state=1, kind="weights")
>>> boot.fit(data, skip=None)
>>> boot.transform(data, start=1)
array([[1., 1., 1., 3.],
[1., 1., 2., 0.],
[1., 1., 3., 3.],
[1., 2., 1., 0.],
[1., 2., 2., 0.],
[1., 2., 3., 0.],
[1., 3., 1., 1.],
[1., 3., 2., 1.],
[1., 3., 3., 1.],
[2., 1., 1., 0.],
[2., 1., 2., 0.],
[2., 1., 3., 0.],
[2., 2., 1., 1.],
[2., 2., 2., 1.],
[2., 2., 3., 1.],
[2., 3., 1., 2.],
[2., 3., 2., 3.],
[2., 3., 3., 1.]])
Starting at column 2 means that every column 1 cluster has equal weight.
>>> boot = Bootstrapper(random_state=1, kind="weights")
>>> boot.fit(data, skip=None)
>>> boot.transform(data, start=2)
array([[1., 1., 1., 2.],
[1., 1., 2., 0.],
[1., 1., 3., 1.],
[1., 2., 1., 0.],
[1., 2., 2., 1.],
[1., 2., 3., 2.],
[1., 3., 1., 2.],
[1., 3., 2., 0.],
[1., 3., 3., 1.],
[2., 1., 1., 1.],
[2., 1., 2., 1.],
[2., 1., 3., 1.],
[2., 2., 1., 1.],
[2., 2., 2., 0.],
[2., 2., 3., 2.],
[2., 3., 1., 1.],
[2., 3., 2., 1.],
[2., 3., 3., 1.]])
Skipping column 2 results in only column 1 clusters being resampled.
>>> boot = Bootstrapper(random_state=1, kind="weights")
>>> boot.fit(data, skip=[2])
>>> boot.transform(data, start=1)
array([[1., 1., 1., 2.],
[1., 1., 2., 2.],
[1., 1., 3., 2.],
[1., 2., 1., 0.],
[1., 2., 2., 0.],
[1., 2., 3., 0.],
[1., 3., 1., 1.],
[1., 3., 2., 1.],
[1., 3., 3., 1.],
[2., 1., 1., 0.],
[2., 1., 2., 0.],
[2., 1., 3., 0.],
[2., 2., 1., 1.],
[2., 2., 2., 1.],
[2., 2., 3., 1.],
[2., 3., 1., 2.],
[2., 3., 2., 2.],
[2., 3., 3., 2.]])
Changing the algorithm to "indexes" gives a more familiar result.
>>> boot = Bootstrapper(random_state=1, kind="indexes")
>>> boot.fit(data, skip=None)
>>> boot.transform(data, start=1)
array([[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 3., 1.],
[1., 1., 3., 1.],
[1., 1., 3., 1.],
[1., 3., 1., 1.],
[1., 3., 2., 1.],
[1., 3., 3., 1.],
[2., 2., 1., 1.],
[2., 2., 2., 1.],
[2., 2., 3., 1.],
[2., 3., 1., 1.],
[2., 3., 1., 1.],
[2., 3., 2., 1.],
[2., 3., 2., 1.],
[2., 3., 2., 1.],
[2., 3., 3., 1.]])
The Bayesian bootstrap is the same as the Efron bootstrap, but allows
the resampled weights to take any real value up to the sum of the original
weights in that cluster.
>>> boot = Bootstrapper(random_state=2, kind="bayesian")
>>> boot.fit(data, skip=None)
>>> boot.transform(data, start=1)
array([[1. , 1. , 1. , 0.92438197],
[1. , 1. , 2. , 1.65820553],
[1. , 1. , 3. , 1.31019207],
[1. , 2. , 1. , 3.68556477],
[1. , 2. , 2. , 0.782951 ],
[1. , 2. , 3. , 0.01428243],
[1. , 3. , 1. , 0.03969449],
[1. , 3. , 2. , 0.04616013],
[1. , 3. , 3. , 0.53856761],
[2. , 1. , 1. , 4.4725425 ],
[2. , 1. , 2. , 1.83458204],
[2. , 1. , 3. , 0.16269176],
[2. , 2. , 1. , 0.53223701],
[2. , 2. , 2. , 0.37478853],
[2. , 2. , 3. , 0.07456895],
[2. , 3. , 1. , 0.27616575],
[2. , 3. , 2. , 0.11271856],
[2. , 3. , 3. , 1.15970489]])
"""
#: ("weights", "indexes", "bayesian) The three possible arguments that
# can be provided to the "kind" keyword argument.
_BOOTSTRAP_ALGORITHMS = tuple(["weights", "indexes", "bayesian"])
def __init__(self, random_state=None, kind="weights"):
self.random_generator = np.random.default_rng(random_state)
# this is a bit hacky, but we use the numpy generator to seed Numba
# this makes it both reproducible and thread-safe enough
nb_seed = self.random_generator.integers(low=2 ** 32 - 1)
set_numba_random_state(nb_seed)
if kind in self._BOOTSTRAP_ALGORITHMS:
self.kind = kind
else:
raise KeyError("Invalid 'kind' argument.")
def fit(self, data, skip=None, y=-1):
"""Fit the bootstrapper to the target data.
Parameters
----------
data : 2D array
Target data. Must be lexicographically sorted.
sort : bool
Set to false is data is already sorted by row, by default True.
skip : list of integers, optional
Columns to skip in the bootstrap. Skip columns that were sampled
without replacement from the prior column, by default [].
y : int, optional
column index of the dependent variable, by default -1
Raises
------
ValueError
Raises error if the input data is not a numpy numeric array.
AttributeError
Raises error if the input data is not a numpy array.
"""
try:
if not np.issubdtype(data.dtype, np.number):
raise ValueError(
"Bootstrapper can only handle numeric datatypes. Please pre-process your data."
)
except AttributeError:
raise AttributeError("Bootstrapper can only handle numpy arrays. Please pre-process your data."
)
if skip is not None:
skip = list(skip)
for v in iter(skip):
if not isinstance(v, int):
raise IndexError(
"skip values must be integers corresponding to column indices."
)
if v >= data.shape[1] - 1:
raise IndexError("skip index out of bounds for this array.")
else:
skip = []
cluster_dict = id_cluster_counts(data[:, :y])
cluster_dict = tuple(reversed(list(cluster_dict.values())))
cluster_dict = tuple(map(tuple, cluster_dict))
y %= data.shape[1]
shape = y
columns_to_resample = np.array([True for k in range(shape)])
for key in skip:
columns_to_resample[key] = False
kind = str(self.kind)
self.transform = _bootstrapper_factory(
tuple(columns_to_resample), cluster_dict, shape, kind
)
def transform(self, data, start: int):
"""Generate a bootstrapped sample from target data.
Parameters
----------
data : 2D array
Target data. Must be sorted by row.
start : int
Column index of the first column to be bootstrapped.
Returns
-------
2D array
Array matching target data, but resampled with replacement
according to "kind" argument.
"""
raise Exception("Use fit() before using transform().")
@lru_cache()
def _bootstrapper_factory(columns_to_resample, clusternum_dict, shape, kind):
"""Factory function that returns the appropriate transform().
"""
clusternum_dict = tuple(map(np.array, clusternum_dict))
columns_to_resample = np.array(columns_to_resample)
if kind == "weights":
@jit(nopython=True)
def _bootstrapper_impl(data, start):
| |
# <NAME>
# <EMAIL>
# November 2014
# Stanford University
# This script calculates H12 and H2/H1 from population genomic data
import sys
from optparse import OptionParser
import copy
import csv
import linecache
import random
import time
import numpy
#import Bio
######################
def clusterSingleWindow(inFile, outFile, windowTot, distanceThreshold, numStrains, singleWindow):
    # Calculates H12, H2, and H1 in a single analysis window centered around the
    # coordinate specified by the user (singleWindow). If the coordinate is not in
    # the file, or lies too close to either end for a full window to be defined,
    # an error message is printed instead.
    # Count the number of lines in a file
    countLines = open(inFile)
    numberLines = len(countLines.readlines())
    window = int(windowTot)/2 # number of SNPs on each side of the center (Python 2 integer division)
    lastSNP = numberLines - window
    center=-1
    # Scan every line that could serve as a window center and look for the
    # requested coordinate (column 0 of the comma-separated SNP file).
    for lineNo in range(window, lastSNP):
        coord=int(linecache.getline(inFile,lineNo).split(',')[0].strip('\n'))
        if coord == singleWindow:
            center=lineNo
    if center!=-1: # the requested center coordinate was found
        flies = initialize(window, center, inFile)
        runAllDefs(flies, center, distanceThreshold, inFile, outFile, window)
    else:
        print "ERROR: Specified coordinate was not found in file or within a defineable window"
######################
def clusterHaplotypes(inFile, outFile, windowTot, jump, distanceThreshold, numStrains):
    # Iterates through the entire chromosome and calculates H12, H2, and H1 for
    # each analysis window. The first window is read in full; afterwards the
    # 'flies' dictionary is updated incrementally (drop `jump` SNPs on the left,
    # read `jump` new SNPs on the right) so the whole chromosome never has to be
    # held in memory at once.
    # Count the number of lines in a file
    countLines = open(inFile)
    numberLines = len(countLines.readlines())
    window = int(windowTot)/2 # number of SNPs on each side of the center (Python 2 integer division)
    lastSNP = numberLines - window
    jump = int(jump) # this is the number of SNPs I iterate by
    # Read the data into numStrains lists of Window SNPs long (200 on each side of the center)
    # center is the variable I will call to assign the middle of the sweep region I am looking at.
    # Store the haplotypes in a dictionary:
    center = window +1
    flies = initialize(window, center, inFile)
    runAllDefs(flies, center, distanceThreshold, inFile, outFile, window)
    ####### now iterate through the rest of the genome and fill only the part of the list that needs to be filled ########
    for center in range(window+1+jump, lastSNP, jump):
        if 2*window +1 >= jump:
            # Consecutive windows overlap, so reuse the shared SNPs.
            # remove SNPs from the left
            for j in range(1,numStrains+1):
                del flies[j][0:jump]
            # Add SNPs to the right
            for i in range(0,jump):
                current_line_info = linecache.getline(inFile, center + window + i - jump + 1).split(',')
                for j in range(1,numStrains+1):
                    flies[j].append(current_line_info[j].strip())
        else:
            # Windows are disjoint; re-read the window from scratch.
            # need to fix this -- will fail because the edge cases have not been taken care of.
            flies = initialize(window, center, inFile)
        runAllDefs(flies, center, distanceThreshold, inFile, outFile, window)
#######################
def runAllDefs(flies, center, distanceThreshold, inFile, outFile, window):
    # Runs the full per-window pipeline: count haplotypes, merge near-identical
    # ones, find multi-member clusters, order them by size, and write the
    # results for this analysis window.
    # Count the haplotypes
    haps = countHaps(flies)
    # clump haplotypes that differ by some min threshold (including haps that differ by only an N):
    [haps_clumped, haps_clumped_count] = clusterDiffs(haps, distanceThreshold)
    # find all clusters with at least two member haplotypes (n_min in findClusters is 2)
    clusters = findClusters(haps_clumped)
    sizeVector = []
    keyVector = []
    if (len(clusters.keys()) == 0):
        # No multi-member clusters: treat every clumped haplotype as a
        # singleton cluster of size 1.
        for key in haps_clumped.iterkeys():
            sizeVector.append(1)
            keyVector.append(key)
    else:
        [keyVector, sizeVector] = sortClusters(clusters,haps_clumped)
    # Physical coordinates (column 0 of the SNP file) of the window center and edges.
    centerCoord = linecache.getline(inFile,center).split(',')[0]
    edgeCoord1 = linecache.getline(inFile,center-window).split(',')[0]
    edgeCoord2 = linecache.getline(inFile,center+window).split(',')[0]
    absLengthWin = int(edgeCoord2)-int(edgeCoord1)
    printClusters(inFile, outFile, centerCoord, clusters, haps_clumped, keyVector, sizeVector, absLengthWin, edgeCoord1, edgeCoord2)
#######################
def initialize(window, center, inFile):
    # Builds the flies dictionary (strain number -> list of SNP calls) for the
    # analysis window covering lines center-window .. center+window of inFile.
    # Column 0 of each comma-separated line is the coordinate; columns
    # 1..numStrains hold the per-strain SNP calls. Relies on the module-level
    # global numStrains and reads lines via linecache.
    flies = dict((strain, []) for strain in range(1, numStrains + 1))
    for lineNo in range(center - window, center + window + 1):
        fields = linecache.getline(inFile, lineNo).split(',')
        for strain in range(1, numStrains + 1):
            flies[strain].append(fields[strain].strip())
    return flies
#######################
def countHaps(flies):
    # Collapses identical haplotypes into single entries: returns a dictionary
    # mapping each unique haplotype string to the list of strain numbers that
    # carry it (strain numbers index the processed SNP matrix). Depends on the
    # module-level global numStrains.
    haps = {}
    for strain in range(1, numStrains + 1):
        haplotype = ''.join(flies[strain])
        if haplotype not in haps:
            haps[haplotype] = []
        haps[haplotype].append(strain)
    return haps
#################
def clusterDiffs(haps, distanceThreshold):
    # Merges haplotypes whose pairwise distance is within distanceThreshold.
    # Haplotypes that match everywhere except at N (missing data) sites are
    # merged at distance 0; only ATGC differences count towards the threshold.
    # Returns [haps_clumped, haps_clumped_count]: representative haplotype ->
    # combined strain list, and representative -> number of distinct unique
    # haplotypes merged into it.
    distanceThreshold = int(distanceThreshold)
    haps_clumped = {} # stored all the clumped haplotypes in this hash. I will pass this into def findClusters later on.
    haps_clumped_count = {} # number of different unique haplotypes that were clumped together
    # I need to keep track of which key has been compared
    compared = {}
    # Now calculate the distance between unique clustering haplotypes
    for key1 in haps.iterkeys():
        if (key1 in compared) == False:
            compared[key1]=1
            haps_clumped[key1] = haps[key1] # regardless of whether or not key1 matches anything, I need to include it in haps_clumped. Therefore I will initialize it with it's own array.
            haps_clumped_count[key1] = 1
            for key2 in haps.iterkeys():
                if ((haps[key2][0] in haps_clumped[key1]) == False) and ((key2 in compared) == False):
                    # hamming_distance_clump presumably returns [distance, s1]
                    # where s1 is key1 with N sites resolved using key2 --
                    # TODO confirm against its definition elsewhere in the file.
                    [distance, s1]= hamming_distance_clump(key1, key2, distanceThreshold)
                    # If I replace an "N" in key1, I will replace the returned key1 in haps_clumped:
                    if distance == 0 and key1 != s1:
                        # Re-key the cluster under the resolved haplotype s1.
                        haps_clumped_count[s1] = haps_clumped_count[key1]
                        haps_clumped[s1] = haps_clumped[key1]
                        del haps_clumped_count[key1]
                        del haps_clumped[key1]
                        key1 = s1
                    if distance <= distanceThreshold:
                        # The reason why this extra if statement is here is so that I do not confuse merging missing data with clumping haplotypes with a min distance threshold
                        # store into the haps_clumped threshold:
                        haps_clumped[key1] += haps[key2] # add the array for key2 to key1 array
                        haps_clumped_count[key1] += 1
                        compared[key2] = 1 # this means that I won't check this distance again since it has been clumped.
    return [haps_clumped, haps_clumped_count]
##################
def findClusters(haps):
    # Identifies haplotypes present in the sample in at least 2 individuals.
    # Returns a dictionary whose keys are the qualifying haplotypes (values are
    # empty lists, to be populated later). The second pass only matters if
    # n_min2 is ever set below n_min1; with both equal to 2 its range is empty.
    # Fix: iterate dictionaries directly instead of the Python-2-only
    # iterkeys(), which raises AttributeError on Python 3; iteration order and
    # results are unchanged on Python 2.
    n_min1 = 2
    n_min2 = 2
    # find the top clusters comprised of at least n_min members
    clusters = {}
    # flag for first cluster > n_min1 found
    n_min1_found = False
    for key in haps:
        if len(haps[key]) > n_min1 - 1:
            clusters[key] = [] # Store the top clusters in this dictionary
            n_min1_found = True
    if n_min1_found:
        for key in haps:
            if n_min2 - 1 < len(haps[key]) < n_min1:
                clusters[key] = []
    return clusters
####################
def sortClusters(clusters, haps):
    # Sorts haplotype clusters in reverse order from largest to smallest
    # membership; this ordering feeds the haplotype homozygosity statistics
    # H12, H2, and H1. Returns [keyVector, sizeVector].
    # Fixes: (1) iterate the dict directly instead of the Python-2-only
    # iterkeys(); (2) replace the O(n^2) bubble sort with the built-in stable
    # sort -- equal-sized clusters keep their original iteration order, which
    # is exactly the tie behavior of the strict-inequality bubble sort.
    pairs = [(key, len(haps[key])) for key in clusters]
    pairs.sort(key=lambda pair: pair[1], reverse=True)
    keyVector = [pair[0] for pair in pairs]
    sizeVector = [pair[1] for pair in pairs]
    return [keyVector, sizeVector]
######################
def printClusters(inFile, | |
'''
pass
class VIEW3D_MT_view_regions(bpy_types.Menu, bpy_types._GenericUI):
    """Auto-generated API stub for Blender's ``VIEW3D_MT_view_regions`` menu.

    Attribute values and method bodies are placeholders (``None``/``pass``);
    the real implementations live inside Blender. This class exists only so
    IDEs and static type checkers can resolve the names.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_view_viewpoint(bpy_types.Menu, bpy_types._GenericUI):
    """Auto-generated API stub for Blender's ``VIEW3D_MT_view_viewpoint`` menu.

    Attribute values and method bodies are placeholders (``None``/``pass``);
    the real implementations live inside Blender. This class exists only so
    IDEs and static type checkers can resolve the names.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_volume_add(bpy_types.Menu, bpy_types._GenericUI):
    """Auto-generated API stub for Blender's ``VIEW3D_MT_volume_add`` menu.

    Attribute values and method bodies are placeholders (``None``/``pass``);
    the real implementations live inside Blender. This class exists only so
    IDEs and static type checkers can resolve the names.
    """
    bl_idname = None
    ''' '''
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_weight_gpencil(bpy_types.Menu, bpy_types._GenericUI):
    """Auto-generated API stub for Blender's ``VIEW3D_MT_weight_gpencil`` menu.

    Attribute values and method bodies are placeholders (``None``/``pass``);
    the real implementations live inside Blender. This class exists only so
    IDEs and static type checkers can resolve the names.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_MT_wpaint_vgroup_lock_pie(bpy_types.Menu, bpy_types._GenericUI):
    """Auto-generated API stub for Blender's ``VIEW3D_MT_wpaint_vgroup_lock_pie`` pie menu.

    Attribute values and method bodies are placeholders (``None``/``pass``);
    the real implementations live inside Blender. This class exists only so
    IDEs and static type checkers can resolve the names.
    """
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, _context):
        '''
        '''
        pass
    def draw_collapsible(self, context, layout):
        '''
        '''
        pass
    def draw_preset(self, _context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_PT_active_tool(
        bpy_types.Panel, bpy_types._GenericUI,
        bl_ui.space_toolsystem_common.ToolActivePanelHelper):
    """Auto-generated API stub for Blender's ``VIEW3D_PT_active_tool`` panel.

    Attribute values and method bodies are placeholders (``None``/``pass``);
    the real implementations live inside Blender. This class exists only so
    IDEs and static type checkers can resolve the names.
    """
    bl_category = None
    ''' '''
    bl_label = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''
    def append(self, draw_func):
        '''
        '''
        pass
    def as_pointer(self):
        '''
        '''
        pass
    def bl_rna_get_subclass(self):
        '''
        '''
        pass
    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass
    def draw(self, context):
        '''
        '''
        pass
    def driver_add(self):
        '''
        '''
        pass
    def driver_remove(self):
        '''
        '''
        pass
    def get(self):
        '''
        '''
        pass
    def is_extended(self):
        '''
        '''
        pass
    def is_property_hidden(self):
        '''
        '''
        pass
    def is_property_overridable_library(self):
        '''
        '''
        pass
    def is_property_readonly(self):
        '''
        '''
        pass
    def is_property_set(self):
        '''
        '''
        pass
    def items(self):
        '''
        '''
        pass
    def keyframe_delete(self):
        '''
        '''
        pass
    def keyframe_insert(self):
        '''
        '''
        pass
    def keys(self):
        '''
        '''
        pass
    def path_from_id(self):
        '''
        '''
        pass
    def path_resolve(self):
        '''
        '''
        pass
    def poll(self, context):
        '''
        '''
        pass
    def pop(self):
        '''
        '''
        pass
    def prepend(self, draw_func):
        '''
        '''
        pass
    def property_overridable_library_set(self):
        '''
        '''
        pass
    def property_unset(self):
        '''
        '''
        pass
    def remove(self, draw_func):
        '''
        '''
        pass
    def type_recast(self):
        '''
        '''
        pass
    def values(self):
        '''
        '''
        pass
class VIEW3D_PT_active_tool_duplicate(
bpy_types.Panel, bpy_types._GenericUI,
bl_ui.space_toolsystem_common.ToolActivePanelHelper):
bl_category = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, | |
<gh_stars>1-10
# encoding: UTF-8
import zmq
import itchat
import time
import os
import json
from befh.subscription_manager import SubscriptionManager
from befh.OkcoinAPI.OkcoinMarket import OkcoinMarket
from befh.FinexAPI.BitfinexMarket import BitfinexMarket
import logging
import re
import random
import numpy as np
def calcaccountsamount(TradeClients, exs, inss):
    """Total the value of all holdings on the given exchanges in the base
    exchange's fiat currency, log per-instrument totals, and return the sum.

    BaseEx ("OkCoinCN") balances are taken from the client's amount['total'];
    other exchanges' spot holdings are converted via the best ask ("a1") in
    the module-level global exchanges_snapshot (presumably maintained by the
    market-data feed -- TODO confirm), and USD balances are converted with the
    client's currency converter.
    """
    AccountsAmount = 0
    BaseEx = "OkCoinCN"
    # Per-instrument (per-coin) running totals across all exchanges.
    insamount = {}
    for inscon in inss:
        insamount[inscon] = 0.0
    for ex in exs:
        if ex == BaseEx and ex in TradeClients.keys():
            AccountsAmount = AccountsAmount + TradeClients[ex].amount['total']
            for excon in TradeClients[ex].amount.keys():
                for ins in inss:
                    if ins in excon:
                        insamount[ins] = insamount[ins] + TradeClients[ex].amount[excon]
        elif ex != BaseEx and BaseEx in TradeClients.keys() and ex in TradeClients.keys():
            client = TradeClients[ex]
            for symbol in client.amount:
                if "SPOT" in symbol:
                    # Extract the 3-letter coin code from e.g. "SPOT_btcusd".
                    ins = re.match(r"[a-zA-Z]+_([a-zA-Z]{3})[a-zA-Z]+$", symbol).group(1)
                    instmt = '_'.join(["SPOT", ins]) + TradeClients[BaseEx].currency
                    snapshot = '_'.join([BaseEx, instmt])
                    if snapshot in exchanges_snapshot.keys():
                        AccountsAmount = AccountsAmount + exchanges_snapshot[snapshot]["a1"] * client.amount[symbol]
                elif "USD" == symbol:
                    AccountsAmount = AccountsAmount + client.fc.convert(client.amount[symbol], symbol, "CNY")
                for ins in inss:
                    if ins in symbol:
                        insamount[ins] = insamount[ins] + client.amount[symbol]
    # Log the fiat value of each instrument's combined holdings.
    for ins in inss:
        instmt = '_'.join(["SPOT", ins]) + TradeClients[BaseEx].currency
        snapshot = '_'.join([BaseEx, instmt])
        if snapshot in exchanges_snapshot.keys():
            insvalue = exchanges_snapshot[snapshot]["a1"] * insamount[ins]
            logging.warning(ins + " " + "{:.4f}".format(insvalue))
    return AccountsAmount
def LoadRecord(snapshot1, snapshot2, snapshot3, arbitragecode, arbitrage_record):
    """Fetch (or create) the bookkeeping record for one arbitrage code and
    reset the per-leg detail entries for its three snapshots.

    A brand-new record starts ready ("isready": True) and timestamped; an
    existing record keeps its readiness flag and timestamp, but the three
    detail entries are always re-initialized to the idle state.
    Returns the record, which is also stored in arbitrage_record.
    """
    if arbitragecode not in arbitrage_record:
        arbitrage_record[arbitragecode] = {"isready": True, "detail": {}, "time": time.time()}
    record = arbitrage_record[arbitragecode]
    for snap in (snapshot1, snapshot2, snapshot3):
        record["detail"][snap] = {"iscompleted": True, "originalamount": 0.0, "remainamount": 0.0,
                                  "orderid": 0, "executedamount": 0.0, "executedvolume": 0.0}
    return record
def RefreshRecord(TradeClients, record, ex1, ex2, ins1, ins2, arbitrage_record, arbitragecode, globalvar,
                  arbitrage_direction):
    """Settle a finished triangular-arbitrage round and rebalance balances.

    When all three legs in ``record`` are completed, computes the realized
    profit ratio from the legs' average execution prices, resets the per-leg
    bookkeeping, marks the record ready, and refreshes both clients' account
    info. At most once a minute per exchange pair it also logs total account
    value and withdraws coins from one exchange to the other when a balance
    exceeds 1.5x the rebalancing threshold.

    NOTE(review): reads the module-level global ``exchanges_snapshot`` for
    best bid ("b1") / ask ("a1") prices -- assumed maintained elsewhere.
    Returns the (mutated) record.
    """
    client1 = TradeClients[ex1]
    client2 = TradeClients[ex2]
    # The three legs of the triangle: ins1/fiat on ex1, ins2/ins1 on ex2,
    # ins2/fiat on ex1.
    instmt1 = '_'.join(["SPOT", ins1]) + client1.currency
    instmt2 = '_'.join(["SPOT", ins2]) + ins1
    instmt3 = '_'.join(["SPOT", ins2]) + client1.currency
    snapshot1 = '_'.join([ex1, instmt1])
    snapshot2 = '_'.join([ex2, instmt2])
    snapshot3 = '_'.join([ex1, instmt3])
    threshhold = globalvar["threshhold"]
    profit = 0
    updateaccount = False
    if not record["isready"] and record["detail"][snapshot1]["iscompleted"] and record["detail"][snapshot2][
        "iscompleted"] and record["detail"][snapshot3]["iscompleted"]:
        if record["detail"][snapshot1]["executedvolume"] != 0 \
                and record["detail"][snapshot2]["executedvolume"] != 0 \
                and record["detail"][snapshot3]["executedvolume"] != 0:
            # Profit ratio from the three average execution prices; the
            # formula depends on which direction this arbitragecode encodes.
            if ex1 + ex2 + ins1 + ins2 == arbitragecode:
                profit = 1 / (record["detail"][snapshot2]["executedamount"] / record["detail"][snapshot2][
                    "executedvolume"]) * (
                             record["detail"][snapshot3]["executedamount"] / record["detail"][snapshot3][
                         "executedvolume"]) / (
                             record["detail"][snapshot1]["executedamount"] / record["detail"][snapshot1][
                         "executedvolume"]) - 1
            else:
                profit = (record["detail"][snapshot2]["executedamount"] / record["detail"][snapshot2][
                    "executedvolume"]) * (
                             record["detail"][snapshot1]["executedamount"] / record["detail"][snapshot1][
                         "executedvolume"]) / (
                             record["detail"][snapshot3]["executedamount"] / record["detail"][snapshot3][
                         "executedvolume"]) - 1
        # Reset every leg to the idle state for the next round.
        record["isready"] = True
        record["time"] = time.time()
        record["detail"][snapshot1]["iscompleted"] = True
        record["detail"][snapshot1]["originalamount"] = 0.0
        record["detail"][snapshot1]["orderid"] = 0
        record["detail"][snapshot1]["remainamount"] = 0.0
        record["detail"][snapshot1]["executedamount"] = 0.0
        record["detail"][snapshot1]["executedvolume"] = 0.0
        record["detail"][snapshot2]["iscompleted"] = True
        record["detail"][snapshot2]["originalamount"] = 0.0
        record["detail"][snapshot2]["orderid"] = 0
        record["detail"][snapshot2]["remainamount"] = 0.0
        record["detail"][snapshot2]["executedamount"] = 0.0
        record["detail"][snapshot2]["executedvolume"] = 0.0
        record["detail"][snapshot3]["iscompleted"] = True
        record["detail"][snapshot3]["originalamount"] = 0.0
        record["detail"][snapshot3]["orderid"] = 0
        record["detail"][snapshot3]["remainamount"] = 0.0
        record["detail"][snapshot3]["executedamount"] = 0.0
        record["detail"][snapshot3]["executedvolume"] = 0.0
        updateaccount = True
    # update arbitrage_record
    arbitrage_record[arbitragecode] = record
    # update account immediately
    if updateaccount:
        client1.get_info()
        client2.get_info()
        logging.warning(arbitragecode + " " + "{:.4f}".format(
            calcaccountsamount(TradeClients, [ex1, ex2], [ins1, ins2])) + " profit:" + "{:.2%}".format(profit))
    # Throttle the logging/rebalancing below to once a minute per pair.
    transcode = "_".join([ex1, ex2, ins1, ins2])
    if transcode not in globalvar.keys():
        globalvar[transcode] = 0
    if record["isready"] and time.time() - globalvar[transcode] > 60:
        globalvar[transcode] = time.time()
        if not updateaccount:
            client1.get_info()
            client2.get_info()
            logging.warning(
                ex1 + ex2 + " " + "{:.4f}".format(calcaccountsamount(TradeClients, [ex1, ex2], [ins1, ins2])))
        # rebalance accounts: move whole multiples of `threshhold` (valued in
        # fiat) from whichever side is flush to whichever side is short; the
        # small random factor presumably avoids sending conspicuously round
        # amounts -- TODO confirm. globalvar[ins] looks like a per-coin
        # minimum trade size -- verify against its initialization.
        availablemoney = client1.available[instmt1] * exchanges_snapshot[snapshot1]["a1"]
        if availablemoney > 1.5 * threshhold and client2.available['_'.join(["SPOT", ins1]) + client2.currency] < 100 * \
                globalvar[ins1]:
            client1.withdrawcoin(instmt1,
                                 np.floor((availablemoney - 0.5 * threshhold) / threshhold) * threshhold /
                                 exchanges_snapshot[snapshot1]["a1"] * (1 - random.random() / 100),
                                 client2.address[ins1],
                                 "address")
        availablemoney = client2.available['_'.join(["SPOT", ins2]) + client2.currency] * \
                         exchanges_snapshot[snapshot3]["a1"]
        if availablemoney > 1.5 * threshhold and client1.available[instmt3] < 100 * globalvar[ins2]:
            client2.withdrawcoin(ins2,
                                 np.floor((availablemoney - 0.5 * threshhold) / threshhold) * threshhold /
                                 exchanges_snapshot[snapshot3]["b1"] * (1 - random.random() / 100),
                                 client1.address[ins2],
                                 "")
        availablemoney = client1.available[instmt3] * exchanges_snapshot[snapshot3]["a1"]
        if availablemoney > 1.5 * threshhold and client2.available['_'.join(["SPOT", ins2]) + client2.currency] < 100 * \
                globalvar[ins2]:
            client1.withdrawcoin(instmt3,
                                 np.floor((availablemoney - 0.5 * threshhold) / threshhold) * threshhold /
                                 exchanges_snapshot[snapshot3]["a1"] * (1 - random.random() / 100),
                                 client2.address[ins2],
                                 "address")
        availablemoney = client2.available['_'.join(["SPOT", ins1]) + client2.currency] * exchanges_snapshot[snapshot1][
            "a1"]
        if availablemoney > 1.5 * threshhold and client1.available[instmt1] < 100 * globalvar[ins1]:
            client2.withdrawcoin(ins1,
                                 np.floor((availablemoney - 0.5 * threshhold) / threshhold) * threshhold /
                                 exchanges_snapshot[snapshot1]["b1"] * (1 - random.random() / 100),
                                 client1.address[ins1],
                                 "")
    return record
def ReplaceOrder(instmt, insthresh, record, snapshot, client):
    """Keep the outstanding order for one arbitrage leg pegged to the best price.

    If the leg's order is still open at the current best bid/ask, it is left
    alone. Otherwise it is cancelled and, when the remaining amount exceeds
    ``insthresh``, re-placed at the new best price; a remainder at or below
    ``insthresh`` is treated as dust, the leg is marked completed, and the
    executed totals are accumulated into the record.

    NOTE(review): reads the module-level global ``exchanges_snapshot`` for
    best bid ("b1") / ask ("a1"). Returns the (mutated) record.
    """
    if not record["detail"][snapshot]["iscompleted"]:
        if record["detail"][snapshot]["orderid"] in client.orderids and ((client.orders[record["detail"][snapshot][
            "orderid"]].side == "buy" and client.orders[record["detail"][snapshot]["orderid"]].price ==
                exchanges_snapshot[snapshot]["a1"]) or (
                client.orders[record["detail"][snapshot]["orderid"]].side == "sell" and client.orders[
            record["detail"][snapshot]["orderid"]].price == exchanges_snapshot[snapshot]["b1"])):
            # Still open at the current best price: nothing to do.
            pass
        else:
            status, order = client.cancelorder(instmt, record["detail"][snapshot]["orderid"])
            if order.remaining_amount > insthresh:
                # Re-place the remainder at the fresh best price.
                if order.side == "sell":
                    orderid = client.sell(instmt, order.remaining_amount, exchanges_snapshot[snapshot]["b1"])
                elif order.side == "buy":
                    orderid = client.buy(instmt, order.remaining_amount, exchanges_snapshot[snapshot]["a1"])
                else:
                    # Defensive: unknown side means nothing was re-placed;
                    # keep the previous order id (orderid was unbound here before).
                    orderid = record["detail"][snapshot]["orderid"]
                if isinstance(orderid, str) and "Invalid order: not enough exchange balance for" in orderid:
                    # Exchange rejected the re-placement for lack of balance:
                    # consider this leg done. BUGFIX: previously this case fell
                    # through to the int assertion below and always crashed.
                    record["detail"][snapshot]["iscompleted"] = True
                else:
                    assert isinstance(orderid, int), "orderid(%s) = %s" % (type(orderid), orderid)
                    record["detail"][snapshot]["orderid"] = orderid
            else:
                # Remainder is dust: close the leg and bank what was executed.
                record["detail"][snapshot]["iscompleted"] = True
                record["detail"][snapshot]["executedamount"] = record["detail"][snapshot][
                    "executedamount"] + order.avg_execution_price * order.executed_amount
                record["detail"][snapshot]["executedvolume"] = record["detail"][snapshot][
                    "executedvolume"] + order.executed_amount
    return record
def UpdateRecord(client, record, instmt, orderid, snapshot, amount):
    """Query one order's status and (re)write the leg's detail entry.

    A completed order contributes its average-price * executed-volume value;
    a still-open order records zero executed amount and volume.
    """
    status, order = client.orderstatus(instmt, orderid)
    if status:
        executedamount = order.avg_execution_price * order.executed_amount
        executedvolume = order.executed_amount
    else:
        executedamount = 0
        executedvolume = 0
    record["detail"][snapshot] = {
        "iscompleted": status,
        "originalamount": amount,
        "remainamount": 0.0,
        "orderid": orderid,
        "executedamount": executedamount,
        "executedvolume": executedvolume,
    }
def Exchange3Arbitrage(globalvar, mjson, exchanges_snapshot, TradeClients, ex1, ex2, ins1, ins2, ins1thresh, ins2thresh,
ratiothreshhold=0.01):
keys = exchanges_snapshot.keys()
client1 = TradeClients[ex1]
client2 = TradeClients[ex2]
instmt1 = '_'.join(["SPOT", ins1]) + client1.currency
instmt2 = '_'.join(["SPOT", ins2]) + ins1
instmt3 = '_'.join(["SPOT", ins2]) + client1.currency
snapshot1 = '_'.join([ex1, instmt1])
snapshot2 = '_'.join([ex2, instmt2])
snapshot3 = '_'.join([ex1, instmt3])
arbitrage_direction = 0
if (mjson["exchange"] in [ex1, ex2]) and (mjson["instmt"] in [instmt1, instmt2, instmt3]) and (
snapshot1 in keys) and (snapshot2 in keys) and (snapshot3 in keys):
"""BTC->ETH套利"""
# 记录套利完成情况
arbitragecode = ex1 + ex2 + ins1 + ins2
if arbitragecode not in arbitrage_record.keys():
record = LoadRecord(snapshot1, snapshot2, snapshot3, arbitragecode, arbitrage_record)
RefreshRecord(TradeClients, record, ex1, ex2, ins1, ins2, arbitrage_record, arbitragecode, globalvar,
arbitrage_direction)
else:
record = LoadRecord(snapshot1, snapshot2, snapshot3, arbitragecode, arbitrage_record)
if record["isready"]:
# 计算是否有盈利空间
ratio = 1 / exchanges_snapshot[snapshot2]["a1"] * exchanges_snapshot[snapshot3][
"b1"] / exchanges_snapshot[snapshot1]["a1"] - 1
if ratio > ratiothreshhold:
arbitrage_direction = 1
executed = False
amountbasic = min(exchanges_snapshot[snapshot2]["a1"] * exchanges_snapshot[snapshot2]["aq1"],
exchanges_snapshot[snapshot1]["aq1"], exchanges_snapshot[snapshot3][
"bq1"] * exchanges_snapshot[snapshot3]["b1"] / exchanges_snapshot[snapshot1][
"a1"])
amount3 = client1.available[instmt3] * exchanges_snapshot[snapshot3]["b1"] / \
exchanges_snapshot[snapshot1]["a1"] - ins1thresh
amount2 = client2.available['_'.join(["SPOT", ins1]) + client2.currency] - ins1thresh
amount = min(amountbasic, amount3, amount2)
if client1.available[client1.currency] / exchanges_snapshot[snapshot1]["a1"] < amount + ins1thresh:
orderid3 = client1.sell(instmt3, amount * exchanges_snapshot[snapshot1]["a1"] /
exchanges_snapshot[snapshot3]["b1"],
exchanges_snapshot[snapshot3]["b1"])
assert isinstance(orderid3, int), "orderid(%s) = %s" % (type(orderid3), orderid3)
UpdateRecord(client1, record, instmt3, orderid3, snapshot3,
amount * exchanges_snapshot[snapshot1]["a1"] /
exchanges_snapshot[snapshot3]["b1"])
record["detail"][snapshot1]["iscompleted"] = True
record["detail"][snapshot2]["iscompleted"] = True
executed = True
elif amount >= ins1thresh and amount * exchanges_snapshot[snapshot1]["a1"] / \
exchanges_snapshot[snapshot3]["b1"] >= ins2thresh:
try:
orderid3 = client1.sell(instmt3, amount * exchanges_snapshot[snapshot1]["a1"] /
exchanges_snapshot[snapshot3]["b1"],
exchanges_snapshot[snapshot3]["b1"])
io = 0
while not isinstance(orderid3, int) and io < 5:
orderid3 = client1.sell(instmt3, amount * exchanges_snapshot[snapshot1]["a1"] /
exchanges_snapshot[snapshot3]["b1"],
exchanges_snapshot[snapshot3]["b1"])
io = io + 1
except Exception as e:
orderid3 = client1.sell(instmt3, amount * exchanges_snapshot[snapshot1]["a1"] /
exchanges_snapshot[snapshot3]["b1"],
exchanges_snapshot[snapshot3]["b1"])
try:
orderid1 = client1.buy(instmt1, amount, exchanges_snapshot[snapshot1]["a1"])
io = 0
while not isinstance(orderid1, int) and io < 5:
orderid1 = client1.buy(instmt1, amount, exchanges_snapshot[snapshot1]["a1"])
io = io + 1
except Exception as e:
orderid1 = client1.buy(instmt1, amount, exchanges_snapshot[snapshot1]["a1"])
try:
orderid2 = client2.buy(instmt2, amount / exchanges_snapshot[snapshot2]["a1"],
exchanges_snapshot[snapshot2]["a1"])
io = 0
while not isinstance(orderid2, int) and io < 5:
orderid2 = client2.buy(instmt2, amount / exchanges_snapshot[snapshot2]["a1"],
exchanges_snapshot[snapshot2]["a1"])
io = io + 1
except Exception as e:
orderid2 = client2.buy(instmt2, amount / exchanges_snapshot[snapshot2]["a1"],
exchanges_snapshot[snapshot2]["a1"])
if isinstance(orderid3, int):
UpdateRecord(client1, record, instmt3, orderid3, snapshot3,
amount * exchanges_snapshot[snapshot1]["a1"] /
exchanges_snapshot[snapshot3]["b1"])
if isinstance(orderid1, int):
UpdateRecord(client1, record, instmt1, orderid1, snapshot1, amount)
if isinstance(orderid2, int):
UpdateRecord(client2, record, instmt2, orderid2, snapshot2,
amount / exchanges_snapshot[snapshot2]["a1"])
executed = True
else:
if arbitragecode not in globalvar.keys():
globalvar[arbitragecode] = time.time()
if time.time() - globalvar[arbitragecode] > 60:
globalvar[arbitragecode] = time.time()
logging.warning(
arbitragecode + " The arbitrage space is " + "{:.2%}".format(ratio) + " but no amount!")
if executed:
record["isready"] = False
else:
record = ReplaceOrder(instmt1, ins1thresh, record, snapshot1, | |
<gh_stars>1-10
# -*- coding: iso-8859-1 -*-
"""
This code creates the Results plots for Ranjan & Sasselov 2016b.
"""
########################
###Import useful libraries
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pdb
from matplotlib.pyplot import cm
def cm2inch(cm): #function to convert cm to inches; useful for complying with Astrobiology size guidelines
    """Convert a length from centimeters to inches (1 inch = 2.54 cm)."""
    inches = cm/2.54
    return inches
########################
###Set important constants
########################
hc=1.98645e-9 #value of h*c in erg*nm, useful to convert from ergs/cm2/s/nm to photons/cm2/s/nm
########################
###Specify which family of plots to generate (each flag toggles one `if` section below)
########################
plot_intvsflux=False #Plot to demonstrate difference between surface radiance and surface flux.
plot_alb_zenithangle=False #plot to demonstrate the impact of albedo and zenith angle on emergent surface intensity
plot_co2_limits=False #this time, using a fixed level of N2 and variable amounts of CO2.
plot_altgas_limits=False #fixed level of N2, various levels of other gases.
plot_dosimeters_co2=True #Plots the convolution of the various action spectra with the surficial spectra for the co2 study, integrated, and normalized. Helps us see how these parameters vary.
plot_dosimeters_h2o=True #Plots the convolution of the various action spectra with the surficial spectra for h2o
plot_dosimeters_so2=True #Plots the convolution of the various action spectra with the surficial spectra for so2
plot_dosimeters_h2s=True #Plots the convolution of the various action spectra with the surficial spectra for h2s
plot_dosimeters_ch4=False #Plots the convolution of the various action spectra with the surficial spectra for ch4
plot_dosimeters_o2=False #Plots the convolution of the various action spectra with the surficial spectra for o2
plot_dosimeters_o3=False #Plots the convolution of the various action spectra with the surficial spectra for o3
############################
#######Plot to demonstrate the difference between surface flux and surface intensity
############################
if plot_intvsflux:
    # Load the two-stream output: wavelength bin centers plus the four spectral
    # quantities we want to contrast (columns 2-6 of the data file).
    (wavelengths, flux_toa, flux_surface,
     intensity_total, intensity_surface) = np.genfromtxt('./TwoStreamOutput/rugheimer_earth_epoch0_a=newsnow_z=60.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,5,6), unpack=True)
    fig, ax = plt.subplots(1, figsize=(8,5), sharex=True)
    # Multiplying by wavelengths/hc converts erg/s/cm2/nm into photons/s/cm2/nm.
    ax.plot(wavelengths, flux_toa*wavelengths/hc, linestyle='-', color='black', marker='s', label='TOA Flux')
    ax.plot(wavelengths, intensity_total*wavelengths/hc, linestyle='-', color='red', marker='s', label='BOA Actinic Flux')
    ax.plot(wavelengths, intensity_surface*wavelengths/hc, linestyle='-', color='purple', marker='s', label='Surface Radiance')
    ax.plot(wavelengths, flux_surface*wavelengths/hc, linestyle='-', color='blue', marker='s', label='Surface Flux')
    ax.set_title('R+2015 Atmosphere, A=New Snow, SZA=60')
    ax.legend(loc=0, ncol=1, borderaxespad=0.)
    ax.set_yscale('log')
    ax.set_ylabel(r'photons/s/cm$^2$/nm')
    ax.set_ylim([1.e9, 1.e15])
    ax.set_xlim([130.,500.])
    ax.set_xlabel('nm')
    plt.savefig('./Plots/intvsflux.eps', orientation='portrait',papertype='letter', format='eps')
############################
#######Plot to demonstrate impact of albedo and zenith angle on emergent surface intensity. We use the Rugheimer prebiotic atmosphere model (mixing ratios, surface pressure, T/P profile) for this reference system.
############################
if plot_alb_zenithangle:
    ###When importing files, variables are:
    ###wav_x: centers of wavelength bins, nm
    ###toa_intensity_x: top-of-atmosphere intensity (incident), erg/s/nm/cm2
    ###surface_flux_x: total flux incident on surface, erg/s/nm/cm2
    ###surface_intensity_x: total intensity incident on the surface (direct+diffuse), erg/s/nm/cm2
    ###surface_intensity_diffuse_x: diffuse total intensity incident on the surface, erg/s/nm/cm2
    ###surface_intensity_direct_x: direct total intensity incident on the surface, erg/s/nm/cm2
    #####Set up info about the files to extract
    albedolist=['1', 'newsnow', 'oldsnow', '0.2', 'desert', 'ocean', 'tundra', '0'] #list of albedos we consider
    AlbedoLabels=['1', 'New Snow', 'Old Snow', '0.2', 'Desert', 'Ocean', 'Tundra', '0']#labels for the figure
    #####Plot Figure
    #Step 1: Initialize Figure (one panel per solar zenith angle; colors cycle over albedo)
    fig1, (ax1, ax2, ax3)=plt.subplots(3, figsize=(cm2inch(16.5),10), sharex=True)
    colorseq1=iter(cm.rainbow(np.linspace(0,1,len(albedolist))))
    colorseq2=iter(cm.rainbow(np.linspace(0,1,len(albedolist))))
    colorseq3=iter(cm.rainbow(np.linspace(0,1,len(albedolist))))
    #Step 2: loop over all files, plot figure
    for albind in range(0, len(albedolist)):
        albedo=albedolist[albind]
        #Load one two-stream output file per zenith angle for this albedo
        wav_z_0, toa_intensity_z_0, surface_flux_z_0, surface_intensity_z_0, surface_intensity_diffuse_z_0, surface_intensity_direct_z_0=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a='+albedo+'_z=0.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True) #zenith angle=0 degrees
        wav_z_48p2, toa_intensity_z_48p2, surface_flux_z_48p2, surface_intensity_z_48p2, surface_intensity_diffuse_z_48p2, surface_intensity_direct_z_48p2=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a='+albedo+'_z=48.2.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True) #zenith angle=48.2 degrees
        wav_z_66p5, toa_intensity_z_66p5, surface_flux_z_66p5, surface_intensity_z_66p5, surface_intensity_diffuse_z_66p5, surface_intensity_direct_z_66p5=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a='+albedo+'_z=66.5.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True) #zenith angle=66.5 degrees
        if albind==0: #Initialize with TOA flux; it does not depend on albedo, so plot it only once
            ax1.plot(wav_z_0, toa_intensity_z_0*wav_z_0/hc, marker='.', color='black', label=r'TOA Flux')
            ax2.plot(wav_z_48p2, toa_intensity_z_48p2*wav_z_48p2/hc, marker='.', color='black', label=r'TOA Flux')
            ax3.plot(wav_z_66p5, toa_intensity_z_66p5*wav_z_66p5/hc, marker='.', color='black', label=r'TOA Flux')
        #wav/hc converts erg/s/cm2/nm into photons/s/cm2/nm
        ax1.plot(wav_z_0, surface_intensity_z_0*wav_z_0/hc, marker='.', color=next(colorseq1), label=r'A='+AlbedoLabels[albind])
        ax2.plot(wav_z_48p2, surface_intensity_z_48p2*wav_z_48p2/hc, marker='.', color=next(colorseq2), label=r'A='+AlbedoLabels[albind])
        ax3.plot(wav_z_66p5, surface_intensity_z_66p5*wav_z_66p5/hc, marker='.', color=next(colorseq3), label=r'A='+AlbedoLabels[albind])
    #Step 3: Clean up figure
    ylimits=[1.e9, 1.e15]
    ax1.set_title(r'z=0$^\circ$')
    ax1.legend(bbox_to_anchor=[0, 1.13, 1., .152], loc=3, ncol=3, mode='expand', borderaxespad=0., fontsize=10)
    ax2.set_title(r'z=48.2$^\circ$')
    ax3.set_title(r'z=66.5$^\circ$')
    ax1.set_yscale('log')
    ax1.set_ylabel(r'photons/s/cm$^2$/nm')
    ax1.set_ylim(ylimits)
    ax2.set_yscale('log')
    ax2.set_ylabel(r'photons/s/cm$^2$/nm')
    ax2.set_ylim(ylimits)
    ax3.set_yscale('log')
    ax3.set_ylabel(r'photons/s/cm$^2$/nm')
    ax3.set_ylim(ylimits)
    ax3.set_xlim([100.,500.])
    ax3.set_xlabel('nm')
    plt.tight_layout(rect=(0,0,1,0.9))
    plt.savefig('./Plots/paperplots_a_z_dependence.eps', orientation='portrait',papertype='letter', format='eps')
if plot_co2_limits:
    ###When importing files, variables are:
    ###ind 0: wav_x: centers of wavelength bins, nm
    ###ind 1: toa_intensity_x: top-of-atmosphere intensity (incident), erg/s/nm/cm2
    ###ind 2: surface_flux_x: total flux incident on surface, erg/s/nm/cm2
    ###ind 3: surface_intensity_x: total intensity incident on the surface (direct+diffuse), erg/s/nm/cm2
    ###ind 4: surface_intensity_diffuse_x: diffuse total intensity incident on the surface, erg/s/nm/cm2
    ###ind 5: surface_intensity_direct_x: direct total intensity incident on the surface, erg/s/nm/cm2
    ###############Set up info about files to extract
    N_co2_base=2.09e24 #column density of CO2 in base case (Rugheimer+2015)
    co2multiplelist=[0., 1.e-6,1.e-5, 1.e-4, 1.e-3, 0.00893, 1.e-2, 1.e-1, 0.6, 1., 1.33, 1.e1, 46.6, 1.e2, 470., 1.e3]
    co2dict={} #maps str(multiple) -> max-case surface intensity, for ratio checks (see commented print below)
    isphysical=[False, False, False, False, False, True, False, False, True, True, True, False, True, False, True, False] #which of these models have a physically motivated column depth
    ###############Read in base Rugheimer abundance cases
    wav_max_rugheimer, toa_intensity_max_rugheimer, surface_flux_max_rugheimer, surface_intensity_max_rugheimer, surface_intensity_diffuse_max_rugheimer, surface_intensity_direct_max_rugheimer=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a=newsnow_z=0.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True)
    wav_min_rugheimer, toa_intensity_min_rugheimer, surface_flux_min_rugheimer, surface_intensity_min_rugheimer, surface_intensity_diffuse_min_rugheimer, surface_intensity_direct_min_rugheimer=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a=tundra_z=66.5.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True)
    ##############Figure comparing surface intensity under different levels of pCO2, and different values for A and Z
    #Set up figure basics outside the loop.
    fig1, (ax1, ax2)=plt.subplots(2, figsize=(cm2inch(16.5),10), sharex=True)
    colorseq1=iter(cm.rainbow(np.linspace(0,1,len(co2multiplelist))))
    colorseq2=iter(cm.rainbow(np.linspace(0,1,len(co2multiplelist))))
    #Plot TOA intensities (wav/hc converts erg/s/cm2/nm to photons/s/cm2/nm)
    #BUGFIX: the top panel previously multiplied by wav_min_rugheimer; use the matching max-case wavelength grid.
    ax1.plot(wav_max_rugheimer, toa_intensity_max_rugheimer*wav_max_rugheimer/hc, linestyle='-', color='black', label='TOA Flux')
    ax2.plot(wav_min_rugheimer, toa_intensity_min_rugheimer*wav_min_rugheimer/hc, linestyle='-', color='black', label='TOA Flux')
    #In a loop, load the intensities and plot
    for ind in range(0, len(co2multiplelist)):
        multiple=co2multiplelist[ind]
        colden_co2=multiple*N_co2_base
        #BUGFIX: test the fiducial case first. multiple==1. is also flagged in isphysical, so when
        #isphysical was tested first the fiducial branch was unreachable and the base Rugheimer
        #model never got its intended thicker line (cf. the equivalent test in plot_altgas_limits).
        if multiple==1.: #represent the base fiducial Rugheimer model with a different line
            linestylevar='-'
            linewidthvar=2.5
        elif isphysical[ind]: #have the physically motivated models represented differently
            linestylevar='-'
            linewidthvar=1.
        else: #the parametric exploration
            linestylevar='--'
            linewidthvar=1.
        wav_max, toa_intensity_max, surface_flux_max, surface_intensity_max, surface_intensity_diffuse_max, surface_intensity_direct_max=np.genfromtxt('./TwoStreamOutput/CO2lim/surface_intensities_co2limits_co2multiple='+str(multiple)+'_a=newsnow_z=0.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True) #maximum intensity for given atmosphere
        wav_min, toa_intensity_min, surface_flux_min, surface_intensity_min, surface_intensity_diffuse_min, surface_intensity_direct_min=np.genfromtxt('./TwoStreamOutput/CO2lim/surface_intensities_co2limits_co2multiple='+str(multiple)+'_a=tundra_z=66.5.dat', skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True) #minimum intensity for given atmosphere
        co2dict[str(multiple)]=surface_intensity_max
        ax1.plot(wav_max,surface_intensity_max*wav_max/hc, linestyle=linestylevar, linewidth=linewidthvar, color=next(colorseq1), label=r'$N_{CO_{2}}=$'+'{:.2E}'.format(colden_co2)+' cm$^{-2}$')
        ax2.plot(wav_min,surface_intensity_min*wav_min/hc, linestyle=linestylevar, linewidth=linewidthvar, color=next(colorseq2), label=r'$N_{CO_{2}}=$'+'{:.2E}'.format(colden_co2)+' cm$^{-2}$')
    #print (co2dict[str(co2multiplelist[8])])/(co2dict[str(co2multiplelist[0])])
    #pdb.set_trace()
    #Set up fine detail on figure
    ylimits=[1e7, 1e15]
    ax1.set_title(r'z=0$^\circ$, A=Fresh Snow')
    ax2.set_title(r'z=66.5$^\circ$, A=Tundra')
    ax1.legend(bbox_to_anchor=[0, 1.1, 1., .5], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
    ax1.set_yscale('log')
    ax1.set_ylabel(r'photons/s/cm$^2$/nm')
    ax1.set_ylim(ylimits)
    ax2.set_yscale('log')
    ax2.set_ylabel(r'photons/s/cm$^2$/nm')
    ax2.set_ylim(ylimits)
    ax2.set_xlim([100.,500.])
    ax2.set_xlabel('nm')
    plt.tight_layout(rect=(0,0,1,0.75))
    plt.savefig('./Plots/paperplots_co2_radiance.eps', orientation='portrait',papertype='letter', format='eps')
if plot_altgas_limits:
    ###When importing files, variables are:
    ###ind 0: wav_x: centers of wavelength bins, nm
    ###ind 1: toa_intensity_x: top-of-atmosphere intensity (incident), erg/s/nm/cm2
    ###ind 2: surface_flux_x: total flux incident on surface, erg/s/nm/cm2
    ###ind 3: surface_intensity_x: total intensity incident on the surface (direct+diffuse), erg/s/nm/cm2
    ###ind 4: surface_intensity_diffuse_x: diffuse total intensity incident on the surface, erg/s/nm/cm2
    ###ind 5: surface_intensity_direct_x: direct total intensity incident on the surface, erg/s/nm/cm2
    #####Set up info about the files to extract ##Maximum possible natural surface radiance case (z=0, albedo=fresh snow) aka "max"
    N_tot=2.0925e25#total column density of Rugheimer+2015 model in cm**-2
    gaslist=['h2o', 'ch4', 'so2', 'o2', 'o3', 'h2s'] #list of gases we are doing this for
    gaslabellist=['H2O', 'CH4', 'SO2', 'O2', 'O3', 'H2S'] #list of nicely formated gas names for plotting
    base_abundances=np.array([4.762e-3, 1.647e-6, 3.371e-11, 2.707e-6, 9.160e-11, 6.742e-11]) #molar concentration of each of these gases in the Rugheimer model; same order as gaslist.
    #dict holding the multiples of the molar concentration we are using
    gasmultiples={}
    gasmultiples['h2o']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3])
    gasmultiples['ch4']=np.array([1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3])
    gasmultiples['so2']=np.array([1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7])
    gasmultiples['o2']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
    gasmultiples['o3']=np.array([1., 1.e1, 1.e2, 1.e3])
    gasmultiples['h2s']=np.array([1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7])
    #####In a loop, extract and plot the files (one figure per gas, one curve per multiple)
    for gasind in range(0, len(gaslist)):
        gas=gaslist[gasind]
        base_abundance=base_abundances[gasind]
        multiples=gasmultiples[gas]
        gaslabel=gaslabellist[gasind]
        #####Set up figure basics
        fig, (ax1)=plt.subplots(1, figsize=(cm2inch(16.5),7), sharex=True)
        colorseq=iter(cm.rainbow(np.linspace(0,1,len(multiples))))
        for multind in range(0, len(multiples)):
            multiple=multiples[multind]
            colden_X=base_abundance*multiple*N_tot #total column density of gas X
            datafile='./TwoStreamOutput/gaslim/surface_intensities_'+gas+'limits_'+gas+'multiple='+str(multiple)+'_a=newsnow_z=0.dat'
            wav, toa_intensity, surface_flux, surface_intensity, surface_intensity_diffuse, surface_intensity_direct=np.genfromtxt(datafile, skip_header=1, skip_footer=0, usecols=(2,3,4,6,7,8), unpack=True)
            if multind==0:
                #TOA flux does not depend on the gas multiple, so plot it only once per figure
                ax1.plot(wav,toa_intensity*wav/hc, linestyle='-', linewidth=1, color='black', label=r'TOA Flux')
            if multiple==1.: #represent the base fiducial Rugheimer model with a different line
                linestylevar='-'
                linewidthvar=2.0
            else: #the parametric exploration
                linestylevar='--'
                linewidthvar=1.
            #wav/hc converts erg/s/cm2/nm into photons/s/cm2/nm
            ax1.plot(wav,surface_intensity*wav/hc, linestyle=linestylevar,linewidth=linewidthvar, color=next(colorseq), label=r'$N_{'+gaslabel+'}=$'+'{:.2E}'.format(colden_X)+' cm$^{-2}$')
        #####Finalize and save figure
        ax1.set_title(r'Varying Levels of '+gaslabel+r', (z=0$^\circ$, A=Fresh Snow)')
        ax1.legend(bbox_to_anchor=[0, 1.1, 1., .5], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
        ax1.set_yscale('log')
        ax1.set_ylabel(r'photons/s/cm$^2$/nm')
        ax1.set_ylim([1.e7, 1.e15])
        ax1.set_xlim([100.,500.])
        ax1.set_xlabel('nm')
        plt.tight_layout(rect=(0,0,1,0.75))
        plt.savefig('./Plots/paperplots_'+gas+'_radiance.eps', orientation='portrait',papertype='letter', format='eps')
if plot_dosimeters_co2:
###########First, import dosimeters
SZAs, albedos, N_CO2s, rad100_165s, rad200_300s, umpgly_193s, umpgly_230s, umpgly_254s,cucn3_254s, cucn3_300s=np.genfromtxt('./Doses/co2_uv_doses.dat', skip_header=2, skip_footer=0, usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), unpack=True, delimiter=' ') #wavelength in nm, relative efficiency unitless
max_inds=np.arange(0, len(albedos)/2) #first half of data is max radiance case (SZA=0, A=fresh snow)
min_inds=np.arange(len(albedos)/2, len(albedos))#last half of the data is the minimum radiance case (SZA=66.5, A=tundra)
wordsworthind=5
kastingupperind=-4
###########Now, plot the dosimeters vs CO2 concentration
umpgly_193s_max_normed=umpgly_193s[max_inds]/umpgly_193s[max_inds[-7]]
umpgly_230s_max_normed=umpgly_230s[max_inds]/umpgly_230s[max_inds[-7]]
umpgly_254s_max_normed=umpgly_254s[max_inds]/umpgly_254s[max_inds[-7]]
cucn3_254s_max_normed=cucn3_254s[max_inds]/cucn3_254s[max_inds[-7]]
cucn3_300s_max_normed=cucn3_300s[max_inds]/cucn3_300s[max_inds[-7]]
umpgly_193s_min_normed=umpgly_193s[min_inds]/umpgly_193s[min_inds[-7]]
umpgly_230s_min_normed=umpgly_230s[min_inds]/umpgly_230s[min_inds[-7]]
umpgly_254s_min_normed=umpgly_254s[min_inds]/umpgly_254s[min_inds[-7]]
cucn3_254s_min_normed=cucn3_254s[min_inds]/cucn3_254s[min_inds[-7]]
cucn3_300s_min_normed=cucn3_300s[min_inds]/cucn3_300s[min_inds[-7]]
#Initialize plot basics
fig=plt.figure(figsize=(cm2inch(16.5),7))
gs=gridspec.GridSpec(2,2, hspace=0.40,wspace=0.35, width_ratios=[2,1], top=.77, bottom=.1, left=.1, right=.95)
ax1=plt.subplot(gs[0])
ax2=plt.subplot(gs[2])
ax3=plt.subplot(gs[1])
ax4=plt.subplot(gs[3])
colorseq1=iter(cm.rainbow(np.linspace(0,1,5)))
colorseq2=iter(cm.rainbow(np.linspace(0,1,5)))
colorseq3=iter(cm.rainbow(np.linspace(0,1,5)))
colorseq4=iter(cm.rainbow(np.linspace(0,1,5)))
#Plot max case
ax1.plot(N_CO2s[max_inds], umpgly_193s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq1), label=r'UMP Gly Bond Cleavage ($\lambda_0=193$)')
ax1.plot(N_CO2s[max_inds], umpgly_230s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq1), label=r'UMP Gly Bond Cleavage ($\lambda_0=230$)')
ax1.plot(N_CO2s[max_inds], umpgly_254s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq1), label=r'UMP Gly Bond Cleavage ($\lambda_0=254$)')
ax1.plot(N_CO2s[max_inds], cucn3_254s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq1), label=r'CuCN$_3$ Photoionization ($\lambda_0=254$)')
ax1.plot(N_CO2s[max_inds], cucn3_300s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq1), label=r'CuCN$_3$ Photoionization ($\lambda_0=300$)')
ax1.set_title('SZA=0, Albedo=New Snow')
ax1.axvline(N_CO2s[max_inds[-7]], color='black', linewidth=2) #Mark the Rugheimer fiducial value
ax1.axhline(1., color='black', linewidth=1) #Mark the Rugheimer fiducial value
ax1.axvline(N_CO2s[max_inds[wordsworthind]], color='black', linewidth=1, linestyle='--') #Wordsworth lower limit
#ax1.axvline(N_CO2s[max_inds[kastingupperind]], color='black', linewidth=1, linestyle='--') #Kasting upper limit
#Plot min case
ax2.set_title('SZA=66.5, Albedo=Tundra')
ax2.plot(N_CO2s[min_inds], umpgly_193s_min_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq2), label=r'UMP Gly Bond Cleavage ($\lambda_0=193$)')
ax2.plot(N_CO2s[min_inds], umpgly_230s_min_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq2), label=r'UMP Gly Bond Cleavage ($\lambda_0=230$)')
ax2.plot(N_CO2s[min_inds], umpgly_254s_min_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq2), label=r'UMP Gly Bond Cleavage ($\lambda_0=254$)')
ax2.plot(N_CO2s[min_inds], cucn3_254s_min_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq2), label=r'CuCN$_3$ Photoionization ($\lambda_0=254$)')
ax2.plot(N_CO2s[min_inds], cucn3_300s_min_normed,linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq2), label=r'CuCN$_3$ Photoionization ($\lambda_0=300$)')
ax2.axvline(N_CO2s[min_inds[-7]], color='black', linewidth=2) #Mark the Rugheimer fiducial value
ax2.axhline(1., color='black', linewidth=1) #Mark the Rugheimer fiducial value
ax2.axvline(N_CO2s[min_inds[wordsworthind]], color='black', linewidth=1, linestyle='--') #Wordsworth lower limit
#ax2.axvline(N_CO2s[min_inds[kastingupperind]], color='black', linewidth=1, linestyle='--') #Kasting upper limit
#Plot max case
ax3.plot(N_CO2s[max_inds], umpgly_193s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq3), label=r'UMP Gly Bond Cleavage ($\lambda_0=193$)')
ax3.plot(N_CO2s[max_inds], umpgly_230s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq3), label=r'UMP Gly Bond Cleavage ($\lambda_0=230$)')
ax3.plot(N_CO2s[max_inds], umpgly_254s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq3), label=r'UMP Gly Bond Cleavage ($\lambda_0=254$)')
ax3.plot(N_CO2s[max_inds], cucn3_254s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq3), label=r'CuCN$_3$ Photoionization ($\lambda_0=254$)')
ax3.plot(N_CO2s[max_inds], cucn3_300s_max_normed, linestyle='--',markersize=4, marker='s',linewidth=1, color=next(colorseq3), label=r'CuCN$_3$ Photoionization ($\lambda_0=300$)')
ax3.axvline(N_CO2s[max_inds[-7]], color='black', linewidth=2) #Mark the Rugheimer fiducial value
ax3.axhline(1., color='black', linewidth=1) #Mark the Rugheimer fiducial value
ax3.axvline(N_CO2s[max_inds[wordsworthind]], color='black', linewidth=1, linestyle='--') #Wordsworth lower limit
#ax3.axvline(N_CO2s[max_inds[kastingupperind]], color='black', linewidth=1, linestyle='--') #Kasting upper limit
#Plot | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 17:55:48 2013
@author: michal
"""
import scipy.interpolate as inter
import numpy as np
import matplotlib.pyplot as plt
import numpy.random
import os
os.system('rm /tmp/fig*')  # clear frame images from any previous run (shell command; assumes a Unix shell -- TODO confirm)
import vtk
import vtk.util.numpy_support as VN
#G = np.zeros(9)
#G[1:5] = 1.
#G[5:] = 1./4.
#print G
#A = -0.152
# Lattice size and run-control parameters for the simulation loop below.
NX = 64
NY = NX
nt = 3
pt = 1
plotMe = True
tau = 1.
cs2 = 1./3.  # lattice sound speed squared
dt = 1.
dx = 1.
#def PR(rho):
#return c2*rho*(-b2**3*rho**3/64+b2**2*rho**2/16+b2*rho/4+1)*T/(1-b2*rho/4)**3-a2*rho**2
#def neighbors(arr,x,y,n=3):
# ''' Given a 2D-array, returns an nxn array whose "center" element is arr[x,y]'''
# arr=np.roll(np.roll(arr,shift=-x+1,axis=0),shift=-y+1,axis=1)
# return arr[:n,:n]
# Distribution functions: one population per lattice node per direction (9 directions).
f_in = np.ones((NX,NY,9))
f_out = np.ones((NX,NY,9))
# Node coordinate grids, transposed so that X varies along axis 0.
X,Y = np.meshgrid(np.arange(NX),np.arange(NY))
X = np.array(X.T,dtype=float)
Y = np.array(Y.T,dtype=float)
#rho = np.zeros_like(X)
#rho = np.sin(X / NX * 2. * np.pi) * np.sin(Y / NY * 2. * np.pi)
#ChemPot = np.sin(X / NX * 2. * np.pi) * np.sin(Y / NY * 2. * np.pi)
#gradX
#WbX = np.cos(X / NX * 2 * np.pi) / NX * 2. * np.pi * np.sin(Y / NY * 2. * np.pi)
#gradY
#WbY = np.cos(Y / NY * 2 * np.pi) / NY * 2. * np.pi * np.sin(X / NX * 2. * np.pi)
#Wb = Wb*cs2 - rho * Wb
#Laplace
#Wb = -np.sin(X / NX * 2 * np.pi) * (1. / NX * 2. * np.pi)**2
U = np.ones((NX,NY,2))  # macroscopic velocity field (recomputed each step in the main loop)
# Lattice weights: 4/9 for the rest population, 1/9 on the axes, 1/36 on the diagonals.
W = np.ones(9)
W[0] = 4./9.
W[1:5] = 1./9.
W[5:] = 1./36.
# Discrete velocity set: e[i] = (ex, ey) for each of the nine directions.
e = np.ndarray((2,9),dtype='int64')
e[0] = ( 0, 1, 0, -1, 0, 1, -1, -1, 1)
e[1] = ( 0, 0, 1, 0, -1, 1, 1, -1, -1)
e = e.T
X1 = X/NX
Y1 = Y/NY
#rho[:,:] = 1.1 - ((X1)**2 + (Y1)**2) / 2.
#rho = 1. + np.random.random(rho.shape) / 10.
#for i in range(0,NX):
# # 1 - 1/(1+exp(x))
h =20000.
R = np.sqrt( (X/NX -0.5)**2 + (Y/NY-0.5)**2 )
# #rl = 1. + 0.1 * ( (1. - 1./(1.+np.exp(h * ((1.*i)/NX - 0.25)))) - (1. - 1./(1.+np.exp(h * ((1.*i)/NX - 0.75)))))
#rho = 0.2 + (1./(1.+np.exp(h * (R - 0.25)))) * 0.6
# Initial density: sharp vertical interface, dense (0.8) for x<32 and dilute (0.2) elsewhere.
rho = np.where(X<32, np.ones_like(R)*0.8,np.ones_like(R)*0.2)
# Free-energy parameters; the bulk chemical potential computed in the main loop is the
# derivative of beta*(rho-rhos_v)^2*(rho-rhos_l)^2 (see the commented E0/ChemPot0 lines below).
beta = 0.01
rhos_v = 0.1   # vapor-branch density
rhos_l = 1.    # liquid-branch density
D = 4.         # target interface width
kappa = beta * D**2 *(rhos_l-rhos_v)**2 / 8.  # gradient-energy coefficient
print kappa
#rho = (rhos_v+rhos_l)/2. + (np.sin(np.pi*2.*X/NX) * np.sin(np.pi*2.*Y/NY))*0.05;
#
# rho[:,i] = rl
#plt.imshow(rho)
#plt.show()
#rl = 0.5
#rho = rl + 0.1 * (np.random.random_sample(rho.shape)-0.5) * 2.
#rho = 1
#rho[:,0:NY/2] = 0.2
#r = np.linspace(0.01, 3)
#E0 = beta*(r-rhos_v)**2*(r-rhos_l)**2
#ChemPot0 = 2.*beta*(r-rhos_l)**2*(r-rhos_v)+2.*beta*(r-rhos_l)*(r-rhos_v)**2
#p0 = r*ChemPot0 - E0
#plt.plot(r, p0)
#plt.show()
# Initialize every population at the zero-velocity equilibrium: f_i = W_i * rho.
for i in range(0,9):
    f_in[:,:,i] = rho[:,:] * W[i]
    f_out[:,:,i] = f_in[:,:,i]
cu = np.zeros((NX,NY,2))
hist = list()
figid = 0
def gettt(i,j,scal):
    """Return *scal* cyclically shifted so that element (i, j) lands at (0, 0)."""
    rolled_rows = np.roll(scal[:,:], shift=-i, axis=0)
    return np.roll(rolled_rows, shift=-j, axis=1)
for it in range(0,nt):
#stream
for i in range(0,9):
f_in[:,:,i] = np.roll(np.roll(f_out[:,:,i],shift=e[i][0],axis=0),shift=e[i][1],axis=1)
rho[:,:] = np.sum( f_in[:,:,:], 2 )
U[:,:] = 0.
for i in range(1,9):
U[:,:,0] = U[:,:,0] + e[i][0]*f_in[:,:,i]
U[:,:,1] = U[:,:,1] + e[i][1]*f_in[:,:,i]
# U[:,:,0] = U[:,:,0] / rho[:,:]
# U[:,:,1] = U[:,:,1] / rho[:,:]
#F = np.zeros(U.shape)
#d = 1
#F[:,:,d] = 0.0001
#F[:,:,1] = 0.
#E0 = beta*(rho-rhos_v)**2*(rho-rhos_l)**2
#ChemPot0 = 2.*beta*(rho-rhos_l)**2*(rho-rhos_v)+2.*beta*(rho-rhos_l)*(rho-rhos_v)**2
ChemPot0 = 2.*beta*(rho-rhos_l)*(rho-rhos_v)*(2.*rho-rhos_v-rhos_l);
#p0 = rho*ChemPot0 - E0
LaplaceRho = np.zeros_like(rho)
GradRhoC = np.zeros_like(U)
GradRhoB = np.zeros_like(U)
ChemPot = np.zeros_like(rho)
GradChemPotC = np.zeros_like(U)
GradChemPotB = np.zeros_like(U)
for i in range(1,9):
scal = rho
shift = e[i]
LaplaceRho = LaplaceRho + W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2
shift = 0.
LaplaceRho = LaplaceRho - 2. * W[i] * scal[:,:] / cs2 / dt**2
shift = -e[i]
LaplaceRho = LaplaceRho + W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2
shift = e[i]
wh = 1.
GradRhoC[:,:,0] = GradRhoC[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradRhoC[:,:,1] = GradRhoC[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = -e[i]
wh = -1.
GradRhoC[:,:,0] = GradRhoC[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradRhoC[:,:,1] = GradRhoC[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = e[i]
wh = 4.
GradRhoB[:,:,0] = GradRhoB[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradRhoB[:,:,1] = GradRhoB[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = 2*e[i]
wh = -1.
GradRhoB[:,:,0] = GradRhoB[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradRhoB[:,:,1] = GradRhoB[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = 0*e[i]
wh = -3.
GradRhoB[:,:,0] = GradRhoB[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradRhoB[:,:,1] = GradRhoB[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
ChemPot = ChemPot0 - kappa*LaplaceRho
for i in range(1,9):
scal = ChemPot
shift = e[i]
wh = 1.
GradChemPotC[:,:,0] = GradChemPotC[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradChemPotC[:,:,1] = GradChemPotC[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = -e[i]
wh = -1.
GradChemPotC[:,:,0] = GradChemPotC[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradChemPotC[:,:,1] = GradChemPotC[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = e[i]
wh = 4.
GradChemPotB[:,:,0] = GradChemPotB[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradChemPotB[:,:,1] = GradChemPotB[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = 2*e[i]
wh = -1.
GradChemPotB[:,:,0] = GradChemPotB[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradChemPotB[:,:,1] = GradChemPotB[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
shift = 0*e[i]
wh = -3.
GradChemPotB[:,:,0] = GradChemPotB[:,:,0] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][0]
GradChemPotB[:,:,1] = GradChemPotB[:,:,1] + wh * W[i]*(np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / cs2 / dt**2 / 2. * e[i][1]
# gradients()
# VV = GradRhoB
# plt.plot(VV[1,:,1])
# plt.plot(VV[1,:,0])
# plt.plot(VV[:,1,1])
# plt.plot(VV[:,1,0])
#
# VV = GradRhoC
# plt.plot(VV[1,:,1], 'o')
# plt.plot(VV[1,:,0], 'o')
# plt.plot(VV[:,1,1], 'o')
# plt.plot(VV[:,1,0], 'o')
#
#
# plt.plot(WbX[1,:],'--',lw=3)
# plt.plot(WbX[:,1],'--',lw=3)
#
# plt.figure()
# plt.contourf(X,Y,rho)
# plt.quiver(X,Y,VV[:,:,0],VV[:,:,1])
# plt.show()
#GradChemPotM = 0.5 * ( GradChemPotB + GradChemPotC )
#GradRhoM = 0.5 * ( GradRhoB + GradRhoC )
GradC = np.zeros_like(GradRhoC)
GradB = np.zeros_like(GradRhoB)
GradC[:,:,0] = GradRhoC[:,:,0] * cs2 - rho * GradChemPotC[:,:,0]
GradC[:,:,1] = GradRhoC[:,:,1] * cs2 - rho * GradChemPotC[:,:,1]
GradB[:,:,0] = GradRhoB[:,:,0] * cs2 - rho * GradChemPotB[:,:,0]
GradB[:,:,1] = GradRhoB[:,:,1] * cs2 - rho * GradChemPotB[:,:,1]
GradM = 0.5 * ( GradC + GradB )
#F[1:-1,:,0] = F[1:-1,:,0] + 0.000001*rho[1:-1,:]
#F[0,:,:] = F[0,:,:] - U[0,:,:]
#F[NX-1,:,:] = F[NX-1,:,:] - U[NX-1,:,:]
# F[:,:,1] = F[:,:,1] / rho
# F[:,:,0] = F[:,:,0 ] / rho
U[:,:,0] = U[:,:,0] + dt * 0.5 * ( GradC[:,:,0] )
U[:,:,1] = U[:,:,1] + dt * 0.5 * ( GradC[:,:,1] )
U[:,:,0] = U[:,:,0] / rho[:,:]
U[:,:,1] = U[:,:,1] / rho[:,:]
#collide
#F[:,:,1] = GradRhoC[:,:,1] * cs2 - rho * GradChemPotC[:,:,1]
#F[:,:,0] = GradRhoC[:,:,0] * cs2 - rho * GradChemPotC[:,:,0]
#F[:,:,0] = GradM[:,:,0]
#F[:,:,1] = GradM[:,:,1]
#Test = np.zeros_like(U)
#Test = np.zeros_like(rho)
Grad_Cumm = np.zeros_like(U)
feq0 = np.zeros_like(f_in)
fB = np.zeros_like(f_in)
def compute(i):
#for i in range(9):
cu = 3. * ( U[:,:,0] * e[i,0] + U[:,:,1] * e[i,1])
feq0[:,:,i] = W[i] * rho[:,:] * (1. + cu[:,:] + 0.5*cu[:,:]*cu[:,:] - (3./2.) * ( U[:,:,0]**2 + U[:,:,1]**2 ) )
GradC_Directional = np.zeros_like(rho)
GradB_Directional = np.zeros_like(rho)
#GradC_Directional2 = np.zeros_like(rho)
#GradB_Directional2 = np.zeros_like(rho)
# if i > 0:
sub = 2. #* 3.
scal = rho * cs2
shift = e[i]
wh = 1.
GradC_Directional[:,:] = wh * (np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / sub
shift = -e[i]
wh = -1.
GradC_Directional[:,:] = \
GradC_Directional[:,:] + wh * (np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / sub
scal = rho * cs2
shift = 2 * e[i]
wh = -1.
GradB_Directional[:,:] = \
wh * (np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / sub
shift = e[i]
wh = 4.
GradB_Directional[:,:] = \
GradB_Directional[:,:] + wh * (np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / sub
shift = 0*e[i]
wh = -3.
GradB_Directional[:,:] = \
GradB_Directional[:,:] + wh * (np.roll(np.roll(scal[:,:],shift=-shift[0],axis=0),shift=-shift[1],axis=1)) / sub
scal = ChemPot
shift | |
# -*- coding: utf-8 -*-
from itertools import product
import os
import uuid
import dask
import dask.array as da
from dask.array.core import normalize_chunks
from dask.highlevelgraph import HighLevelGraph
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pyrap.tables as pt
import pytest
from daskms import xds_from_ms
from daskms.dataset import Dataset, Variable
from daskms.reads import read_datasets
from daskms.writes import write_datasets
from daskms.utils import (select_cols_str, group_cols_str,
index_cols_str, assert_liveness)
# Optional dependency: xarray-specific behaviour is only exercised when it is installed.
have_xarray = True
try:
    import xarray as xr
except ImportError:
    have_xarray = False
@pytest.mark.parametrize("group_cols", [
    ["FIELD_ID", "SCAN_NUMBER"],
    []],
    ids=group_cols_str)
@pytest.mark.parametrize("index_cols", [
    ["TIME", "ANTENNA1", "ANTENNA2"]],
    ids=index_cols_str)
@pytest.mark.parametrize("select_cols", [
    ["STATE_ID", "TIME", "DATA"]],
    ids=select_cols_str)
@pytest.mark.parametrize("shapes", [
    {"row": 10, "chan": 16, "corr": 4}],
    ids=lambda s: f"shapes={s}")
@pytest.mark.parametrize("chunks", [
    {"row": 2},
    {"row": 3, "chan": 4, "corr": 1},
    {"row": 3, "chan": (4, 4, 4, 4), "corr": (2, 2)}],
    ids=lambda c: f"chunks={c}")
def test_dataset(ms, select_cols, group_cols, index_cols, shapes, chunks):
    """ Test dataset creation: shapes, dtypes, chunking and table-proxy liveness. """
    # Lazily open the measurement set; no data is read until dask.compute below.
    datasets = read_datasets(ms, select_cols, group_cols,
                             index_cols, chunks=chunks)
    # Opening the MS should leave exactly these proxies alive:
    # (1) Read-only TableProxy
    # (2) Read-only TAQL TableProxy
    assert_liveness(2, 1)
    chans = shapes['chan']
    corrs = shapes['corr']
    # Expected output chunks: normalize the requested chunking against the real
    # chan/corr extents (a missing key falls back to one chunk of the full extent).
    echunks = {'chan': normalize_chunks(chunks.get('chan', chans),
                                        shape=(chans,))[0],
               'corr': normalize_chunks(chunks.get('corr', corrs),
                                        shape=(corrs,))[0]}
    for ds in datasets:
        compute_dict = {}
        for k, v in ds.data_vars.items():
            compute_dict[k] = v.data
            # Variable metadata dtype must agree with the underlying array's dtype.
            assert v.dtype == v.data.dtype
        # Materialize every selected column in a single compute call.
        res = dask.compute(compute_dict)[0]
        assert res['DATA'].shape[1:] == (chans, corrs)
        assert 'STATE_ID' in res
        assert 'TIME' in res
        # NOTE: rebinds the `chunks` parameter name to this dataset's chunking.
        chunks = ds.chunks
        assert chunks["chan"] == echunks['chan']
        assert chunks["corr"] == echunks['corr']
        dims = dict(ds.dims)
        dims.pop('row') # row changes per group; the remaining dims must match the fixture
        assert dims == {"chan": shapes['chan'],
                        "corr": shapes['corr']}
    # Drop every remaining reference to the datasets so the table proxies can be
    # garbage-collected; liveness must then return to zero.
    del ds, datasets, compute_dict, v
    assert_liveness(0, 0)
@pytest.mark.parametrize("group_cols", [
["FIELD_ID", "SCAN_NUMBER"],
[]],
ids=group_cols_str)
@pytest.mark.parametrize("index_cols", [
["TIME", "ANTENNA1", "ANTENNA2"]],
ids=index_cols_str)
@pytest.mark.parametrize("select_cols", [
["STATE_ID", "TIME", "DATA"]],
ids=select_cols_str)
@pytest.mark.parametrize("shapes", [
{"row": 10, "chan": 16, "corr": 4}],
ids=lambda s: f"shapes={s}")
@pytest.mark.parametrize("chunks", [
{"row": 2},
{"row": 3, "chan": 4, "corr": 1},
{"row": 3, "chan": (4, 4, 4, 4), "corr": (2, 2)}],
ids=lambda c: f"chunks={c}")
def test_dataset_updates(ms, select_cols,
group_cols, index_cols,
shapes, chunks):
""" Test dataset writes """
# Get original STATE_ID and DATA
with pt.table(ms, ack=False, readonly=True, lockoptions='auto') as T:
original_state_id = T.getcol("STATE_ID")
original_data = T.getcol("DATA")
try:
datasets = read_datasets(ms, select_cols, group_cols,
index_cols, chunks=chunks)
assert_liveness(2, 1)
# Test writes
writes = []
states = []
datas = []
# Create write operations and execute them
for i, ds in enumerate(datasets):
state_var = (("row",), ds.STATE_ID.data + 1)
data_var = (("row", "chan", "corr"), ds.DATA.data + 1, {})
states.append(state_var[1])
datas.append(data_var[1])
new_ds = ds.assign(STATE_ID=state_var, DATA=data_var)
writes.append(write_datasets(ms, new_ds, ["STATE_ID", "DATA"]))
_, states, datas = dask.compute(writes, states, datas)
# NOTE(sjperkins)
# Interesting behaviour here. If these objects are not
# cleared up at this point, attempts to re-open the table below
# can fail, reproducing https://github.com/ska-sa/dask-ms/issues/26
# Adding auto-locking to the table opening command seems to fix
# this somehow
del ds, new_ds, datasets, writes, state_var, data_var
assert_liveness(0, 0)
datasets = read_datasets(ms, select_cols, group_cols,
index_cols, chunks=chunks)
for i, (ds, state, data) in enumerate(zip(datasets, states, datas)):
assert_array_equal(ds.STATE_ID.data, state)
assert_array_equal(ds.DATA.data, data)
del ds, datasets
assert_liveness(0, 0)
finally:
# Restore original STATE_ID
with pt.table(ms, ack=False, readonly=False, lockoptions='auto') as T:
state_id = T.getcol("STATE_ID")
data = T.getcol("DATA")
T.putcol("STATE_ID", original_state_id)
T.putcol("DATA", original_data)
# Compare against expected result
assert_array_equal(original_state_id + 1, state_id)
assert_array_equal(original_data + 1, data)
# Even though we ask for two rows, we get single rows out
# due to the "__row__" in group_col
@pytest.mark.parametrize("chunks", [{"row": 2}], ids=lambda c: str(c))
def test_row_grouping(spw_table, spw_chan_freqs, chunks):
""" Test grouping on single rows """
datasets = read_datasets(spw_table, [], ["__row__"], [], chunks=chunks)
assert_liveness(2, 1)
assert len(datasets) == len(spw_chan_freqs)
for i, chan_freq in enumerate(spw_chan_freqs):
assert_array_equal(datasets[i].CHAN_FREQ.data[0], chan_freq)
assert_array_equal(datasets[i].NUM_CHAN.data[0], chan_freq.shape[0])
del datasets
assert_liveness(0, 0)
def test_antenna_table_string_names(ant_table, wsrt_antenna_positions):
    """
    Reading an ANTENNA table yields string NAME data alongside POSITION,
    and string ndarrays can be written back (they are converted from
    ndarrays to lists of strings internally).
    """
    datasets = read_datasets(ant_table, [], [], None)
    assert len(datasets) == 1
    dataset = datasets[0]
    n_ant = wsrt_antenna_positions.shape[0]
    expected_names = [f"ANTENNA-{i}" for i in range(n_ant)]
    assert_array_equal(dataset.POSITION.data, wsrt_antenna_positions)
    assert_array_equal(dataset.NAME.data, expected_names)
    # Force materialisation of the string column
    names = dataset.NAME.data.compute()
    # ROWID is synthetic and must not be written back
    write_cols = set(dataset.data_vars.keys()) - {"ROWID"}
    writes = write_datasets(ant_table, dataset, write_cols)
    dask.compute(writes)
def test_dataset_assign(ms):
    """
    Test dataset assignment.

    Covers (1) assigning over an existing column, (2) assigning a new
    column with an explicit dimension schema, and (3) the errors raised
    for inconsistent dimension sizes and inconsistent chunking.
    """
    datasets = read_datasets(ms, [], [], [])
    assert len(datasets) == 1
    ds = datasets[0]
    # Assign on an existing column is easier because we can
    # infer the dimension schema from it
    nds = ds.assign(TIME=(ds.TIME.dims, ds.TIME.data + 1))
    # Assignment is copy-on-write: untouched variables are shared,
    # the assigned one is replaced
    assert ds.DATA.data is nds.DATA.data
    assert ds.TIME.data is not nds.TIME.data
    assert_array_equal(nds.TIME.data, ds.TIME.data + 1)
    # We have to explicitly supply a dimension schema
    nds = ds.assign(ANTENNA3=(("row",), ds.ANTENNA1.data + 3))
    assert_array_equal(ds.ANTENNA1.data + 3, nds.ANTENNA3.data)
    dims = ds.dims
    chunks = ds.chunks
    # Error message differs between the xarray and native dataset paths
    if have_xarray:
        match = "'row': length 9 on 'ANTENNA4'"
    else:
        match = ("Existing dimension size 9 for dimension 'row' "
                 "is inconsistent with same dimension 10 of array ANTENNA4")
    with pytest.raises(ValueError, match=match):
        # One row short of the dataset's row dimension
        array = da.zeros(dims['row'] - 1, chunks['row'])
        nds = ds.assign(ANTENNA4=(("row",), array))
        # Accessing .dims triggers the consistency check
        nds.dims
    assert chunks['row'] == (10,)
    if have_xarray:
        match = "Object has inconsistent chunks along dimension row."
    else:
        match = r"chunking \(4, 4, 2\) for dim"
    with pytest.raises(ValueError, match=match):
        # Right size, but chunked differently to the existing row chunks
        array = da.zeros(dims['row'], chunks=4)
        nds = ds.assign(ANTENNA4=(("row",), array))
        # Accessing .chunks triggers the chunk-consistency check
        nds.chunks
    # Release references so table proxies can be garbage collected
    del datasets, ds, nds
    assert_liveness(0, 0)
def test_dataset_table_schemas(ms):
    """A user-supplied table schema overrides the default DATA dims."""
    custom_dims = ("mychan", "mycorr")
    schema = ["MS", {"DATA": {"dims": custom_dims}}]
    ds = read_datasets(ms, [], [], [], table_schema=schema)[0]
    # The leading "row" dimension is always present
    expected_dims = ("row",) + custom_dims
    assert ds.data_vars["DATA"].dims == expected_dims
@pytest.mark.parametrize("dtype", [
np.complex64,
np.complex128,
np.float32,
np.float64,
np.int16,
np.int32,
np.uint32,
bool,
pytest.param(object,
marks=pytest.mark.xfail(reason="putcol can't handle "
"lists of ints")),
pytest.param(np.uint16,
marks=pytest.mark.xfail(reason="RuntimeError: RecordRep::"
"createDataField: unknown data"
" type 17")),
pytest.param(np.uint8,
marks=pytest.mark.xfail(reason="Creates uint16 column")),
])
def test_dataset_add_column(ms, dtype):
datasets = read_datasets(ms, [], [], [])
assert len(datasets) == 1
ds = datasets[0]
# Create the dask array
bitflag = da.zeros_like(ds.DATA.data, dtype=dtype)
# Assign keyword attribute
col_kw = {"BITFLAG": {'FLAGSETS': 'legacy,cubical',
'FLAGSET_legacy': 1,
'FLAGSET_cubical': 2}}
# Assign variable onto the dataset
nds = ds.assign(BITFLAG=(("row", "chan", "corr"), bitflag))
writes = write_datasets(ms, nds, ["BITFLAG"], descriptor='ratt_ms',
column_keywords=col_kw)
dask.compute(writes)
del datasets, ds, writes, nds
assert_liveness(0, 0)
with pt.table(ms, readonly=False, ack=False, lockoptions='auto') as T:
bf = T.getcol("BITFLAG")
assert T.getcoldesc("BITFLAG")['keywords'] == col_kw['BITFLAG']
assert bf.dtype == dtype
def test_dataset_add_string_column(ms):
    """
    A new string column (object-dtype ndarray) can be assigned and
    written, then read back with casacore as a list of strings.
    """
    datasets = read_datasets(ms, [], [], [])
    assert len(datasets) == 1
    ds = datasets[0]
    dims = ds.dims
    name_list = ["BOB"] * dims['row']
    # Strings must be carried in an object-dtype array
    names = np.asarray(name_list, dtype=object)
    # Chunk like an existing row-only column so row chunks line up
    names = da.from_array(names, chunks=ds.TIME.chunks)
    nds = ds.assign(NAMES=(("row",), names))
    writes = write_datasets(ms, nds, ["NAMES"])
    dask.compute(writes)
    # Release references so table proxies are collected before re-opening
    del datasets, ds, writes, nds
    assert_liveness(0, 0)
    with pt.table(ms, readonly=False, ack=False, lockoptions='auto') as T:
        assert name_list == T.getcol("NAMES")
@pytest.mark.parametrize("chunks", [
{"row": (36,)},
{"row": (18, 18)}])
def test_dataset_multidim_string_column(tmp_path, chunks):
row = sum(chunks['row'])
name_list = [["X-%d" % i, "Y-%d" % i, "Z-%d" % i] for i in range(row)]
np_names = np.array(name_list, dtype=object)
names = da.from_array(np_names, chunks=(chunks['row'], np_names.shape[1]))
ds = Dataset({"POLARIZATION_TYPE": (("row", "xyz"), names)})
table_name = str(tmp_path / "test.table")
writes = write_datasets(table_name, ds, ["POLARIZATION_TYPE"])
dask.compute(writes)
del writes
assert_liveness(0, 0)
datasets = read_datasets(table_name, [], [], [],
chunks={'row': chunks['row']})
assert len(datasets) == 1
assert_array_equal(datasets[0].POLARIZATION_TYPE.data, np_names)
del datasets
assert_liveness(0, 0)
@pytest.mark.parametrize("dataset_chunks", [
[{'row': (5, 3, 2), 'chan': (4, 4, 4, 4), 'corr': (4,)},
{'row': (4, 3, 3), 'chan': (5, 5, 3, 3), 'corr': (2, 2)}],
])
@pytest.mark.parametrize("dtype", [np.complex128, np.float32])
def test_dataset_create_table(tmp_path, dataset_chunks, dtype):
datasets = []
names = []
datas = []
row_sum = 0
for chunks in dataset_chunks:
shapes = {k: sum(c) for k, c in chunks.items()}
row_sum += shapes['row']
# Make some visibilities
dims = ("row", "chan", "corr")
shape = tuple(shapes[d] for d in dims)
data_chunks = tuple(chunks[d] for d in dims)
data = da.random.random(shape, chunks=data_chunks).astype(dtype)
data_var = Variable(dims, data, {})
# Make some string names
dims = ("row",)
shape = tuple(shapes[d] for d in dims)
str_chunks = tuple(chunks[d] for d in dims)
np_str_array = np.asarray(["BOB"] * shape[0], dtype=object)
da_str_array = da.from_array(np_str_array, chunks=str_chunks)
str_array_var = Variable(dims, da_str_array, {})
datasets.append(Dataset({"DATA": data_var, "NAMES": str_array_var}))
datas.append(data)
names.extend(np_str_array.tolist())
freq = da.linspace(.856e9, 2*.856e9, 64, chunks=16)
sub_datasets = [Dataset({"FREQ": (("row", "chan"), freq[None, :])})]
# Write the data to new tables
table_name = os.path.join(str(tmp_path), 'test.table')
writes = write_datasets(table_name, datasets, ["DATA", "NAMES"])
subt_writes = write_datasets(table_name + "::SPW",
sub_datasets, ["FREQ"])
dask.compute(writes, subt_writes)
# Check written data
with pt.table(table_name, readonly=True,
lockoptions='auto', ack=False) as T:
assert row_sum == T.nrows()
assert_array_equal(T.getcol("DATA"), np.concatenate(datas))
assert_array_equal(T.getcol("NAMES"), names)
# Sub-table correctly linked and populated
with pt.table(table_name + "::SPW", readonly=True,
lockoptions='auto', ack=False) as T:
assert T.nrows() == 1
assert_array_equal(T.getcol("FREQ")[0], freq)
@pytest.mark.parametrize("chunks", [
{'row': (5, 3, 2), 'chan': (16,), 'corr': (4,)},
])
@pytest.mark.parametrize("dtype", [np.complex128, np.float32])
def test_write_dict_data(tmp_path, chunks, dtype):
rs = np.random.RandomState(42)
row_sum = 0
def _vis_factory(chan, corr):
# | |
can you find this man, Mr.
Stone?”
“We’ve got to find him,” Stone returned, “whether we can or not. It’s
really a baffling case. I think we’ve discovered the origin of the fire
in the garage.”
He told the story that Fibsy had learned from the chauffeur, and Keefe
was greatly interested.
“What are the acids?” he asked.
“I don’t know the exact names,” Stone admitted, “but they are of just
such powers as Fulton described, and the thing is plausible. Here’s the
bottle.” He offered the little vial for inspection and Keefe looked at it
with some curiosity.
“The theory being,” he said, “that the murderer first arranged for a fire
in our car—in Mr. Appleby’s car—and then waited for the fire to come off
as planned. Then, at the moment of greatest excitement, he, being
probably the man the servants saw—shot through the bay window and killed
Mr. Appleby. You were fortunate, <NAME>, that you weren’t hit first!”
“Oh, I was in no danger. I sat well back in the window-seat, and over to
one side, out of range of a shot from outside. And, too, Mr. Keefe, I can
scarcely discuss this matter of the shot from outside, as I am, myself,
the confessed criminal.”
“Confessing only to save me from suspicion,” said her father, with an
affectionate glance. “But it won’t do any good, dear. I take the burden
of the crime and I own up that I did it. This man on the veranda—if,
indeed, there was such a one, may have been any of the men servants about
the place, startled by the cry of fire, and running to assure himself of
the safety of the house and family. He, doubtless, hesitates to divulge
his identity lest he be suspected of shooting.”
“That’s all right,” declared Fibsy, “but if it was one of your men, he’d
own up by this time. He’d know he wouldn’t be suspected of shooting Mr.
Appleby. Why should he do it?”
“Why should anybody do it, except myself?” asked <NAME>. “Not all
the detectives in the world can find any one else with a motive and
opportunity. The fact that both my wife and daughter tried to take the
crime off my shoulders only makes me more determined to tell the truth.”
“But you’re not telling the truth, dad,” and Maida looked at him. “You
know I did it—you know I had threatened to do it—you know I felt I just
could not stand <NAME>’s oppression of you another day! And so—and
so, I——”
“Go on, <NAME>,” urged Stone, “and so you—what did you do?”
“I ran across the den to the drawer where father keeps his pistol; I took
it and shot—then I ran back to the window-seat——”
“What did you do with the pistol?”
“Threw it out of the window.”
“Toward the right or left?”
“Why, I don’t know.”
“Try to think. Stand up there now, and remember which way you flung it.”
Reluctantly, Maida went to the bay window, and stood there thinking.
“I don’t know,” she said, at last. “I can’t remember.”
“It doesn’t matter,” said Keefe. “I think we can prove that it was none
of the Wheelers, but there was a man, an intruder, on the veranda who
shot. Even if we never find out his identity, we may prove that he was
really there. Where is this maid who saw him clearly? Rachel—is that her
name?”
“That’s a pretty thing, too!” Fibsy spoke up. “She has flew the coop.”
“Gone! Where?” Keefe showed his disappointment.
“Nobody knows where. She just simply lit out. Even her lover doesn’t know
where she is.”
“Who is her lover?”
“Fulton, the chauffeur. He’s just about crazy over her disappearance.”
“Oh, she’ll return,” surmised Stone. “She became frightened at something
and ran off. I think she’ll come back. If not, we’ll have to give chase.
We must find her, as she’s the principal witness of the man on the
veranda. Cook is not so sure about him.”
“Who could he have been?” Keefe said. “Doubtless some enemy of Mr.
Appleby, in no way connected with the Wheelers.”
“Probably,” agreed Stone.
“We found the pistol, you know, Mr. Keefe,” remarked Fibsy.
“You did! Well, you have made progress. Where was it?”
“In the fern bed, not far from the veranda railing.”
“Just where the man would have thrown it!” exclaimed Keefe.
“Or where I threw it,” put in Daniel Wheeler.
“I’d like to see the exact place it was found,” Keefe said.
“Come on, I’ll show you,” offered Fibsy and the two started away
together.
“Here you are,” and Fibsy showed the bed of ferns, which, growing closely
together, made a dense hiding place.
“A wonder you ever found it,” said Keefe. “How’d you happen to?”
“Oh, I just snooped around till I came to it. I says to myself, ‘Either
the murderer flung it away or he didn’t. If he did, why it must be
somewheres,’ and it was.”
“I see; and does Mr. Stone think the finding of it here points to either
of the Wheelers?”
“Not necess’rily. You see, if the man we’re looking for did the shooting,
he’s the one who threw the pistol in this here fern bed. And, you know
yourself, it’s more likely a man threw this farther than a woman.”
“<NAME> is athletic.”
“I know, but I’m convinced that <NAME> didn’t do the deed. Ain’t
you?”
“Oh, I can’t think she did it, of course. But it’s all very mysterious.”
“Not mysterious a bit. It’s hard sleddin’, but there ain’t much mystery
about it. Why, look a-here. If either the father or daughter did it, they
both know which one it was. Therefore, one is telling the truth and one
isn’t. It won’t be hard to find out which is which, but <NAME>, he’s
trying to find some one that’ll let the Wheelers both out.”
“Oh, that’s his idea? And a mighty good one. I’ll help all I can. Of
course, the thing to do is to trace the pistol.”
“Oh, it was Mr. Wheeler’s pistol, all right.”
“It was!” Keefe looked dismayed. “Then how can we suspect an outsider?”
“Well, he could have stolen Mr. Wheeler’s pistol for the purpose of
casting suspicion on him.”
“Yes; that’s so. Now to find that Rachel.”
“Oh, do find her,” Maida cried, overhearing the remark as she and
Genevieve crossed the lawn toward Keefe and Fibsy.
The lad had not yet seen Miss Lane and he frankly admired her at once.
Perhaps a sympathetic chord was struck by the similarity of their
natures. Perhaps they intuitively recognized each other’s gay impudence,
for they engaged in a clash of words that immediately made them friends.
“Maybe Rachel’d come back if she knew you were here,” he said. “I’m sure
she’d admire to wait on such a pretty lady.”
“Just tell her that you saw me,” Genevieve said, “and I’ll be glad to
have her back. She’s a first-class ladies’ maid.”
“Oh, then she only waits on first-class ladies?”
“Yes; that’s why she’s so fond of me. Do hunt her up.”
“Well, cutie, just for you, I’ll do that same. Where shall I go to look
for her?”
“How should I know? But you keep watch of Fulton, and I’ll bet he gets
some word from her.”
“Yes, they’re sweethearts. Now, how do sweethearts get word to each
other? You ought to know all about sweethearting.”
“I don’t,” said Genevieve, demurely.
“Pshaw, now, that’s too bad. Want me to teach you?”
“Yes—if you don’t mind.”
“Saunter away with me, then,” and the saucy boy led <NAME> off for a
stroll round the grounds.
“Honest, now, do you want to help?” he asked.
“Yes, I do,” she asserted. “I’m downright fond of Maida, and though I
know she didn’t do it, yet she and her father will be suspected unless we
can find this other person. And the only way to get a line on him, seems
to be through Rachel. Why do you suppose she ran away?”
“Can’t imagine. Don’t see how she could get scared.”
“No; what would scare her? I think she’s at some neighbor’s.”
“Let’s you and me go to all the neighbors and see.”
“All right. We’ll go in the Wheelers’ little car. Fulton will take us.”
“Don’t we get permission?”
“Nixy. They might say no, by mistake for a yes. Come on—we’ll just hook
Jack.”
To the garage they went and easily persuaded Fulton to take | |
#!/usr/bin/env python
"""
This module implements more advanced transformations.
"""
from __future__ import division
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Jul 24, 2012"
import numpy as np
from fractions import gcd, Fraction
from pymatgen.core.structure import Specie, Composition
from pymatgen.core.periodic_table import smart_element_or_specie
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, OrderDisorderedStructureTransformation
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.core.structure import Structure
from pymatgen.symmetry.finder import SymmetryFinder
from pymatgen.structure_prediction.substitution_probability import \
SubstitutionPredictor
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpinComparator
from pymatgen.analysis.energy_models import SymmetryModel, \
EwaldElectrostaticModel, NsitesModel
from pymatgen.serializers.json_coders import PMGJSONDecoder
class ChargeBalanceTransformation(AbstractTransformation):
    """
    Disorders a structure so that it becomes charge balanced, given an
    oxidation state-decorated structure. Balancing is achieved by
    fractionally removing the designated specie.
    """
    def __init__(self, charge_balance_sp):
        """
        Args:
            charge_balance_sp
                specie to add or remove. Currently only removal is supported
        """
        self._charge_balance_sp = str(charge_balance_sp)
    def apply_transformation(self, structure):
        """Return a structure disordered so that its net charge is zero."""
        specie = smart_element_or_specie(self._charge_balance_sp)
        # Fraction of the balancing specie that must be removed to
        # neutralize the structure's net charge
        num_to_remove = structure.charge / specie.oxi_state
        removal_fraction = num_to_remove / structure.composition[specie]
        if removal_fraction < 0:
            raise ValueError("addition of specie not yet supported by "
                             "ChargeBalanceTransformation")
        sub_map = {self._charge_balance_sp:
                   {self._charge_balance_sp: 1 - removal_fraction}}
        return SubstitutionTransformation(sub_map) \
            .apply_transformation(structure)
    def __str__(self):
        return ("Charge Balance Transformation : Species to remove = "
                "{}".format(self._charge_balance_sp))
    def __repr__(self):
        return str(self)
    @property
    def inverse(self):
        # Removal cannot be undone, so no inverse exists
        return None
    @property
    def is_one_to_many(self):
        return False
    @property
    def to_dict(self):
        return {"name": self.__class__.__name__,
                "version": __version__,
                "init_args": {"charge_balance_sp": self._charge_balance_sp},
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
class SuperTransformation(AbstractTransformation):
    """
    An inherently one-to-many transformation: it is built from a list of
    transformations and yields one output structure per transformation.
    Its primary use is extending a transmuter object.
    """
    def __init__(self, transformations):
        """
        Args:
            transformations:
                list of transformations to apply to a structure. One
                transformation is applied to each output structure.
        """
        self._transformations = transformations
    def apply_transformation(self, structure, return_ranked_list=False):
        """Apply every wrapped transformation, returning one dict each."""
        if not return_ranked_list:
            raise ValueError("SuperTransformation has no single best structure"
                             " output. Must use return_ranked_list")
        return [{"transformation": t,
                 "structure": t.apply_transformation(structure)}
                for t in self._transformations]
    def __str__(self):
        return ("Super Transformation : Transformations = "
                "{}".format(" ".join(map(str, self._transformations))))
    def __repr__(self):
        return str(self)
    @property
    def inverse(self):
        return None
    @property
    def is_one_to_many(self):
        return True
    @property
    def to_dict(self):
        return {"name": self.__class__.__name__,
                "version": __version__,
                "init_args": {"transformations": self._transformations},
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
class MultipleSubstitutionTransformation(object):
    """
    Performs multiple substitutions on a structure. For example, can do a
    fractional replacement of Ge in LiGePS with a list of species, creating one
    structure for each substitution. Ordering is done using a dummy element so
    only one ordering must be done per substitution oxidation state. Charge
    balancing of the structure is optionally performed.
    .. note::
        There are no checks to make sure that removal fractions are possible
        and rounding may occur. Currently charge balancing only works for
        removal of species.
    """
    def __init__(self, sp_to_replace, r_fraction, substitution_dict,
                 charge_balance_species=None, order=True):
        """
        Performs multiple fractional substitutions on a transmuter.
        Args:
            sp_to_replace
                species to be replaced
            r_fraction
                fraction of that specie to replace
            substitution_dict
                dictionary of the format
                {2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
                 3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
                 4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
                 5: ["Ru", "W", "Mn"]
                }
                The number is the charge used for each of the list of elements
                (an element can be present in multiple lists)
            charge_balance_species:
                If specified, will balance the charge on the structure using
                that specie.
            order:
                Whether to order the resulting structures with
                OrderDisorderedStructureTransformation. Defaults to True.
        """
        self._sp_to_replace = sp_to_replace
        self._r_fraction = r_fraction
        self._substitution_dict = substitution_dict
        self._charge_balance_species = charge_balance_species
        self._order = order
    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Return one substituted structure per (charge, element) pair in the
        substitution dict, as a list of {"structure": ...} dicts.
        Raises:
            ValueError: if return_ranked_list is falsy, since this
                transformation is inherently one-to-many.
        """
        if not return_ranked_list:
            raise ValueError("MultipleSubstitutionTransformation has no single"
                             " best structure output. Must use"
                             " return_ranked_list.")
        outputs = []
        for charge, el_list in self._substitution_dict.items():
            # Dummy specie name encodes the substitution charge, e.g. "X2+"
            # or "X3-". Sign computed once per charge, not per element.
            sign = "+" if charge > 0 else "-"
            dummy_sp = "X{}{}".format(charge, sign)
            mapping = {self._sp_to_replace:
                       {self._sp_to_replace: 1 - self._r_fraction,
                        dummy_sp: self._r_fraction}}
            trans = SubstitutionTransformation(mapping)
            dummy_structure = trans.apply_transformation(structure)
            if self._charge_balance_species is not None:
                cbt = ChargeBalanceTransformation(self._charge_balance_species)
                dummy_structure = cbt.apply_transformation(dummy_structure)
            if self._order:
                trans = OrderDisorderedStructureTransformation()
                dummy_structure = trans.apply_transformation(dummy_structure)
            for el in el_list:
                # BUG FIX: the original built the substitution key as
                # "X{charge}+" regardless of sign, so for non-positive
                # charges the dummy specie (e.g. "X-1-") was never replaced.
                # Reuse dummy_sp so the key always matches.
                st = SubstitutionTransformation(
                    {dummy_sp: "{}{}{}".format(el, charge, sign)})
                new_structure = st.apply_transformation(dummy_structure)
                outputs.append({"structure": new_structure})
        return outputs
    def __str__(self):
        return "Multiple Substitution Transformation : Substitution on " + \
               "{}".format(self._sp_to_replace)
    def __repr__(self):
        return self.__str__()
    @property
    def inverse(self):
        # Fractional substitution is not invertible
        return None
    @property
    def is_one_to_many(self):
        return True
    @property
    def to_dict(self):
        return {"name": self.__class__.__name__, "version": __version__,
                "init_args": {"sp_to_replace": self._sp_to_replace,
                              "r_fraction": self._r_fraction,
                              "substitution_dict": self._substitution_dict,
                              "charge_balance_species":
                              self._charge_balance_species},
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
class EnumerateStructureTransformation(AbstractTransformation):
    """
    Order a disordered structure using enumlib. For complete orderings, this
    generally produces fewer structures than the OrderDisorderedStructure
    transformation, and at a much faster speed.
    """
    def __init__(self, min_cell_size=1, max_cell_size=1, symm_prec=0.1,
                 refine_structure=False):
        """
        Args:
            min_cell_size:
                The minimum cell size wanted. Must be an int. Defaults to 1.
            max_cell_size:
                The maximum cell size wanted. Must be an int. Defaults to 1.
            symm_prec:
                Tolerance to use for symmetry.
            refine_structure:
                This parameter has the same meaning as in enumlib_caller.
                If you are starting from a structure that has been relaxed via
                some electronic structure code, it is usually much better to
                start with symmetry determination and then obtain a refined
                structure. The refined structure have cell parameters and
                atomic positions shifted to the expected symmetry positions,
                which makes it much less sensitive precision issues in enumlib.
                If you are already starting from an experimental cif, refinement
                should have already been done and it is not necessary. Defaults
                to False.
        """
        self.symm_prec = symm_prec
        self.min_cell_size = min_cell_size
        self.max_cell_size = max_cell_size
        self.refine_structure = refine_structure
    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Return either a single ordered structure or a sequence of all ordered
        structures.
        Args:
            structure:
                Structure to order.
            return_ranked_list:
                Boolean stating whether or not multiple structures are
                returned. If return_ranked_list is a number, that number of
                structures is returned.
        Returns:
            Depending on returned_ranked list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure" = .... , "other_arguments"}
            The list of ordered structures is ranked by ewald energy / atom, if
            the input structure is an oxidation state decorated structure.
            Otherwise, it is ranked by number of sites, with smallest number of
            sites first.
        Raises:
            ValueError: if the input structure is already ordered.
        """
        # A numeric return_ranked_list caps how many structures come back;
        # non-numeric (e.g. True/False) falls through to a single result
        try:
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        if structure.is_ordered:
            raise ValueError("Enumeration can be carried out only on "
                             "disordered structures!")
        if self.refine_structure:
            # Symmetrize/refine first so enumlib sees idealized positions
            finder = SymmetryFinder(structure, self.symm_prec)
            structure = finder.get_refined_structure()
        # Detect oxidation-state decoration; determines whether results can
        # be ranked by Ewald energy below.
        # NOTE(review): reaches into the private _oxi_state attribute after
        # checking for the public oxi_state — presumably equivalent; confirm
        contains_oxidation_state = False
        for sp in structure.composition.elements:
            if hasattr(sp, "oxi_state") and sp._oxi_state != 0:
                contains_oxidation_state = True
                break
        # refine_structure=False here because any refinement was done above
        adaptor = EnumlibAdaptor(structure, min_cell_size=self.min_cell_size,
                                 max_cell_size=self.max_cell_size,
                                 symm_prec=self.symm_prec,
                                 refine_structure=False)
        adaptor.run()
        structures = adaptor.structures
        original_latt = structure.lattice
        inv_latt = np.linalg.inv(original_latt.matrix)
        # Cache one EwaldSummation per distinct supercell transformation,
        # since many enumerated structures share the same supercell
        ewald_matrices = {}
        all_structures = []
        for s in structures:
            new_latt = s.lattice
            # Integer supercell matrix mapping the original lattice to the
            # enumerated one; rounded to absorb floating-point noise
            transformation = np.dot(new_latt.matrix, inv_latt)
            transformation = tuple([tuple([int(round(cell)) for cell in row])
                                    for row in transformation])
            if contains_oxidation_state:
                if transformation not in ewald_matrices:
                    s_supercell = Structure.from_sites(structure.sites)
                    s_supercell.make_supercell(transformation)
                    ewald = EwaldSummation(s_supercell)
                    ewald_matrices[transformation] = ewald
                else:
                    ewald = ewald_matrices[transformation]
                energy = ewald.compute_sub_structure(s)
                all_structures.append({"num_sites": len(s), "energy": energy,
                                       "structure": s})
            else:
                all_structures.append({"num_sites": len(s), "structure": s})
        def sort_func(s):
            # Rank by Ewald energy per site when available, else by size
            return s["energy"] / s["num_sites"] if contains_oxidation_state \
                else s["num_sites"]
        self._all_structures = sorted(all_structures, key=sort_func)
        if return_ranked_list:
            return self._all_structures[0:num_to_return]
        else:
            return self._all_structures[0]["structure"]
    def __str__(self):
        return "EnumerateStructureTransformation"
    def __repr__(self):
        return self.__str__()
    @property
    def inverse(self):
        # Ordering discards information; no inverse exists
        return None
    @property
    def is_one_to_many(self):
        return True
    @property
    def to_dict(self):
        return {"name": self.__class__.__name__, "version": __version__,
                "init_args": {"symm_prec": self.symm_prec,
                              "min_cell_size": self.min_cell_size,
                              "max_cell_size": self.max_cell_size,
                              "refine_structure": self.refine_structure},
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}
class SubstitutionPredictorTransformation(AbstractTransformation):
"""
This transformation takes a structure and uses the structure
prediction module to find likely site substitutions.
"""
    def __init__(self, threshold=1e-2, **kwargs):
        """
        Args:
            threshold:
                probability threshold passed to SubstitutionPredictor;
                predictions below it are discarded. Defaults to 1e-2.
            kwargs:
                args for SubstitutionProbability class
                lambda_table, alpha
        """
        self._kwargs = kwargs
        self._threshold = threshold
        # Predictor is constructed once and reused by apply_transformation
        self._substitutor = SubstitutionPredictor(threshold=threshold,
                                                  **kwargs)
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SubstitutionPredictorTransformation doesn't"
" support returning 1 structure")
preds = self._substitutor.composition_prediction(
structure.composition, to_this_composition=False)
preds.sort(key=lambda x: x['probability'], reverse=True)
outputs = []
for pred in preds:
st = SubstitutionTransformation(pred['substitutions'])
output = {'structure': st.apply_transformation(structure),
'probability': pred['probability'],
'threshold': self._threshold, 'substitutions': {}}
#dictionary keys have to be converted to strings for JSON
for key, value in pred['substitutions'].items():
output['substitutions'][str(key)] = str(value)
outputs.append(output)
return outputs
| |
<filename>src/geneflow/workflow.py
"""This module contains the GeneFlow Workflow class."""
import time
import copy
import requests
from slugify import slugify
import yaml
from geneflow.log import Log
from geneflow.data import DataSource, DataSourceException
from geneflow.data_manager import DataManager
from geneflow.definition import Definition
from geneflow.workflow_dag import WorkflowDAG, WorkflowDAGException
from geneflow.uri_parser import URIParser
from geneflow.extend.contexts import Contexts
class Workflow:
"""Wraps workflow, job, app loading and running calls."""
def __init__(self, job_id, config):
"""
Initialize the GeneFlow Workflow class.
Initialize the class by loading the job and the config.
Args:
self: class instance
job_id: Job identifier
config: the Workflow subsection of the GeneFlow configuration
Returns:
Class instance.
"""
self._config = config # configuration structure
self._job_id = job_id
self._job = None # job definition
self._workflow = None # workflow definition
self._apps = None # app definitions
self._dag = None # WorkflowDAG class instance
self._status = 'PENDING'
self._parsed_job_work_uri = {}
self._parsed_job_output_uri = {}
self._exec_contexts = set() # all execution contexts
self._data_contexts = set() # all data contexts
# context-specific data and methods
self._workflow_context = {}
def initialize(self):
"""
Initialize the GeneFlow Workflow class.
Initialize the class by loading the workflow and job definitions
from the database, creating work and output URIs, and creating step
objects.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# load and validate job definition from database
if not self._load_job():
msg = 'cannot load job definition'
Log.an().error(msg)
return self._fatal(msg)
# load and validate workflow definition from database
if not self._load_workflow():
msg = 'cannot load workflow definition'
Log.an().error(msg)
return self._fatal(msg)
# load and validate app definitions from database
if not self._load_apps():
msg = 'cannot load app definitions'
Log.an().error(msg)
return self._fatal(msg)
# inject job parameters into workflow def
if not self._inject_job_params():
msg = 'cannot inject job parameters into workflow definition'
Log.an().error(msg)
return self._fatal(msg)
# initialize set of execution contexts
if not self._init_exec_context_set():
msg = 'cannot initialize set of execution contexts'
Log.an().error(msg)
return self._fatal(msg)
# initialize set of data contexts
if not self._init_data_context_set():
msg = 'cannot initialize set of data contexts'
Log.an().error(msg)
return self._fatal(msg)
# validate all work and output URIs
if not self._init_job_uris():
msg = 'cannot construct and validate work and output uris'
Log.an().error(msg)
return self._fatal(msg)
# initialize context-specific workflow items (e.g., context connection info)
if not self._init_workflow_contexts():
msg = 'cannot initialize context-specific workflow properties'
Log.an().error(msg)
return self._fatal(msg)
# create all work and output URIs
if not self._create_job_uris():
msg = 'cannot create work and output uris'
Log.an().error(msg)
return self._fatal(msg)
# initialize context-specific workflow data items (e.g., create remote directories)
if not self._init_workflow_context_data():
msg = 'cannot initialize context-specific workflow data'
Log.an().error(msg)
return self._fatal(msg)
# initialize directed acyclic graph structure
if not self._init_dag():
msg = 'cannot initialize workflow graph structure'
Log.an().error(msg)
return self._fatal(msg)
return True
def __str__(self):
"""
Workflow string representation.
Args:
None.
Returns:
A string representation of the workflow.
"""
str_rep = (
'Job: {} ({})'
'\n Workflow: {}'
'\n Version: {}'
'\n Description: {}'
'\n Git: {}'
).format(
self._job['name'],
self._job_id,
self._workflow['name'],
self._workflow['version'],
self._workflow['description'],
self._workflow['git']
)
str_rep += '\n Inputs: '
for input_key in self._workflow['inputs']:
str_rep += '\n {}: {}'.format(
input_key, self._workflow['inputs'][input_key]['value']
)
str_rep += '\n Parameters: '
for parameter_key in self._workflow['parameters']:
str_rep += '\n {}: {}'.format(
parameter_key,
self._workflow['parameters'][parameter_key]['value']
)
str_rep += '\n Work URIs: '
for context in self._parsed_job_work_uri:
str_rep += '\n {}: {}'.format(
context, self._parsed_job_work_uri[context]['chopped_uri']
)
str_rep += '\n Output URI: {}'.format(
self._parsed_job_output_uri['chopped_uri']
)
return str_rep
    def _fatal(self, msg):
        """Record *msg* as an ERROR status and return False.

        Convenience so failing callers can write ``return self._fatal(msg)``.
        """
        # persists the ERROR state via _update_status_db (defined elsewhere
        # in this class)
        self._update_status_db('ERROR', msg)
        return False
def _load_job(self):
"""
Load and validate job definition from the database.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
try:
data_source = DataSource(self._config['database'])
except DataSourceException as err:
msg = 'data source initialization error [{}]'.format(str(err))
Log.an().error(msg)
return self._fatal(msg)
self._job = data_source.get_job_def_by_id(self._job_id)
if self._job is False:
msg = 'cannot load job from data source: job_id={}'\
.format(self._job_id)
Log.an().error(msg)
return self._fatal(msg)
if not self._job:
msg = 'job not found: job_id={}'.format(self._job_id)
Log.an().error(msg)
return self._fatal(msg)
# validate the job definition
valid_def = Definition.validate_job(self._job)
if valid_def is False:
msg = 'invalid job definition:\n{}'.format(yaml.dump(self._job))
Log.an().error(msg)
return self._fatal(msg)
self._job = valid_def
return True
def _load_workflow(self):
"""
Load and validate workflow definition from the database.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
try:
data_source = DataSource(self._config['database'])
except DataSourceException as err:
msg = 'data source initialization error [{}]'.format(str(err))
Log.an().error(msg)
return self._fatal(msg)
self._workflow = data_source.get_workflow_def_by_id(
self._job['workflow_id']
)
if self._workflow is False:
msg = 'cannot load workflow from data source: workflow_id={}'.\
format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
if not self._workflow:
msg = 'workflow not found: workflow_id={}'\
.format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
# validate the workflow definition
valid_def = Definition.validate_workflow(self._workflow)
if valid_def is False:
msg = 'invalid workflow definition:\n{}'\
.format(yaml.dump(self._workflow))
Log.an().error(msg)
return self._fatal(msg)
self._workflow = valid_def
return True
def _load_apps(self):
"""
Load and validate app definitions from the database.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
try:
data_source = DataSource(self._config['database'])
except DataSourceException as err:
msg = 'data source initialization error [{}]'.format(str(err))
Log.an().error(msg)
return self._fatal(msg)
self._apps = data_source.get_app_defs_by_workflow_id(
self._job['workflow_id']
)
if self._apps is False:
msg = 'cannot load apps from data source: workflow_id={}'.\
format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
if not self._apps:
msg = 'no apps found for workflow: workflow_id={}'.\
format(self._job['workflow_id'])
Log.an().error(msg)
return self._fatal(msg)
# validate the app definitions
for app in self._apps:
valid_def = Definition.validate_app(self._apps[app])
if valid_def is False:
msg = 'invalid app definition:\n{}'\
.format(yaml.dump(self._apps[app]))
Log.an().error(msg)
return self._fatal(msg)
self._apps[app] = valid_def
return True
def _inject_job_params(self):
# substitute inputs
for input_key in self._workflow['inputs']:
self._workflow['inputs'][input_key]['value']\
= self._workflow['inputs'][input_key]['default']
for input_key in self._job['inputs']:
if input_key in self._workflow['inputs']:
self._workflow['inputs'][input_key]['value']\
= self._job['inputs'][input_key]
# substitute parameters
for parameter_key in self._workflow['parameters']:
self._workflow['parameters'][parameter_key]['value']\
= self._workflow['parameters'][parameter_key]['default']
for parameter_key in self._job['parameters']:
if parameter_key in self._workflow['parameters']:
self._workflow['parameters'][parameter_key]['value']\
= self._job['parameters'][parameter_key]
# update publish list
if self._job['publish']:
# over-ride the workflow publish list with the job publish list
self._workflow['publish'] = self._job['publish']
# update the publish list based on publish flag of each step
for step_name, step in self._workflow['steps'].items():
if step['publish']:
if step_name not in self._workflow['publish']:
self._workflow['publish'].append(step_name)
# insert step execution parameters
for step_name, step in self._workflow['steps'].items():
step['execution'] = {
'context': self._job['execution']['context']['default'],
'method': self._job['execution']['method']['default'],
'parameters': copy.deepcopy(self._job['execution']['parameters']['default'])
}
if step_name in self._job['execution']['context']:
step['execution']['context'] \
= self._job['execution']['context'][step_name]
if step_name in self._job['execution']['method']:
step['execution']['method'] \
= self._job['execution']['method'][step_name]
if step_name in self._job['execution']['parameters']:
# only copy params that have been set to avoid deleting default params
for param_name in self._job['execution']['parameters'][step_name]:
step['execution']['parameters'][param_name] \
= self._job['execution']['parameters'][step_name][param_name]
return True
def _init_exec_context_set(self):
"""
Initialize set of execution contexts, which is specified by the execution.context job
parameters.
Args:
self: class instance
Returns:
On success: True.
"""
# get explicit execution contexts from the job parameters
self._exec_contexts = set(self._job['execution']['context'].values())
# check validity of exec contexts
for context in self._exec_contexts:
if not Contexts.is_exec_context(context):
msg = 'invalid exec context: {}'.format(context)
Log.an().error(msg)
return self._fatal(msg)
Log.some().debug('execution contexts: %s', self._exec_contexts)
return True
def _init_data_context_set(self):
"""
Initialize set of data contexts, which is determined by inputs and output.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# check input URIs for data contexts
for input_key in self._workflow['inputs']:
parsed_uri = URIParser.parse(self._workflow['inputs'][input_key]['value'][0])
if not parsed_uri:
msg = 'invalid input uri: {}'.format(
self._workflow['inputs'][input_key]['value'][0]
)
Log.an().error(msg)
return self._fatal(msg)
self._data_contexts.add(parsed_uri['scheme'])
# add output URI data context
parsed_output_uri = URIParser.parse(self._job['output_uri'])
if not parsed_output_uri:
msg = 'invalid base of job output uri: {}'.format(
self._job['output_uri']
)
Log.an().error(msg)
return self._fatal(msg)
self._data_contexts.add(parsed_output_uri['scheme'])
# check validity of data contexts
for context in self._data_contexts:
if not Contexts.is_data_context(context):
msg = 'invalid data context: {}'.format(context)
Log.an().error(msg)
return self._fatal(msg)
Log.some().debug('data contexts: %s', self._data_contexts)
return True
def _init_job_uris(self):
"""
Initialize all work and output URIs.
Args:
self: class instance
Returns:
On success: True.
On failure: False.
"""
# name of the job directory
job_dir = slugify(self._job['name'], regex_pattern=r'[^-a-z0-9_]+')
job_dir_hash = '{}-{}'.format(job_dir, self._job['job_id'][:8])
# validate work URI for each exec context
# use the 'data_scheme' for each execution context
# and place into a set to remove repeats
for context in {
Contexts.get_data_scheme_of_exec_context(con)
for con in self._exec_contexts
}:
# work_uri must be set for each exec_context
if context not in self._job['work_uri']:
msg = 'missing work_uri for context: {}'.format(context)
Log.an().error(msg)
return self._fatal(msg)
parsed_uri = URIParser.parse(self._job['work_uri'][context])
if not parsed_uri:
msg = 'invalid base of job work uri for context: | |
import copy
from keras import optimizers
from keras import backend as K
from keras import objectives
from keras.models import Model
from keras.layers import Input, Dense, Lambda, LSTM, Concatenate, LeakyReLU
from keras.layers.core import Dense, Activation, Flatten, RepeatVector
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import GRU
from keras.layers.convolutional import Convolution1D
class TilingVAE():
    """Convolutional variational autoencoder over one-hot encoded
    sequences of length ``max_length`` drawn from ``charset``.

    ``create`` builds three Keras models that share training state:
    ``encoder`` (sequence -> sampled latent vector), ``decoder`` (latent
    vector -> reconstructed sequence), and ``autoencoder`` (end-to-end,
    used for training).

    NOTE(review): layer names are significant -- ``load``/``create`` use
    ``load_weights(by_name=True)`` to fill the encoder/decoder models
    from a full autoencoder checkpoint, so renaming layers breaks loading.
    """

    # set by create(); class-level default so the attribute always exists
    autoencoder = None

    def create(self,
        charset,
        max_length = 120,
        latent_rep_size = 292,
        weights_file = None):
        """Build the encoder, decoder, and autoencoder models.

        Args:
            charset: sequence of symbols; its length is the one-hot width.
            max_length: fixed (padded) input sequence length.
            latent_rep_size: dimensionality of the latent space.
            weights_file: optional checkpoint to warm-start all models.
        """
        charset_length = len(charset)

        # stand-alone encoder model
        x = Input(shape=(max_length, charset_length))
        _, z = self._buildEncoder(x, latent_rep_size, max_length)
        self.encoder = Model(x, z)

        # stand-alone decoder model fed from a raw latent vector
        encoded_input = Input(shape=(latent_rep_size,))
        self.decoder = Model(
            encoded_input,
            self._buildDecoder(
                encoded_input,
                latent_rep_size,
                max_length,
                charset_length
            )
        )

        # end-to-end autoencoder; the encoder is built a second time so
        # the returned vae_loss closes over THIS graph's z_mean/z_log_var
        x1 = Input(shape=(max_length, charset_length))
        vae_loss, z1 = self._buildEncoder(x1, latent_rep_size, max_length)
        self.autoencoder = Model(
            x1,
            self._buildDecoder(
                z1,
                latent_rep_size,
                max_length,
                charset_length
            )
        )

        # optionally warm-start all three models from one checkpoint
        if weights_file:
            self.autoencoder.load_weights(weights_file)
            self.encoder.load_weights(weights_file, by_name = True)
            self.decoder.load_weights(weights_file, by_name = True)

        self.autoencoder.compile(optimizer = 'Adam',
                                 loss = vae_loss,
                                 metrics = ['accuracy'])

    def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        """Build the convolutional encoder on tensor *x*.

        Returns:
            (vae_loss, z): the training loss closure and the sampled
            latent tensor (reparameterization trick).
        """
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            # reparameterization trick: z = mu + sigma * epsilon
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., stddev = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            # reconstruction term, scaled to per-sequence units
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            # KL divergence to the unit-Gaussian prior
            # NOTE(review): coefficient 0.125 (not the textbook 0.5)
            # down-weights the KL term -- presumably intentional; confirm
            kl_loss = - 0.125 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))

    def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
        """Build the GRU decoder: latent vector *z* -> per-step softmax."""
        h = Dense(latent_rep_size, name='latent_input', activation = 'relu')(z)
        h = RepeatVector(max_length, name='repeat_vector')(h)
        h = GRU(701, return_sequences = True, name='out_gru_1')(h)
        h = GRU(701, return_sequences = True, name='out_gru_2')(h)
        h = GRU(701, return_sequences = True, name='out_gru_3')(h)
        return TimeDistributed(Dense(charset_length, activation='softmax'), name='decoded_mean')(h)

    def save(self, filename):
        """Save the autoencoder weights to *filename*."""
        self.autoencoder.save_weights(filename)

    def load(self, charset, weights_file, max_w_length=120, latent_rep_size=292):
        """Rebuild the models and load weights from *weights_file*."""
        self.create(charset, weights_file=weights_file, max_length=max_w_length, latent_rep_size=latent_rep_size)
class Tiling_LSTM_VAE():
    """LSTM-based variational autoencoder over one-hot encoded sequences.

    Same encoder/decoder/autoencoder triple as ``TilingVAE``, but with a
    two-layer LSTM encoder and a two-layer LSTM decoder, and an explicit
    Adam optimizer instance.

    NOTE(review): layer names are significant -- weight loading uses
    ``load_weights(by_name=True)``, so renaming layers breaks it.
    """

    # set by create(); class-level default so the attribute always exists
    autoencoder = None

    def create(self,
        charset,
        max_length = 120,
        latent_rep_size = 292,
        weights_file = None):
        """Build the encoder, decoder, and autoencoder models.

        Args:
            charset: sequence of symbols; its length is the one-hot width.
            max_length: fixed (padded) input sequence length.
            latent_rep_size: dimensionality of the latent space.
            weights_file: optional checkpoint to warm-start all models.
        """
        charset_length = len(charset)

        # stand-alone encoder model
        x = Input(shape=(max_length, charset_length), name='main_input')
        _, latent_x = self._buildEncoder(x, latent_rep_size, max_length)
        self.encoder = Model(x, latent_x)

        # stand-alone decoder model fed from a raw latent vector
        encoded_input = Input(shape=(latent_rep_size,))
        self.decoder = Model(
            encoded_input,
            self._buildDecoder(
                encoded_input,
                latent_rep_size,
                max_length,
                charset_length
            )
        )

        # end-to-end autoencoder; the encoder is rebuilt so vae_loss
        # closes over THIS graph's z_mean/z_log_var tensors
        ae_input = Input(shape=(max_length, charset_length), name='main_input')
        vae_loss, ae_latent_z = self._buildEncoder(ae_input, latent_rep_size, max_length)
        self.autoencoder = Model(
            ae_input,
            self._buildDecoder(
                ae_latent_z,
                latent_rep_size,
                max_length,
                charset_length
            )
        )

        # optionally warm-start all three models from one checkpoint
        if weights_file:
            self.autoencoder.load_weights(weights_file)
            self.encoder.load_weights(weights_file, by_name = True)
            self.decoder.load_weights(weights_file, by_name = True)

        opt = optimizers.Adam(lr=0.00001, amsgrad=True)
        self.autoencoder.compile(optimizer = opt,
                                 loss = vae_loss,
                                 metrics = ['accuracy'])

    def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        """Build the LSTM encoder on tensor *x*.

        Returns:
            (vae_loss, z): the training loss closure and the sampled
            latent tensor (reparameterization trick).
        """
        h = LSTM(301, return_sequences = True, name='in_lstm_1')(x)
        h = LSTM(301, return_sequences = True, name='in_lstm_2')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            # reparameterization trick: z = mu + sigma * epsilon
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., stddev = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            # reconstruction term, scaled to per-sequence units
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            # KL divergence to the unit-Gaussian prior
            # NOTE(review): coefficient 0.125 (not the textbook 0.5)
            # down-weights the KL term -- presumably intentional; confirm
            kl_loss = - 0.125 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))

    def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
        """Build the LSTM decoder: latent vector *z* -> per-step softmax."""
        h = Dense(latent_rep_size, name='latent_input', activation = 'relu')(z)
        h = RepeatVector(max_length, name='repeat_vector')(h)
        h = LSTM(501, return_sequences=True, name='out_lstm_1')(h)
        h = LSTM(501, return_sequences=True, name='out_lstm_2')(h)
        return TimeDistributed(Dense(charset_length, activation='softmax'), name='decoded_mean')(h)

    def save(self, filename):
        """Save the autoencoder weights to *filename*."""
        self.autoencoder.save_weights(filename)

    def load(self, charset, weights_file, max_w_length=120, latent_rep_size=292):
        """Rebuild the models and load weights from *weights_file*."""
        self.create(charset, weights_file=weights_file, max_length=max_w_length, latent_rep_size=latent_rep_size)
class Tiling_Triplet_LSTM_VAE():
    """LSTM VAE trained with an additional triplet (contrastive) loss.

    The autoencoder takes three inputs -- anchor ``x``, positive ``y``,
    negative ``z`` -- and reconstructs only the anchor; the triplet term
    pulls the anchor's latent code toward the positive's and away from
    the negative's.

    NOTE(review): each ``_buildEncoder`` call creates a FRESH set of
    layers, so the x/y/z branches do not share weights even though the
    layers carry identical names -- confirm this is intended.
    """

    # set by create(); class-level default so the attribute always exists
    autoencoder = None

    def create(self,
        charset,
        max_length = 120,
        latent_rep_size = 292,
        weights_file = None):
        """Build encoder, decoder, and triplet autoencoder models.

        Args:
            charset: sequence of symbols; its length is the one-hot width.
            max_length: fixed (padded) input sequence length.
            latent_rep_size: dimensionality of the latent space.
            weights_file: optional checkpoint to warm-start all models.
        """
        charset_length = len(charset)

        # three inputs: anchor, positive, and negative examples
        x = Input(shape=(max_length, charset_length), name='main_input')
        y = Input(shape=(max_length, charset_length), name='positive_input')
        z = Input(shape=(max_length, charset_length), name='negative_input')

        # stand-alone encoder model on the anchor branch
        _, latent_x = self._buildEncoder(x, latent_rep_size, max_length)
        self.encoder = Model(x, latent_x)

        # stand-alone decoder model fed from a raw latent vector
        encoded_input = Input(shape=(latent_rep_size,))
        self.decoder = Model(
            encoded_input,
            self._buildDecoder(
                encoded_input,
                latent_rep_size,
                max_length,
                charset_length
            )
        )

        # encoder branches for the positive and negative inputs
        _, latent_y = self._buildEncoder(y, latent_rep_size, max_length)
        _, latent_z = self._buildEncoder(z, latent_rep_size, max_length)
        # NOTE(review): this REBINDS latent_x to a new encoder build; the
        # autoencoder below therefore uses different layers than
        # self.encoder, and vae_loss closes over this build's tensors
        vae_loss, latent_x = self._buildEncoder(x, latent_rep_size, max_length)

        # self.autoencoder = Model(
        #     x,
        #     self._buildDecoder(
        #         latent_x,
        #         latent_rep_size,
        #         max_length,
        #         charset_length
        #     )
        # )
        self.autoencoder = Model(
            (x,y,z),
            self._buildDecoder(
                latent_x,
                latent_rep_size,
                max_length,
                charset_length
            )
        )

        # contrastive loss on sampled latent point coordinates
        def triplet_loss(x_in, x_pred):
            # hinge on squared distances; margin is 0.0 here (the XL
            # variant uses 1.0)
            lat_x = K.flatten(latent_x)
            pos_y = K.flatten(latent_y)
            neg_z = K.flatten(latent_z)
            d_pos = lat_x - pos_y
            d_neg = lat_x - neg_z
            tr_loss = K.maximum(0.0, 0.0 + K.sum(d_pos * d_pos) - K.sum(d_neg * d_neg))
            return tr_loss

        def combined_loss(x_in, x_pred):
            # total objective: reconstruction + KL + triplet
            return vae_loss(x_in, x_pred) + triplet_loss(x_in, x_pred)

        # optionally warm-start all three models from one checkpoint
        if weights_file:
            self.autoencoder.load_weights(weights_file)
            self.encoder.load_weights(weights_file, by_name = True)
            self.decoder.load_weights(weights_file, by_name = True)

        opt = optimizers.Adam(lr=0.00001, amsgrad=True)
        self.autoencoder.compile(optimizer = opt,
                                 loss = combined_loss,
                                 metrics = [triplet_loss, vae_loss, 'accuracy'])

    def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 1.0):
        """Build an LSTM encoder branch on tensor *x*.

        Returns:
            (vae_loss, latent_x): the VAE loss closure and the sampled
            latent tensor.

        NOTE(review): epsilon_std defaults to 1.0 here, unlike the 0.01
        used by the non-triplet classes -- confirm this is intended.
        """
        #x,y,z = input_triple
        h = LSTM(301, return_sequences = True, name='in_lstm_1')(x)
        h = LSTM(301, return_sequences = True, name='in_lstm_2')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)
        z_mean_x = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var_x = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def sampling(args):
            # reparameterization trick: z = mu + sigma * epsilon
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., stddev = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        latent_x = Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean_x, z_log_var_x])

        # h_y = LSTM(301, return_sequences = True, name='in_lstm_1')(y)
        # h_y = LSTM(301, return_sequences = True, name='in_lstm_2')(h_y)
        # h_y = Flatten(name='flatten_1')(h_y)
        # h_y = Dense(435, activation = 'relu', name='dense_1')(h_y)
        # z_mean_y = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h_y)
        # z_log_var_y = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h_y)
        # latent_y = Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean_y, z_log_var_y])
        # h_z = LSTM(301, return_sequences = True, name='in_lstm_1')(z)
        # h_z = LSTM(301, return_sequences = True, name='in_lstm_2')(h_z)
        # h_z = Flatten(name='flatten_1')(h_z)
        # h_z = Dense(435, activation = 'relu', name='dense_1')(h_z)
        # z_mean_z = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h_z)
        # z_log_var_z = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h_z)
        # latent_z = Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean_z, z_log_var_z])

        def vae_loss(x_in, x_decoded):
            #x,y,z = x_in
            x = K.flatten(x_in)
            x_decoded = K.flatten(x_decoded)
            #cross entropy
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded)
            #Kullback-Leibler regularization
            # NOTE(review): coefficient 0.25 here vs 0.125 in the other
            # classes -- presumably intentional; confirm
            kl_loss = - 0.25 * K.sum(1 + z_log_var_x - K.square(z_mean_x) - K.exp(z_log_var_x), axis = -1)
            return xent_loss + kl_loss

        #triplet contrastive loss on Euclidean distance in latent space
        # flat_x = K.flatten(z_mean_x)
        # flat_y = K.flatten(z_mean_y)
        # flat_z = K.flatten(z_mean_z)
        # d_pos = flat_x - flat_y
        # d_neg = flat_x - flat_z
        # tr_loss = K.max(0.0, 1.0 + K.dot(d_pos, d_pos) - K.dot(d_neg, d_neg))
        #return xent_loss + kl_loss + 0.125 * tr_loss
        return (vae_loss, latent_x)

    def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
        """Build the LSTM decoder: latent vector *z* -> per-step softmax."""
        h = Dense(latent_rep_size, name='latent_input', activation = 'relu')(z)
        h = RepeatVector(max_length, name='repeat_vector')(h)
        h = LSTM(501, return_sequences=True, name='out_lstm_1')(h)
        h = LSTM(501, return_sequences=True, name='out_lstm_2')(h)
        return TimeDistributed(Dense(charset_length, activation='softmax'), name='decoded_mean')(h)

    def save(self, filename):
        """Save the autoencoder weights to *filename*."""
        self.autoencoder.save_weights(filename)

    def load(self, charset, weights_file, max_w_length=120, latent_rep_size=292):
        """Rebuild the models and load weights from *weights_file*."""
        self.create(charset, weights_file=weights_file, max_length=max_w_length, latent_rep_size=latent_rep_size)
class Tiling_LSTM_VAE_XL():
autoencoder = None
def create(self,
charset,
max_length = 120,
latent_rep_size = 292,
weights_file = None):
charset_length = len(charset)
x = Input(shape=(max_length, charset_length), name='main_input')
y = Input(shape=(max_length, charset_length), name='positive_input')
z = Input(shape=(max_length, charset_length), name='negative_input')
_, latent_x = self._buildEncoder(x, latent_rep_size, max_length)
self.encoder = Model(x, latent_x)
encoded_input = Input(shape=(latent_rep_size,))
self.decoder = Model(
encoded_input,
self._buildDecoder(
encoded_input,
latent_rep_size,
max_length,
charset_length
)
)
_, latent_y = self._buildEncoder(y, latent_rep_size, max_length)
_, latent_z = self._buildEncoder(z, latent_rep_size, max_length)
vae_loss, latent_x = self._buildEncoder(x, latent_rep_size, max_length)
# self.autoencoder = Model(
# x,
# self._buildDecoder(
# latent_x,
# latent_rep_size,
# max_length,
# charset_length
# )
# )
self.autoencoder = Model(
(x,y,z),
self._buildDecoder(
latent_x,
latent_rep_size,
max_length,
charset_length
)
)
# contrastive loss on sampled latent point coordinates
def triplet_loss(x_in, x_pred):
lat_x = K.flatten(latent_x)
pos_y = K.flatten(latent_y)
neg_z = K.flatten(latent_z)
d_pos = lat_x - pos_y
d_neg = lat_x - neg_z
tr_loss = K.maximum(0.0, 1.0 + K.sum(d_pos * d_pos) - K.sum(d_neg * d_neg))
return tr_loss
def combined_loss(x_in, x_pred):
return vae_loss(x_in, x_pred) + 0.25 * triplet_loss(x_in, x_pred)
if weights_file:
self.autoencoder.load_weights(weights_file)
self.encoder.load_weights(weights_file, by_name = True)
self.decoder.load_weights(weights_file, by_name = True)
opt = optimizers.Adam(lr=0.00001, amsgrad=True)
self.autoencoder.compile(optimizer = opt,
loss = combined_loss,
metrics = [triplet_loss, vae_loss, 'accuracy'])
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
lstm_0_f = LSTM(512, return_sequences=True, name='lstm_0_f')(x)
lstm_0_b = LSTM(512, return_sequences=True, name='lstm_0_b', go_backwards=True)(x)
x | |
<reponame>r-o-b-e-r-t-o/sphinxcontrib-needs
import math
import os
import matplotlib
import numpy
from docutils import nodes
from sphinxcontrib.needs.filter_common import FilterBase, filter_needs
if not os.environ.get("DISPLAY"):
matplotlib.use("Agg")
import hashlib
from docutils.parsers.rst import directives
from sphinxcontrib.needs.logging import get_logger
logger = get_logger(__name__)
class Needbar(nodes.General, nodes.Element):
    """Placeholder docutils node inserted by NeedbarDirective; it is
    replaced during doctree resolution (see process_needbar)."""
    pass
class NeedbarDirective(FilterBase):
    """
    Directive to plot diagrams with the help of matplotlib

    .. versionadded: 0.7.5

    ``run`` only parses and stores the directive configuration in
    ``env.need_all_needbar``; the chart itself is rendered later by
    ``process_needbar`` during doctree resolution.
    """

    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True

    # user-facing options; ``flag`` options take no value
    option_spec = {
        "style": directives.unchanged_required,
        "colors": directives.unchanged_required,
        "text_color": directives.unchanged_required,
        "x_axis_title": directives.unchanged_required,
        "xlabels": directives.unchanged_required,
        "xlabels_rotation": directives.unchanged_required,
        "y_axis_title": directives.unchanged_required,
        "ylabels": directives.unchanged_required,
        "ylabels_rotation": directives.unchanged_required,
        "separator": directives.unchanged_required,
        "legend": directives.flag,
        "stacked": directives.flag,
        "show_sum": directives.flag,
        "transpose": directives.flag,
        "horizontal": directives.flag,
    }

    # Algorithm:
    # 1. define constants
    # 2. Stores infos for needbar
    def run(self):
        # 1. define constants
        env = self.state.document.settings.env
        if not hasattr(env, "need_all_needbar"):
            env.need_all_needbar = {}

        # be sure, global var is available. If not, create it
        if not hasattr(env, "needs_all_needs"):
            env.needs_all_needs = {}

        # NOTE(review): 'id' shadows the builtin; kept as-is
        id = env.new_serialno("needbar")
        targetid = f"needbar-{env.docname}-{id}"
        targetnode = nodes.target("", "", ids=[targetid])
        # prefix used in all error messages raised for this directive
        error_id = f"Needbar - file '{env.docname}' - line '{self.lineno}'"

        content = self.content
        if not content:
            raise Exception(f"{error_id} content cannot be empty.")

        # optional directive argument becomes the chart title
        title = self.arguments[0].strip() if self.arguments else None

        text_color = self.options.get("text_color", None)
        if text_color:
            text_color = text_color.strip()

        style = self.options.get("style", None)
        # NOTE(review): when no style option is given, this calls
        # matplotlib.style.use("default") for its side effect and stores
        # its return value (None) as the style; likely meant to store
        # "default" -- confirm against the consumer in process_needbar.
        style = style.strip() if style else matplotlib.style.use("default")

        legend = "legend" in self.options

        colors = self.options.get("colors", None)
        if colors:
            colors = [x.strip() for x in colors.split(",")]

        x_axis_title = self.options.get("x_axis_title", None)
        if x_axis_title:
            x_axis_title = x_axis_title.strip()

        xlabels = self.options.get("xlabels", None)
        if xlabels:
            xlabels = [x.strip() for x in xlabels.split(",")]

        xlabels_rotation = self.options.get("xlabels_rotation", None)
        if xlabels_rotation:
            xlabels_rotation = xlabels_rotation.strip()

        y_axis_title = self.options.get("y_axis_title", None)
        if y_axis_title:
            y_axis_title = y_axis_title.strip()

        ylabels = self.options.get("ylabels", None)
        if ylabels:
            ylabels = [y.strip() for y in ylabels.split(",")]

        ylabels_rotation = self.options.get("ylabels_rotation", None)
        if ylabels_rotation:
            ylabels_rotation = ylabels_rotation.strip()

        # separator used to split each content line into values
        separator = self.options.get("separator", None)
        if not separator:
            separator = ","

        stacked = "stacked" in self.options
        show_sum = "show_sum" in self.options
        transpose = "transpose" in self.options
        horizontal = "horizontal" in self.options

        # 2. Stores infos for needbar
        env.need_all_needbar[targetid] = {
            "docname": env.docname,
            "lineno": self.lineno,
            "target_node": targetnode,
            "env": env,
            "error_id": error_id,
            "title": title,
            "content": content,
            "legend": legend,
            "x_axis_title": x_axis_title,
            "xlabels": xlabels,
            "xlabels_rotation": xlabels_rotation,
            "y_axis_title": y_axis_title,
            "ylabels": ylabels,
            "ylabels_rotation": ylabels_rotation,
            "separator": separator,
            "stacked": stacked,
            "show_sum": show_sum,
            "transpose": transpose,
            "horizontal": horizontal,
            "style": style,
            "colors": colors,
            "text_color": text_color,
        }
        return [targetnode] + [Needbar("")]
# Algorithm:
# 1. define constants
# 2. pre process data
# 3. process the labels (maybe from content)
# 4. transpose the data if needed
# 5. process content
# 6. calculate index according to configuration and content size
# 7. styling and coloring
# 8. create figure
# 9. final storage
# 10. cleanup matplotlib
def process_needbar(app, doctree, fromdocname):
env = app.builder.env
# NEEDFLOW
for node in doctree.traverse(Needbar):
if not app.config.needs_include_needs:
# Ok, this is really dirty.
# If we replace a node, docutils checks, if it will not lose any attributes.
# But this is here the case, because we are using the attribute "ids" of a node.
# However, I do not understand, why losing an attribute is such a big deal, so we delete everything
# before docutils claims about it.
for att in ("ids", "names", "classes", "dupnames"):
node[att] = []
node.replace_self([])
continue
id = node.attributes["ids"][0]
current_needbar = env.need_all_needbar[id]
# 1. define constants
error_id = current_needbar["error_id"]
separator = current_needbar["separator"]
if not separator:
separator = ","
# 2. pre process data
# local_data: only valid data be stored, e.g. get ried of xlabels or ylabels content
local_data = []
test_columns_length = 0
content = current_needbar["content"]
for x in range(len(content)):
row_data = content[x].split(separator)
local_data.append(row_data)
if x == 0:
test_columns_length = len(row_data)
else:
# We can only process content with the same lenght for each line
if test_columns_length != len(row_data):
raise Exception(f"{error_id}: each content line must have the same length")
# 3. process the labels (maybe from content)
xlabels = current_needbar["xlabels"]
xlabels_in_content = bool(xlabels and len(xlabels) >= 1 and xlabels[0] == "FROM_DATA")
ylabels = current_needbar["ylabels"]
ylabels_in_content = bool(ylabels and len(ylabels) >= 1 and ylabels[0] == "FROM_DATA")
if xlabels_in_content:
# get xlabels from content => first row in content
xlabels = local_data[0]
local_data = local_data[1:] # remove the first row from further processing
if ylabels_in_content: # we have a ylabels in the content:
xlabels = xlabels[1:] # first element (0) in the row has to be ignored
xlabels = [x.strip() for x in xlabels]
if not xlabels: # xlabels not been fetched from parameter or content
xlabels = [str(1 + x) for x in range(len(local_data[0]))]
if ylabels_in_content:
# get ylabels from content => first dataset in each row
ylabels = []
new_local_data = []
for line in local_data:
ylabels.append(line[0]) # fetch ylabels values from first rows
new_local_data.append(line[1:])
local_data = new_local_data
ylabels = [y.strip() for y in ylabels]
if not ylabels: # ylabels not been fetched from parameter or content
ylabels = [str(1 + y) for y in range(len(local_data))]
# ensure length of xlabels == content columns
if not len(xlabels) == len(local_data[0]):
raise Exception(
f"{error_id} length of xlabels: {len(xlabels)} is not equal with sum of columns: {len(local_data[0])}"
)
# ensure length of ylabels == content rows
if not len(ylabels) == len(local_data):
raise Exception(
f"{error_id} length of ylabels: {len(ylabels)} is not equal with sum of rows: {len(local_data)}"
)
# 4. transpose the data if needed
if current_needbar["transpose"]:
local_data = [[local_data[j][i] for j in range(len(local_data))] for i in range(len(local_data[0]))]
tmp = ylabels
ylabels = xlabels
xlabels = tmp
# 5. process content
local_data_number = []
for line in local_data:
line_number = []
for element in line:
element = element.strip()
if element.isdigit():
line_number.append(float(element))
else:
result = len(filter_needs(app, app.env.needs_all_needs.values(), element))
line_number.append(float(result))
local_data_number.append(line_number)
# 6. calculate index according to configuration and content size
index = []
for row in range(len(local_data_number)):
line = []
for column in range(len(local_data_number[0])):
if current_needbar["stacked"]:
line.append(column)
else:
value = row + column * len(local_data_number) + column
line.append(value)
index.append(line)
# 7. styling and coloring
style_previous_to_script_execution = matplotlib.rcParams
# Set matplotlib style
if current_needbar["style"]:
matplotlib.style.use(current_needbar["style"])
else:
# It is necessary to set default style, otherwise the old styling will be used again.
matplotlib.style.use("default")
# set text colors
if current_needbar["text_color"]:
text_color = current_needbar["text_color"].strip()
matplotlib.rcParams["text.color"] = text_color
matplotlib.rcParams["axes.labelcolor"] = text_color
try:
matplotlib.rcParams["xtick.labelcolor"] = text_color
matplotlib.rcParams["ytick.labelcolor"] = text_color
except KeyError:
# labelcolor is not support in this matplotlib version. Use color instead.
matplotlib.rcParams["xtick.color"] = text_color
matplotlib.rcParams["ytick.color"] = text_color
# get bar colors
colors = current_needbar["colors"]
if colors:
# Remove space from color names
colors = [x.strip() for x in colors]
# Handle the cases: len(local_data) > len(colors) or len(local_data) < len(colors)
# We do the same for input color, with transpose the user could forget to change the color accordingly
if not colors or len(colors) == 0:
# Set default colors, if nothing is given
colors = matplotlib.rcParams["axes.prop_cycle"].by_key()["color"]
else:
# extend given colors with default colors
colors = colors + matplotlib.rcParams["axes.prop_cycle"].by_key()["color"]
multi = math.ceil(len(local_data) / len(colors))
if multi > 1:
print(f"{error_id} warning: color schema is smaller than data, double coloring is occurring")
colors = colors * multi
colors = colors[: len(local_data)]
y_offset = numpy.zeros(len(local_data_number[0]))
# 8. create figure
figure, axes = matplotlib.pyplot.subplots()
for x in range(len(local_data_number)):
if not current_needbar["horizontal"]:
bar = axes.bar(
index[x],
local_data_number[x],
bottom=y_offset,
label=ylabels[x],
color=colors[x],
)
else:
bar = axes.barh(
index[x],
local_data_number[x],
left=y_offset,
label=ylabels[x],
color=colors[x],
)
if current_needbar["show_sum"]:
try:
axes.bar_label(bar, label_type="center") # show label in the middel of each bar
except AttributeError: # bar_label is not support in older matplotlib versions
current_needbar["show_sum"] = None
if current_needbar["stacked"]:
y_offset = y_offset + numpy.array(local_data_number[x])
# show for a stacked bar the overall value
if current_needbar["show_sum"] and x == len(local_data_number) - 1:
try:
axes.bar_label(bar)
except AttributeError: # bar_label is not support in older matplotlib versions
current_needbar["show_sum"] = None
if not current_needbar["horizontal"]:
# We want to support even older version of matplotlib, which do not support axes.set_xticks(labels)
x_pos = (numpy.array(index[0]) + numpy.array(index[len(local_data_number) - 1])) / 2
axes.set_xticks(x_pos)
axes.set_xticklabels(labels=xlabels)
else:
# We want to support even older version of matplotlib, which do not support axes.set_yticks(labels)
y_pos = (numpy.array(index[0]) + numpy.array(index[len(local_data_number) - 1])) / 2
axes.set_yticks(y_pos)
axes.set_yticklabels(labels=xlabels)
axes.invert_yaxis() # labels read top-to-bottom
| |
'0277-1691',
'1432-5195': '0341-2695',
'1943-2836': '0020-7284',
'1439-3964': '0172-4622',
'1473-5660': '0342-5282',
'1873-6386': '0160-2527',
'1872-8464': '0165-5876',
'1563-5279': '0020-7454',
'1873-491X': '0020-7489',
'1879-0135': '0020-7519',
'1573-2630': '0165-5701',
'1541-3527': '0091-2174',
'1745-8315': '0020-7578',
'1578-1283': '0213-9111',
'1741-2854': '0020-7640',
'1945-3388': '0198-7569',
'1471-6348': '0266-4623',
'1466-7657': '0020-8132',
'1536-9617': '0020-8167',
'1573-2584': '0301-1623',
'1432-1289': '0020-9554',
'1423-0100': '0300-5526',
'1552-5783': '0146-0404',
'1573-0646': '0167-6997',
'1536-0210': '0020-9996',
'1863-4362': '0021-1265',
'1879-2022': '0019-0578',
'1545-6994': '0021-1753',
'1096-4673': '0161-2840',
'2377-6463': '8756-8160',
'1432-1238': '0342-4642',
'1573-2835': '0091-0627',
'1939-1846': '0021-843X',
'1520-8524': '0001-4966',
'1365-2648': '0309-2402',
'1520-5118': '0021-8561',
'1532-8481': '8755-7223',
'1573-2517': '0165-0327',
'1550-3267': '0148-9917',
'1558-3597': '0735-1097',
'1541-1087': '0731-5724',
'1097-6825': '0091-6749',
'1520-5126': '0002-7863',
'1940-3208': '0744-8481',
'1943-4723': '0002-8177',
'1878-3570': '0002-8223',
'1532-5415': '0002-8614',
'1551-8221': '0098-8421',
'1558-9331': '0003-021X',
'1099-1263': '0260-437X',
'0165-022X': '0165-022X',
'1465-3494': '0140-511X',
'1941-2460': '0003-0651',
'1945-404X': '0090-7421',
'1872-7697': '0167-8760',
'1943-569X': '0003-1488',
'1468-5922': '0021-8774',
'1939-4640': '0196-3635',
'1876-4673': '0910-5050',
'1469-7580': '0021-8782',
'1525-3163': '0021-8812',
'1878-5891': '0378-5955',
'1096-4665': '0739-9332',
'1460-2091': '0305-7453',
'1938-3703': '0021-8855',
'1522-1601': '8750-7587',
'1939-1854': '0021-9010',
'1558-1977': '0889-8588',
'1872-6054': '0168-8510',
'1615-2573': '0910-8327',
'1532-4303': '0277-0903',
'1527-5418': '0890-8567',
'1573-3432': '0162-3257',
'1098-5530': '0021-9193',
'1555-2101': '0160-6689',
'1098-1101': '0733-2459',
'1756-2651': '0021-924X',
'1936-4563': '0094-2499',
'1573-6881': '0145-479X',
'1083-351X': '0021-9258',
'1873-2380': '0021-9290',
'1365-2680': '0144-1795',
'1097-4636': '0021-9304',
'1469-7599': '0021-9320',
'1535-1386': '0021-9355',
'1432-1335': '0171-5216',
'1540-8140': '0021-9525',
'1097-4652': '0021-9541',
'1558-1969': '0749-0712',
'1097-4644': '0730-2312',
'1477-9137': '0021-9533',
'1559-7016': '0271-678X',
'1550-5138': '0887-9311',
'1089-7690': '0021-9606',
'2050-8255': '0300-922X',
'1469-7602': '0305-0009',
'1469-7610': '0021-9630',
'1365-2710': '0269-4727',
'1773-0422': '0021-7697',
'2219-6749': '0258-414X',
'1945-239X': '0021-9665',
'1879-0984': '0166-0934',
'1945-7197': '0021-972X',
'1573-2592': '0271-9142',
'1744-411X': '1380-3395',
'1460-2350': '0268-1161',
'1745-655X': '0197-5897',
'1558-8238': '0021-9738',
'1098-660X': '0095-1137',
'1573-2657': '0142-4319',
'1872-8421': '0165-5728',
'1537-1603': '0736-0258',
'1539-2570': '0271-6798',
'1472-4146': '0021-9746',
'1600-051X': '0303-6979',
'1552-4604': '0091-2700',
'1878-4119': '1010-5182',
'1536-7312': '0196-206X',
'1097-4679': '0021-9762',
'1873-7943': '0005-7916',
'1097-0096': '0091-2751',
'1533-712X': '0271-0749',
'1095-7103': '0021-9797',
'1460-2466': '0021-9916',
'1873-7994': '0021-9924',
'1573-3610': '0094-5145',
'1096-9861': '0021-9967',
'1532-3129': '0021-9975',
'1097-6787': '0190-9622',
'1432-136X': '0174-1578',
'1532-3145': '0363-8715',
'1939-2117': '0022-006X',
'1600-0560': '0303-6987',
'1469-7629': '0022-0299',
'1095-9254': '0140-1971',
'1525-3198': '0022-0302',
'1879-176X': '0300-5712',
'1930-7837': '0022-0337',
'1544-0591': '0022-0345',
'1346-8138': '0385-2407',
'1938-291X': '0022-0493',
'2090-262X': '0013-2446',
'1532-8430': '0022-0736',
'1477-9986': '0022-0744',
'1479-6805': '0022-0795',
'1878-3554': '0099-2399',
'1470-2738': '0143-005X',
'1938-3711': '0022-5002',
'1477-9145': '0022-0949',
'1365-2796': '0954-6820',
'1096-0457': '0022-0965',
'1540-9538': '0022-1007',
'1939-2184': '0097-7403',
'1939-2222': '0096-3445',
'1939-1285': '0278-7393',
'1939-1277': '0096-1523',
'1097-010X': '0022-104X',
'1533-7294': '0094-3509',
'1556-4029': '0022-1198',
'1876-4282': '0015-7368',
'1540-7748': '0022-1295',
'1940-0888': '0022-1309',
'1465-2099': '0022-1317',
'1096-9071': '0146-6615',
'1940-0896': '0022-1325',
'1708-8283': '0883-0738',
'1531-6564': '0363-5023',
'1773-0597': '0181-5512',
'1532-4109': '0360-1234',
'1532-2513': '0892-3973',
'1720-8386': '0391-4097',
'1773-0430': '0368-2315',
'1552-5724': '0898-0101',
'1527-1927': '0361-6878',
'1539-2031': '0192-0790',
'2150-6000': '0022-1465',
'2158-8236': '0735-6722',
'1475-2697': '0022-149X',
'1600-0641': '0168-8278',
'1465-7333': '0022-1503',
'1559-6834': '0899-823X',
'1473-5857': '0268-1315',
'1540-3602': '0091-8369',
'1532-2939': '0195-6701',
'1557-9824': '0891-5520',
'1520-6696': '0022-5061',
'1468-4373': '0022-5045',
'1551-5044': '0022-1554',
'1884-3964': '0300-8134',
'1099-1751': '0749-6753',
'1473-5598': '0263-6352',
'1550-6606': '0022-1767',
'1872-7905': '0022-1759',
'1532-2742': '0163-4453',
'1537-6613': '0022-1899',
'1879-1611': '0022-1910',
'1096-0805': '0022-2011',
'1523-1747': '0022-202X',
'1945-2810': '0888-0395',
'1873-6513': '0885-3924',
'1724-6008': '0393-6155',
'1399-0020': '0901-5027',
'1464-5157': '0265-6736',
'1559-0755': '0257-277X',
'1827-1839': '0392-9590',
'1544-581X': '0148-916X',
'1880-408X': '0385-0110',
'1882-9619': '0047-1801',
'1883-2083': '0021-5384',
'1884-5185': '0300-9149',
'1362-3095': '0955-3002',
'1532-2211': '0266-7681',
'1563-5244': '0883-0185',
'1432-1262': '0179-1958',
'2155-661X': '0023-0294',
'1532-6543': '0022-2143',
'1748-5460': '0022-2151',
'1538-4780': '0022-2194',
'1521-057X': '0194-7648',
'1938-3673': '0741-5400',
'1539-7262': '0022-2275',
'1532-6586': '0161-4754',
'1432-1416': '0303-6812',
'1744-5019': '0360-5310',
'1573-689X': '0148-5598',
'2053-7395': '0022-2917',
'1520-4804': '0022-2623',
'1464-522X': '0309-1902',
'1938-2928': '0022-2585',
'1473-4257': '0306-6800',
'1468-6244': '0022-2593',
'1473-5644': '0022-2615',
'1600-0684': '0047-2565',
'1432-1424': '0022-2631',
'1573-3521': '0160-7715',
'1365-2818': '0022-2720',
'1089-8638': '0022-2836',
'1095-8584': '0022-2828',
'1432-1432': '0022-2844',
'1097-4687': '0362-2525',
'1557-9042': '0897-7151',
'1521-0715': '0092-623X',
'1460-2105': '0027-8874',
'0027-9684': '1943-4693',
'1520-6025': '0163-3864',
'1539-736X': '0022-3018',
'1097-4695': '0022-3034',
'1943-6270': '8756-971X',
'1873-3344': '0162-0134',
'1471-4159': '0022-3042',
'1532-8406': '0883-5403',
'1573-7381': '0300-4864',
'1432-1459': '0340-5354',
'1468-330X': '0022-3050',
'1878-5883': '0022-510X',
'1554-6578': '0022-3069',
'1522-1598': '0022-3077',
'1573-4951': '0920-654X',
'1878-5921': '0895-4356',
'1532-7655': '0737-0016',
'1550-5049': '0889-4655',
'1527-7755': '0732-183X',
'1573-7373': '0167-594X',
'1973-9478': '1120-009X',
'1933-0693': '0022-3085',
'1942-4396': '1043-2256',
'1529-2401': '0270-6474',
'1553-0205': '1043-254X',
'1998-3905': '0970-4388',
'1535-5667': '0161-5505',
'1539-0721': '0002-0443',
'1938-2421': '0148-4834',
'1553-0817': '0741-0581',
'1541-6100': '0022-3166',
'1881-7742': '0301-4800',
'1476-5543': '0743-8346',
'1552-6909': '0884-2175',
'1525-1497': '0884-8734',
'2375-1169': '0740-3232',
'1531-2291': '0890-5339',
'1879-1646': '0167-6296',
'1747-3667': '0952-1178',
'1554-558X': '0894-1912',
'1528-6916': '0885-4726',
'2375-110X': '0030-3941',
'1531-5053': '0278-2391',
'1365-2842': '0305-182X',
'1554-527X': '0736-0266',
'1520-6904': '0022-3263',
'1883-0854': '0030-6622',
'1563-5260': '0167-7063',
'1937-2345': '0022-3395',
'1536-4801': '0277-2116',
'1465-735X': '0146-8693',
'1098-2825': '0887-8013',
'1096-9896': '0022-3417',
'1873-2682': '1011-1344',
'1555-2039': '0368-3494',
'2159-9777': '0279-1072',
'1097-6833': '0022-3476',
'1678-4782': '0021-7557',
'1464-5246': '0265-2048',
'1938-2405': '0191-3913',
'1531-5037': '0022-3468',
'1532-656X': '0891-5245',
'1619-3997': '0300-5577',
'1600-0765': '0022-3484',
'1943-3670': '0022-3492',
'1467-6494': '0022-3506',
'1532-7752': '0022-3891',
'1939-1315': '0022-3514',
'1881-1353': '0386-846X',
'1600-079X': '0742-3098',
'1540-8566': '0163-9366',
'2042-7158': '0022-3573',
'1532-8449': '0882-5963',
'1520-6017': '0022-3549',
'1876-4738': '0914-5087',
'1530-8022': '0885-3282',
'1347-5231': '0031-6903',
'1521-4028': '0233-111X',
'2162-6537': '0731-8898',
'1521-0103': '0022-3565',
'1930-8264': '8750-7315',
'1873-4502': '0886-3350',
'1550-5073': '0893-2190',
'1469-7793': '0022-3751',
'1938-1352': '0748-7711',
'1600-0714': '0904-2512',
'1520-6688': '0276-8739',
'0972-2823': '0022-3859',
'1097-6841': '0022-3913',
'1879-1379': '0022-3956',
'1940-1019': '0022-3980',
'1573-6555': '0090-6905',
'1879-1360': '0022-3999',
'1752-7325': '0022-4006',
'2052-0468': '0035-8665',
'1939-1676': '0891-6640',
'1349-9157': '0449-3060',
'1773-0384': '0221-0363',
'1098-8947': '0743-684X',
'1872-7603': '0165-0378',
'1758-1095': '0141-0768',
'1748-0361': '0890-765X',
'1476-5527': '0950-9240',
'2224-9435': '1019-9128',
'1946-6005': '0363-468X',
'1544-4910': '0038-3139',
'1746-1561': '0022-4391',
'1097-0010': '0022-5142',
'1559-8519': '0022-4499',
'1748-5827': '0022-4510',
'1945-2403': '0146-4760',
'1528-8951': '0148-0731',
'1940-1183': '0022-4545',
'1533-4023': '0160-2446',
'1096-9098': '0022-4790',
'1095-8673': '0022-4804',
'0730-6652': '0733-1959',
'1095-8541': '0022-5193',
'1872-7573': '0378-8741',
'1097-685X': '0022-5223',
'1873-5835': '0145-2126',
'1872-678X': '0165-0270',
'1548-1336': '0160-6972',
'1097-4547': '0360-4012',
'1880-3989': '0388-1350',
'1529-8809': '0022-5282',
'1873-6483': '0740-5472',
'1097-9875': '0731-3810',
'1536-0237': '0883-5993',
'1465-3664': '0142-6338',
'0707-7270': '0381-6605',
'1466-447X': '0264-0414',
'1550-9613': '0278-4297',
'1527-3792': '0022-5347',
'1365-2885': '0140-7783',
'1098-5514': '0022-538X',
'1097-6809': '0741-5214',
'1943-3700': '0090-3558',
'1538-3598': '0098-7484',
'1347-4839': '0047-1828',
'1348-673X': '0021-4868',
'1882-4110': '0021-4930',
'1882-0824': '0485-1439',
'1465-3621': '0368-2811',
'1346-8146': '0021-499X',
'1349-7693': '0446-6586',
'1882-6482': '0021-5082',
'1884-2828': '0021-5112',
'1345-2746': '0386-9830',
'1613-2246': '0021-5155',
'1881-123X': '0021-5120',
'1347-3506': '0021-5198',
'1881-1396': '0021-521X',
'1884-1082': '0021-5236',
'1884-8788': '0374-3527',
'1884-7110': '0021-5287',
'1527-2966': '0099-1767',
'1880-1293': '0022-9717',
'1884-2410': '0022-9776',
'2309-5628': '0023-1207',
'1523-1755': '0085-2538',
'1439-3999': '0023-2165',
'1439-3824': '0300-8630',
'1883-0498': '0023-2513',
'1881-2090': '0023-5679',
'1573-2665': '0141-8955',
'1758-1117': '0023-6772',
'1530-0307': '0023-6837',
'1652-7518': '0023-7205',
'1474-547X': '0140-6736',
'1756-6053': '0023-8309',
'1531-4995': '0023-852X',
'1096-9101': '0196-8092',
'1879-0631': '0024-3205',
'1558-9307': '0024-4201',
'1600-0676': '0106-9543',
'1432-1750': '0341-2040',
'1080-6571': '0278-9671',
'1476-5551': '0887-6924',
'1942-5546': '0025-6196',
'1872-6216': '0047-6374',
'1741-0444': '0140-0118',
'1537-1948': '0025-7079',
'1557-9859': '0025-7125',
'1989-8932': '0210-5187',
'1098-1128': '0198-6325',
'2013-0155': '0379-0355',
'2048-8343': '0025-7273',
'1532-2777': '0306-9877',
'1326-5377': '0025-729X',
'2042-1834': '0025-8172',
'1523-2859': '0025-732X',
'1432-1831': '0300-8584',
'1096-911X': '0098-1532',
'1820-7383': '0025-8105',
'1615-6722': '0723-5003',
'1573-7365': '0885-7490',
'1468-0009': '0887-378X',
'1539-0683': '0361-929X',
'1552-681X': '0272-989X',
'1873-5894': '0730-725X',
'1872-9452': '0098-2997',
'1545-5882': '0145-9740',
'2150-7155': '0025-7206',
'2042-1818': '0025-8024',
'1466-187X': '0142-159X',
'1530-0315': '0195-9131',
'1522-2594': '0740-3194',
'1096-1208': '0882-4010',
'1098-2752': '0738-1085',
'1669-9106': '0025-7680',
'1536-5964': '0025-7974',
'1537-1719': '0737-4038',
'2353-1339': '0465-5893',
'1365-2958': '0950-382X',
'1532-5946': '0090-502X',
'1540-9597': '0276-3869',
'1678-8060': '0074-0276',
'1931-1338': '0047-6765',
'1464-3804': '0267-8357',
'1532-8600': '0026-0495',
'1934-4325': '0076-6941',
'1557-7988': '0076-6879',
'1532-3099': '0266-6138',
'1873-4111': '0378-5122',
'1348-0421': '0385-5600',
'1095-9319': '0026-2862',
'1365-2923': '0308-0110',
'1436-5073': '0026-3672',
'1437-2320': '0344-5607',
'1930-613X': '0026-4075',
'1827-1596': '0375-9393',
'1827-1618': '0026-4725',
'1827-1626': '0026-4733',
'1827-1650': '0026-4784',
'1827-1669': '0026-4806',
'1827-1677': '0026-4849',
'1460-2385': '0931-0509',
'1872-7972': '0304-3940',
'1872-8316': '0167-6253',
'1827-1685': '0026-4903',
'1460-4752': '0265-0568',
'1827-1715': '0026-4946',
'1827-1634': '0391-1977',
'1872-9738': '0892-0362',
'1827-174X': '0026-4970',
'1941-2452': '0884-5336',
'1545-861X': '0149-2195',
'1769-7131': '0987-7053',
'1532-2793': '0260-6917',
'1539-2880': '0730-0832',
'1557-9875': '0733-8619',
'1872-9142': '0161-5890',
'1096-1194': '0890-8508',
'1880-1129': '0301-4894',
'1608-3245': '0026-8933',
'1521-0111': '0026-895X',
'1573-4919': '0300-8177',
'1573-4978': '0301-4851',
'1098-5549': '0270-7306',
'1944-9917': '0888-8809',
'1662-3827': '0077-0809',
'1531-8257': '0885-3185',
'1662-3843': '0077-0892',
'1540-5834': '0037-976X',
'1883-177X': '0389-5386',
'1931-7581': '0027-2507',
'1097-4598': '0148-639X',
'1873-135X': '0027-5107',
'1545-8636': '0892-3787',
'1557-2536': '0027-5514',
'1573-0832': '0301-486X',
'1439-0507': '0933-7407',
'1872-9428': '0166-6851',
'1533-4406': '0028-4793',
'2186-3326': '0027-7622',
'1521-3803': '0027-769X',
'1476-4687': '0028-0836',
'1432-1904': '0028-1042',
'1432-1912': '0028-1298',
'1876-8784': '0028-2162',
'1338-4317': '0028-2685',
'2235-3186': '1660-8151',
'1433-0407': '0028-2804',
'1872-9061': '0300-2977',
'1558-1497': '0197-4580',
'1573-6903': '0364-3190',
'1773-0619': '0028-3770',
'1423-0208': '0251-5350',
'1365-2990': '0305-1846',
'1423-0194': '0028-3835',
'1743-1328': '0161-6412',
'1998-4022': '0028-3886',
'1349-8029': '0470-8105',
'1897-4260': '0028-3843',
'1882-1251': '0301-2603',
'1526-632X': '0028-3878',
'1439-1899': '0174-304X',
'1873-7064': '0028-3908',
'1532-2785': '0143-4179',
'1769-6615': '0222-9617',
'1524-4040': '0148-396X',
'1423-0224': '0302-282X',
'1873-3514': '0028-3932',
'1573-899X': '0097-0549',
'1432-1920': '0028-3940',
'1873-7544': '0306-4522',
'1347-7951': '0048-0428',
'1362-4962': '0305-1048',
'1558-1357': '0029-6465',
'1532-7914': '0163-5581',
'1744-6198': '0029-6473',
'1528-3968': '0029-6554',
'1538-9847': '0029-6562',
'1538-8662': '0361-1817',
'1538-8689': '0360-4039',
'1873-7528': '0149-7634',
'1879-0917': '0921-8696',
'1550-5103': '0363-9568',
'1872-9711': '0161-813X',
'1872-8111': '0168-0102',
'1538-9855': '0363-3624',
'1753-4887': '0029-6643',
'1473-5628': '0143-3636',
'1552-7409': '0894-3184',
'1175-8716': '0028-8446',
'1538-8670': '0744-6314',
'1873-233X': '0029-7844',
'1533-9866': '0029-7828',
'1558-0474': '0889-8545',
'1881-1736': '0030-154X',
'1423-0240': '0378-584X',
'1423-0232': '0030-2414',
'1549-4713': '0161-6420',
'2219-0635': '0030-2465',
'1423-0259': '0030-3747',
'1423-0267': '0030-3755',
'1538-9235': '1040-5488',
'1573-0875': '0169-6149',
'1526-5463': '0030-364X',
'1423-0275': '0301-1569',
'1954-3395': '0078-6608',
'1558-1373': '0030-5898',
'1788-6120': '0030-6002',
'1433-0431': '0085-4530',
'1557-8259': '0030-6665',
'2300-8423': '0030-6657',
'1097-6817': '0194-5998',
'1476-5594': '0950-9232',
'1475-1313': '0275-5408',
'1613-7558': '0030-9338',
'1872-6623': '0304-3959',
'1827-1898': '0031-0808',
'1365-3024': '0141-9838',
'1469-8161': '0031-1820',
'1399-302X': '0902-0055',
'1542-538X': '0744-6020',
'1768-3114': '0369-8114',
'1591-951X': '0031-2983',
'1465-3931': '0031-3025',
'1557-8240': '0031-3955',
'1099-0496': '8755-6863',
'1432-1998': '0301-0449',
'1530-0447': '0031-3998',
'1532-0987': '0891-3668',
'1098-4275': '0031-4005',
'1990-2182': '0031-403X',
'1558-688X': '0031-5125',
'1468-4233': '0301-0066',
'1532-5962': '0031-5117',
'1529-8795': '0031-5982',
'1744-6163': '0031-5990',
'2214-7268': '0167-6555',
'1432-2013': '0031-6768',
'1746-1553': '0309-3646',
'1532-2823': '0952-3278',
'1615-1003': '0048-3664',
'1873-5177': '0091-3057',
'1879-2936': '0031-6989',
'1521-0081': '0031-6997',
'1423-0313': '0031-7012',
'1879-016X': '0163-7258',
'1439-0795': '0176-3679',
'1423-0321': '0031-8388',
'1751-1097': '0031-8655',
'1361-6560': '0031-9155',
'1538-6724': '0031-9023',
'1873-507X': '0031-9384',
'1522-1210': '0031-9333',
'1873-1465': '0031-9406',
'1943-7684': '0031-949X',
'1550-1841': '0741-5206',
'1095-9890': '0147-619X',
'1532-2548': '0032-0889',
'1558-299X': '0095-4543',
'1439-0221': '0032-0943',
'1529-4242': '0032-1052',
'1365-3016': | |
!= METHOD_HEAD) \
or (response.status in (301, 302) and response.method == METHOD_POST):
method = METHOD_GET
data = None
content_length = headers.get(CONTENT_LENGTH, None)
if (content_length is not None) and content_length:
del headers[CONTENT_LENGTH]
redirect_url = response.headers.get(LOCATION, None)
if redirect_url is None:
redirect_url = response.headers.get(URI, None)
if redirect_url is None:
break
response.release()
redirect_url = URL(redirect_url)
scheme = redirect_url.scheme
if scheme not in ('http', 'https', ''):
response.close()
raise ConnectionError(f'Can redirect only to http or https, got {scheme!r}',
history[0].request_info, tuple(history))
elif not scheme:
redirect_url = url.join(redirect_url)
url = redirect_url
params = None
await response.release()
continue
break
response.history = tuple(history)
return response
@property
def closed(self):
    """
    Returns whether the ``HTTPClient`` is closed.

    The client counts as closed when it has no connector, or when its
    connector is already closed.

    Returns
    -------
    closed : `bool`
    """
    connector = self.connector
    return (connector is None) or bool(connector.closed)
async def __aenter__(self):
"""
Enters the ``HTTPClient`` as an asynchronous context manager.
This method is a coroutine.
"""
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""
Exits the ``HTTPClient`` with closing it.
This method is a coroutine.
"""
self.close()
def __del__(self):
"""
Closes the ``HTTPClient`` closed.
"""
connector = self.connector
if connector is None:
return
self.connector = None
if not connector.closed:
connector.close()
close = __del__
def request(self, method, url, headers=None, **kwargs):
    """
    Executes an http request, returning a request context manager.

    Parameters
    ----------
    method : `str`
        The method of the request.
    url : `str` or ``URL``
        The url to request.
    headers : `None` or (`dict` or ``imultidict``) of (`str`, `str`) items, Optional
        Request headers. When not given, an empty ``imultidict`` is used.
    **kwargs : Keyword Parameters
        Additional keyword parameters.

    Other Parameters
    ----------------
    data : `None` or `Any`, Optional (Keyword only)
        Data to send as the body of the request. Defaults to `None`.
    params : `None` or `dict` of (`str`, (`str`, `int`, `float`, `bool`)) items, Optional (Keyword only)
        Query string parameters. Defaults to `None`.
    redirects : `int`, Optional (Keyword only)
        The maximal amount of allowed redirects. Defaults to `3`.

    Returns
    -------
    request_context_manager : ``RequestCM``

    See Also
    --------
    - ``.request2`` : Executes an http request with extra parameters.
    - ``.get`` : Shortcut for executing a get request.
    - ``.options`` : Shortcut for executing an options request.
    - ``.head`` : Shortcut for executing a head request.
    - ``.post`` : Shortcut for executing a post request.
    - ``.put`` : Shortcut for executing a put request.
    - ``.patch`` : Shortcut for executing a patch request.
    - ``.delete`` : Shortcut for executing a delete request.
    """
    request_headers = imultidict() if headers is None else headers
    return RequestCM(self._request(method, url, request_headers, **kwargs))
def request2(self, method, url, headers=None, **kwargs):
    """
    Executes an http request with extra parameters, returning a request
    context manager.

    Parameters
    ----------
    method : `str`
        The method of the request.
    url : `str` or ``URL``
        The url to request.
    headers : `None` or (`dict` or ``imultidict``) of (`str`, `str`) items, Optional
        Request headers. When not given, an empty ``imultidict`` is used.
    **kwargs : Keyword Parameters
        Additional keyword parameters.

    Other Parameters
    ----------------
    data : `None` or `Any`, Optional (Keyword only)
        Data to send as the body of the request. Defaults to `None`.
    params : `None` or `dict` of (`str`, (`str`, `int`, `float`, `bool`)) items, Optional (Keyword only)
        Query string parameters. Defaults to `None`.
    redirects : `int`, Optional (Keyword only)
        The maximal amount of allowed redirects. Defaults to `3`.
    auth : `None` or ``BasicAuth``, Optional (Keyword only)
        Authorization to use.
    proxy_url : `None`, `str` or ``URL``, Optional
        Proxy url to use instead of the client's own.
    proxy_auth : `None` or ``BasicAuth``, Optional (Keyword only)
        Proxy authorization to use instead of the client's.
    timeout : `float`, Optional (Keyword only)
        The maximal duration to wait for server response. Defaults to `60.0`
        seconds.
    ssl : `ssl.SSLContext`, `bool`, ``Fingerprint``, `NoneType`
        Whether and what type of ssl should the connector use.

    Returns
    -------
    request_context_manager : ``RequestCM``

    See Also
    --------
    - ``.request`` : Executes an http request without extra parameters.
    - ``.get`` : Shortcut for executing a get request.
    - ``.options`` : Shortcut for executing an options request.
    - ``.head`` : Shortcut for executing a head request.
    - ``.post`` : Shortcut for executing a post request.
    - ``.put`` : Shortcut for executing a put request.
    - ``.patch`` : Shortcut for executing a patch request.
    - ``.delete`` : Shortcut for executing a delete request.
    """
    request_headers = imultidict() if headers is None else headers
    return RequestCM(self._request2(method, url, request_headers, **kwargs))
def get(self, url, headers=None, **kwargs):
    """
    Shortcut for executing a get request.

    Parameters
    ----------
    url : `str` or ``URL``
        The url to request.
    headers : `None` or (`dict` or ``imultidict``) of (`str`, `str`) items, Optional
        Request headers. When not given, an empty ``imultidict`` is used.
    **kwargs : Keyword Parameters
        Additional keyword parameters.

    Other Parameters
    ----------------
    data : `None` or `Any`, Optional (Keyword only)
        Data to send as the body of the request. Defaults to `None`.
    params : `None` or `dict` of (`str`, (`str`, `int`, `float`, `bool`)) items, Optional (Keyword only)
        Query string parameters. Defaults to `None`.
    redirects : `int`, Optional (Keyword only)
        The maximal amount of allowed redirects. Defaults to `3`.

    Returns
    -------
    request_context_manager : ``RequestCM``

    See Also
    --------
    - ``.request`` : Executes an http request without extra parameters.
    - ``.request2`` : Executes an http request with extra parameters.
    - ``.options`` : Shortcut for executing an options request.
    - ``.head`` : Shortcut for executing a head request.
    - ``.post`` : Shortcut for executing a post request.
    - ``.put`` : Shortcut for executing a put request.
    - ``.patch`` : Shortcut for executing a patch request.
    - ``.delete`` : Shortcut for executing a delete request.
    """
    request_headers = imultidict() if headers is None else headers
    return RequestCM(self._request(METHOD_GET, url, request_headers, **kwargs))
def options(self, url, headers=None, **kwargs):
    """
    Shortcut for executing an options request.

    Parameters
    ----------
    url : `str` or ``URL``
        The url to request.
    headers : `None` or (`dict` or ``imultidict``) of (`str`, `str`) items, Optional
        Request headers. When not given, an empty ``imultidict`` is used.
    **kwargs : Keyword Parameters
        Additional keyword parameters.

    Other Parameters
    ----------------
    data : `None` or `Any`, Optional (Keyword only)
        Data to send as the body of the request. Defaults to `None`.
    params : `None` or `dict` of (`str`, (`str`, `int`, `float`, `bool`)) items, Optional (Keyword only)
        Query string parameters. Defaults to `None`.
    redirects : `int`, Optional (Keyword only)
        The maximal amount of allowed redirects. Defaults to `3`.

    Returns
    -------
    request_context_manager : ``RequestCM``

    See Also
    --------
    - ``.request`` : Executes an http request without extra parameters.
    - ``.request2`` : Executes an http request with extra parameters.
    - ``.get`` : Shortcut for executing a get request.
    - ``.head`` : Shortcut for executing a head request.
    - ``.post`` : Shortcut for executing a post request.
    - ``.put`` : Shortcut for executing a put request.
    - ``.patch`` : Shortcut for executing a patch request.
    - ``.delete`` : Shortcut for executing a delete request.
    """
    # Docstring previously claimed this was a "get request" shortcut; it
    # issues an OPTIONS request, as the METHOD_OPTIONS constant below shows.
    if headers is None:
        headers = imultidict()
    return RequestCM(self._request(METHOD_OPTIONS, url, headers, **kwargs))
def head(self, url, headers=None, **kwargs):
"""
Shortcut for executing a head request.
Parameters
----------
url : `str` or ``URL``
The url to request.
headers : `None` or (`dict` or ``imultidict``) of (`str`, `str`) items, Optional
Request headers.
**kwargs : Keyword Parameters
Additional keyword parameters.
Other Parameters
----------------
data : `None` or `Any`, Optional (Keyword only)
            Data to send as the body of the request. Defaults to `None`.
params : `None` or `dict` of (`str`, (`str`, `int`, `float`, `bool`)) items, Optional (Keyword only)
Query string parameters. Defaults to `None`.
redirects : `int`, Optional (Keyword only)
The maximal amount of allowed redirects. Defaults to `3`.
Returns
-------
request_context_manager : ``RequestCM``
See Also
--------
- ``.request`` : Executes an http request without extra parameters returning a request context manager.
| |
# apcupsd.indigoPlugin/Contents/Server Plugin/plugin.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
####################
# Copyright (c) 2013, <NAME> All rights reserved.
# with much credit to <NAME> (kormoc) -- https://github.com/BrightcoveOS/Diamond/blob/master/src/collectors/apcupsd/apcupsd.py
#
# Starting with 0.5.0, revised by <NAME> (https://github.com/MartySkinner/Indigo-apcupsd)
#
################################################################################
# Imports
################################################################################
from berkinet import logger
from ghpu import GitHubPluginUpdater
import inspect
import os
import socket
import string
import sys
import threading
import time
import subprocess
################################################################################
# Globals
################################################################################
# Name of the apcupsd query utility and the default path(s) searched for it.
k_utilityBinaryName = u"apcaccess"
k_utilityBinaryPath = u"/usr/local/sbin:/sbin"
# Pre-bound format used to build the "apcaccess status <address> <port>" command line.
k_utilityCommand = u"{binary} status {address} {port}".format
# apcaccess output parsing: "KEY : value" pairs; spaces in keys are replaced,
# and trailing unit words may be stripped from values (see removeUnits pref).
k_utilityOutputSeparator = u": "
k_utilityOutputSpaceReplacement = u"_"
k_utilityOutputUnitWords = [u'Seconds', u'Minutes', u'Hours', u'Watts', u'Volts', u'Percent']
# Event-notification TCP server settings (fed by apcupsd event scripts).
k_eventServerBindHost = u"0.0.0.0"
k_eventServerListenBacklog = 5
k_eventServerMsgMaxLength = 128 + 16 + 1 # device name + event name + separator
k_eventServerSeparator = u":"
# Every apcupsd event name the plugin understands (see buildAction).
k_eventServerEvents = ['annoyme', 'battattach', 'battdetach', 'changeme', 'commfailure', 'commok', 'doreboot', 'doshutdown', 'emergency', 'endselftest', 'failing', 'killpower', 'loadlimit', 'mainsback', 'offbattery', 'onbattery', 'powerout', 'readApcupsd', 'remotedown', 'runlimit', 'startselftest', 'timeout']
# Separator for the comma-delimited list of client IPs allowed to connect.
k_eventServerClientSeparator = u","
k_localhostName = u"localhost"
k_localhostAddress = u"127.0.0.1"
# default preference values used if the plugin hasn't been configured
k_apcupsdTimeoutDefault = 8
k_apcupsdFrequencyDefault = 5
k_useIpConnDefault = False
k_useIpConnAccessDefault = k_localhostAddress
k_daysBetweenUpdateChecksDefault = 1
k_overridePathDefault = False
k_utilityPathDefault = u""
k_removeUnitsDefault = True
k_showDebugInfo1Default = 1
# Increment this each time Device.xml changes / adds / deletes ANY device properties / state NAMES
k_deviceUpdateVersion = 1
def startEventServer(self, port):
    """
    Start the event-notification TCP server in a daemon thread.

    :param self: the Plugin instance (module-level helper style used by this plugin)
    :param port: TCP port the server will listen on
    :return: True if the server thread is still alive 5 seconds after
        starting, False otherwise.
    """
    funcName = inspect.stack()[0][3]
    dbFlg = False
    self.log.log(2, dbFlg, u"%s called" % (funcName), self.logName)
    self.serverRun = True
    # Applies the configured timeout to every socket created afterwards,
    # including the listener created inside eventServer().
    socket.setdefaulttimeout(self.apcupsdTimeout)
    # Daemon thread so a plugin shutdown is never blocked by the server.
    self.s = threading.Thread(target=eventServer, args=[self, k_eventServerBindHost, port])
    self.s.daemon = True
    self.s.start()
    # Give the thread time to bind/listen before checking on it.
    self.sleep(5)
    # NOTE(review): Thread.isAlive() only exists on Python 2 / <3.9;
    # this plugin targets the Python 2 Indigo runtime.
    if self.s.isAlive():
        self.log.log(2, dbFlg, u"Event notification server started", self.logName)
        self.log.log(2, dbFlg, u"%s: completed" % (funcName), self.logName)
        return True
    else:
        self.log.logError(u"Event notification server failed to start", self.logName)
        self.log.log(2, dbFlg, u"%s: completed" % (funcName), self.logName)
        return False
def stopEventServer(self):
    """
    Ask the event-notification server thread to stop and wait for it.

    Clears self.serverRun (the loop flag read by eventServer), joins the
    thread for up to 10 seconds, then keeps polling until the thread dies
    or (apcupsdTimeout + 10) one-second delays have elapsed.
    """
    funcName = inspect.stack()[0][3]
    dbFlg = False
    self.log.log(2, dbFlg, u"%s called" % (funcName), self.logName)
    # eventServer's while-loop tests this flag; the socket timeout set in
    # startEventServer guarantees accept() wakes up periodically to re-check it.
    self.serverRun = False
    if hasattr(self, "s"):
        self.log.log(2, dbFlg, u"Event notifications server asked to stop", self.logName)
        self.s.join(10)
        cnt = 0
        while cnt < (self.apcupsdTimeout + 10) and self.s.isAlive():
            self.sleep(1)
            cnt = cnt + 1
        self.log.log(3, dbFlg, u"%s: Event notifications server needed %s delays to stop" % (funcName, cnt), self.logName)
    self.log.log(2, dbFlg, u"%s: completed" % (funcName), self.logName)
    return
def eventServer(self, host, port):
    """
    Accept-loop body of the event-notification server (runs in its own thread).

    Listens on (host, port); for every connection from an address listed in
    self.useIpConnAccess it reads one message (up to k_eventServerMsgMaxLength
    bytes) and hands it to self.buildAction(). Connections from other
    addresses are logged and dropped. The loop exits when self.serverRun
    becomes False; socket timeouts (from setdefaulttimeout) are what let the
    blocking accept() re-check that flag.
    """
    funcName = inspect.stack()[0][3]
    dbFlg = False
    self.log.log(2, dbFlg, u"%s called" % (funcName), self.logName)
    self.log.log(3, dbFlg, u"%s: received address: %s and port: %s" % (funcName, host, port), self.logName)
    try:
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets a restarted server rebind without waiting out TIME_WAIT.
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((host, port))
        server.listen(k_eventServerListenBacklog)
    except Exception as e:
        e1 = sys.exc_info()[0]
        self.log.logError(u"%s: problem with socket: %s & %s" % (funcName, e, e1), self.logName)
        return
    self.log.log(2, dbFlg, u"%s: started listening on %s" % (funcName, server.getsockname()), self.logName)
    while self.serverRun:
        try:
            self.log.log(4, dbFlg, u"%s: waiting for a connection" % (funcName), self.logName)
            client, client_address = server.accept()
            self.log.log(3, dbFlg, u"%s: client connected from address: %s port: %s" % (funcName, client_address[0], client_address[1]), self.logName)
            # Authorization is by source IP only.
            if client_address[0] in self.useIpConnAccess:
                data = client.recv(k_eventServerMsgMaxLength)
                if data:
                    self.log.log(3, dbFlg, u"%s: received %s" % (funcName, data), self.logName)
                    self.buildAction(data)
            else:
                self.log.logError(u"%s: unauthorized client attempted access from: address: %s port: %s" % (funcName, client_address[0], client_address[1]), self.logName)
            # Close the client socket in both the authorized and rejected paths.
            client.close()
        except socket.timeout:
            # Expected: accept() timing out is how the loop re-checks serverRun.
            pass
        except Exception as e:
            e1 = sys.exc_info()[0]
            self.log.logError(u"%s: read loop: Errors %s & %s" % (funcName, e, e1), self.logName)
            pass
    self.log.log(2, dbFlg, u"%s: Event notification server closed" % (funcName), self.logName)
########################################
def findInPath(self, file_name, def_path=os.defpath):
    """
    Search for *file_name* in the process PATH and in *def_path*.

    :param self: the Plugin instance (module-level helper style used by this plugin)
    :param file_name: bare binary name to locate (e.g. "apcaccess")
    :param def_path: os.pathsep-separated extra directories to search
    :return: the absolute path of the first match, or *file_name* unchanged
        when nothing is found (callers compare the result against the bare
        name to detect failure).
    """
    funcName = inspect.stack()[0][3]
    dbFlg = False
    # Search both the environment PATH and the caller-supplied def_path.
    # Previously def_path was only used when PATH was unset, so override
    # locations such as /usr/local/sbin:/sbin were never actually searched.
    env_path = os.getenv('PATH', '')
    path = os.pathsep.join(p for p in (env_path, def_path) if p)
    self.log.log(2, dbFlg, u"%s: PATH to search: %s" % (funcName, path), self.logName)
    for d in path.split(os.pathsep):
        file_path = os.path.abspath(os.path.join(d, file_name))
        if os.path.exists(file_path):
            self.log.log(2, dbFlg, u"%s: found %s" % (funcName, file_path), self.logName)
            return file_path
    self.log.log(2, dbFlg, u"%s: %s not found in PATH" % (funcName, file_name), self.logName)
    return file_name
########################################
def doShell(self, cmd):
    """
    Run *cmd* through the shell and capture its combined output.

    :param self: the Plugin instance (module-level helper style used by this plugin)
    :param cmd: shell command line to execute
    :return: tuple (exit_code, output) where output includes stderr,
        since stderr is redirected onto stdout.
    """
    funcName = inspect.stack()[0][3]
    dbFlg = False
    self.log.log(2, dbFlg, u"%s: Called" % (funcName), self.logName)
    self.log.log(3, dbFlg, u"%s: command: %s" % (funcName, cmd), self.logName)
    # stderr is folded into stdout so callers get a single stream.
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = proc.communicate()[0]
    self.log.log(3, dbFlg, u"%s: returned output\n%s" % (funcName, output), self.logName)
    self.log.log(2, dbFlg, u"%s: Completed" % (funcName), self.logName)
    return (proc.returncode, output)
################################################################################
# delayAmount : 900
# description : plugin action
# * deviceId : 145579207
# pluginId : <ourPluginId>
# * pluginTypeId : apcupsdServerEvent
# * props : <ourPluginId> : (dict)
# actionType : commok (string)
# replaceExisting : True
# textToSpeak :
class Action(object):
    """
    Lightweight stand-in for an Indigo action object, built by the plugin
    when an apcupsd event arrives (see the field sketch in the comment
    block above this class).
    """
    def __init__(self):
        self.description = u'plugin generated action'
        self.deviceId = 0
        # __str__ formats pluginId, but it was never initialized here,
        # so printing an Action raised AttributeError. Default it to None.
        self.pluginId = None
        self.pluginTypeId = None
        self.props = {'actionType': None}
    def __str__(self):
        desc_str = u"description: %s\ndeviceId: %s \npluginId: %s \npluginTypeId: %s \n props: %s \n" %(self.description, self.deviceId, self.pluginId, self.pluginTypeId, self.props)
        return desc_str
################################################################################
################################################################################
class Plugin(indigo.PluginBase):
########################################
# Class properties
########################################
    def __init__(self, pluginid, pluginDisplayName, pluginVersion, pluginPrefs):
        """
        Read plugin preferences (falling back to the k_*Default constants
        when unconfigured), load the state-name resource files, set up the
        GitHub update checker, and locate the apcaccess binary.
        """
        indigo.PluginBase.__init__(self, pluginid, pluginDisplayName, pluginVersion, pluginPrefs)
        self.log = logger(self)
        self.logName = pluginDisplayName
        funcName = inspect.stack()[0][3]
        dbFlg = False
        self.log.log(2, dbFlg, u"%s called" % (funcName), self.logName)
        # A missing "apcupsdTimeout" pref is taken as "never configured".
        try:
            self.apcupsdTimeout = string.atof(self.pluginPrefs["apcupsdTimeout"])
        except KeyError:
            self.apcupsdTimeout = k_apcupsdTimeoutDefault
            self.log.logError(u"The apcupsd plugin appears to have not been configured. Default values will be used until the configuration is changed.", self.logName)
        # NOTE(review): if "apcupsdFrequency" is absent, string.atof receives
        # the int default (5) rather than a string — confirm string.atof
        # tolerates that, or that the pref is always present after config.
        self.apcupsdFrequency = string.atof(self.pluginPrefs.get("apcupsdFrequency", k_apcupsdFrequencyDefault))
        self.useIpConn = self.pluginPrefs.get("useIpConn", k_useIpConnDefault)
        if self.useIpConn:
            # Comma-separated list of client IPs allowed to reach the event server.
            self.useIpConnAccess = self.pluginPrefs.get("useIpConnAccess", k_useIpConnAccessDefault).split(k_eventServerClientSeparator)
            self.log.log(2, dbFlg, u"%s: read access list: %s" % (funcName, self.useIpConnAccess), self.logName)
        self.pluginid = pluginid
        self.pluginDisplayName = pluginDisplayName
        self.apcupsdCommError = False
        self.triggerList = []
        self.triggerDict = {}
        # NOTE: eval() of local resource files — only safe because these
        # files ship with the plugin; never point them at untrusted data.
        self.defaultStatesDict = eval(open("../Resources/defaultStates.dict").read())
        self.commLostStatesList = eval(open("../Resources/commLostStates.List").read())
        self.serverRun = True
        self.readLoop = True
        self.startingUp = True
        # setup the plugin update checker... it will be disabled if the URL is empty
        self.updater = GitHubPluginUpdater(self)
        daysBetweenUpdateChecks = string.atof(self.pluginPrefs.get("daysBetweenUpdateChecks", k_daysBetweenUpdateChecksDefault))
        self.secondsBetweenUpdateChecks = daysBetweenUpdateChecks * 86400
        self.nextUpdateCheck = 0 # this will force an update check as soon as the plugin is running
        self.utilityBinaryName = k_utilityBinaryName
        utilityBinaryPath = k_utilityBinaryPath
        # An explicit utilityPath pref overrides the default sbin search path.
        if self.pluginPrefs.get("overridePath", k_overridePathDefault) and self.pluginPrefs.get("utilityPath", k_utilityPathDefault) != "":
            utilityBinaryPath = self.pluginPrefs["utilityPath"]
        # findInPath returns the bare name unchanged when nothing is found.
        self.utilityBinary = findInPath(self, self.utilityBinaryName, utilityBinaryPath)
        if self.utilityBinaryName != self.utilityBinary:
            self.utilityBinaryFound = True
        else:
            self.utilityBinaryFound = False
            self.log.logError(u"Could not find the '%s' binary. Is the APCUPSD package installed?" % (self.utilityBinaryName), self.logName)
        self.removeUnits = self.pluginPrefs.get("removeUnits", k_removeUnitsDefault)
        self.logLevel = self.pluginPrefs.get("showDebugInfo1", k_showDebugInfo1Default)
        self.log.log(2, dbFlg, u"%s: Completed" % (funcName), self.logName)
########################################
    def __del__(self):
        # Nothing plugin-specific to clean up; defer to the Indigo base class.
        indigo.PluginBase.__del__(self)
########################################
    def startup(self):
        """Indigo lifecycle hook, called once after __init__; only logs."""
        funcName = inspect.stack()[0][3]
        dbFlg = False
        self.log.log(1, dbFlg, u"%s: Plugin Starting" % (funcName), self.logName)
        self.log.log(2, dbFlg, u"%s: completed" % (funcName), self.logName)
########################################
    def closedPrefsConfigUi (self, valuesDict, UserCancelled):
        """
        Indigo callback fired when the preferences dialog closes.

        Unless the user cancelled, re-reads every preference from
        valuesDict, restarts (or stops) the event-notification server as
        needed, and re-resolves the apcaccess binary path.
        """
        funcName = inspect.stack()[0][3]
        dbFlg = False
        self.log.log(2, dbFlg, u"%s called" % (funcName), self.logName)
        self.log.log(4, dbFlg, u"%s:\nvaluesDict:\n%s\nUserCancelled: %s" % (funcName, valuesDict, UserCancelled), self.logName)
        if UserCancelled is False:
            # Rebuild the logger so a changed debug level takes effect.
            self.log = logger(self)
            self.apcupsdFrequency = string.atof(valuesDict["apcupsdFrequency"])
            lastUseIpConn = self.useIpConn
            self.useIpConn = valuesDict["useIpConn"]
            if self.useIpConn:
                self.apcupsdTimeout = string.atof(valuesDict["apcupsdTimeout"])
                self.useIpConnAccess = valuesDict["useIpConnAccess"].split(k_eventServerClientSeparator)
                self.log.log(2, dbFlg, u"%s: read access list: %s" % (funcName, self.useIpConnAccess), self.logName)
                if lastUseIpConn:
                    self.log.log(2, dbFlg, u"Event notifications server asked to stop", self.logName)
                    # because we may have new preferences to put into play, ask any currently running server to stop what its doing
                    stopEventServer(self)
                port = int(valuesDict["useIpConnPort"])
                startEventServer(self, port)
            else:
                # since we don't need a server now, ask any currently running server to stop what its doing
                stopEventServer(self)
            daysBetweenUpdateChecks = string.atoi(valuesDict["daysBetweenUpdateChecks"])
            self.secondsBetweenUpdateChecks = daysBetweenUpdateChecks * 86400
            self.nextUpdateCheck = 0 # this will force an update check starting now
            self.logLevel = string.atoi(valuesDict["showDebugInfo1"])
            if valuesDict["overridePath"] and valuesDict["utilityPath"] != "":
                utilityBinaryPath = valuesDict["utilityPath"]
                utilityBinary = findInPath(self, self.utilityBinaryName, utilityBinaryPath)
                if self.utilityBinaryName != utilityBinary:
                    # NOTE(review): this assigns self.utilityBinaryPath while
                    # __init__ stores the resolved path in self.utilityBinary —
                    # confirm which attribute the polling code actually reads.
                    self.utilityBinaryPath = utilityBinary
            self.log.log(1, dbFlg, u"Plugin options reset. Polling apcupsd servers every %s minutes and a debug level of %i" % (self.apcupsdFrequency, int(valuesDict["showDebugInfo1"])), self.logName)
        self.log.log(2, dbFlg, u"%s: Completed" % (funcName), self.logName)
########################################
# this is also called by the custom menu item
########################################
def checkForUpdates(self):
update = self.updater.getLatestRelease()
if update == None:
self.log.logError(u"Error encountered checking for a new plugin version", self.logName)
else:
update = self.updater.checkForUpdate()
########################################
def runConcurrentThread(self):
funcName = inspect.stack()[0][3]
dbFlg = False
self.log.log(2, dbFlg, u"%s called" % (funcName), self.logName)
self.startingUp = False
if self.utilityBinaryFound is False:
self.log.log(2, dbFlg, u"%s: A missing '%s' binary will NOT clear itself without changing the plugin preferences and/or installing the APCUPSD package, then reloading the plugin." % (funcName, self.utilityBinaryName), self.logName)
self.sleep(60*10)
self.log.log(2, dbFlg, u"%s: Completed" % (funcName), self.logName)
return
if self.useIpConn:
port = int(self.pluginPrefs["useIpConnPort"])
startEventServer(self, port)
try:
self.log.log(1, dbFlg, u"Plugin started. Polling apcupsd server(s) every %s minutes" % (self.apcupsdFrequency), self.logName)
except AttributeError:
self.log.logError(u"Plugin start delayed pending completion of initial plugin configuration", self.logName)
return
try:
while True:
self.readLoop = True
if self.secondsBetweenUpdateChecks > 0:
# obtain the current date/time and determine if it is after the previously-calculated
# next check run
timeNow = time.time()
if timeNow > self.nextUpdateCheck:
self.pluginPrefs['updaterLastCheck'] = timeNow
self.log.log(3, dbFlg, | |
import os
import pandas
from BirdRoostDetection.ReadData import Labels
import numpy as np
from BirdRoostDetection import utils
from BirdRoostDetection.PrepareData import NexradUtils
class Batch_Generator():
    """Organize the machine learning labels and create ML batches.

    Class Variables:
        self.root_dir: The directory where the radar images are stored
        self.label_dict: A dictionary of the labels, the key is the
            filename, and the value is a ML_Label object (filled in by
            subclasses).
        self.no_roost_sets / self.roost_sets: dictionaries mapping a
            utils.ML_Set value to the list of AWS file names in that set.
        self.no_roost_sets_V06 / self.roost_sets_V06: the same lists
            restricted to files whose name ends in a version digit >= 6.
        self.batch_size: the size of the minibatch learning batches
    """
    def __init__(self,
                 ml_split_csv,
                 validate_k_index=3,
                 test_k_index=4,
                 default_batch_size=32,
                 root_dir=utils.RADAR_IMAGE_DIR):
        self.label_dict = {}
        self.root_dir = root_dir
        self.no_roost_sets = {}
        self.roost_sets = {}
        self.no_roost_sets_V06 = {}
        self.roost_sets_V06 = {}
        self.batch_size = default_batch_size
        self.__set_ml_sets(ml_split_csv,
                           validate_k_index,
                           test_k_index)
    def __set_ml_sets(self,
                      ml_split_csv,
                      validate_k_index,
                      test_k_index):
        """Create Train, test, and Validation set from k data folds.

        The k data folds are saved out to ml_split_csv. The folds at the
        given validation and test indices are assigned to their
        corresponding sets; the rest of the data is put into train.
        Populates self.roost_sets / self.no_roost_sets (and their _V06
        variants) with lists of filenames per set.

        Args:
            ml_split_csv: A path to a csv file, where the csv has two
                columns, 'AWS_file' and 'split_index'.
            validate_k_index: The index of the validation set.
            test_k_index: The index of the test set.
        """
        ml_split_pd = pandas.read_csv(ml_split_csv)
        # Remove files that weren't found on disk as rendered png images.
        all_files = utils.getListOfFilesInDirectory(
            self.root_dir + '/All_Color',
            '.png')
        all_files_dict = {}
        for file_path in all_files:
            all_files_dict[
                os.path.basename(file_path).replace('.png', '')] = True
        for index, row in ml_split_pd.iterrows():
            if all_files_dict.get(row['AWS_file']) is None:
                ml_split_pd.drop(index, inplace=True)
        # Sort into train, test, and validation sets
        self.__set_ml_sets_helper(self.no_roost_sets, self.no_roost_sets_V06,
                                  ml_split_pd[ml_split_pd.Roost != True],
                                  validate_k_index, test_k_index)
        self.__set_ml_sets_helper(self.roost_sets, self.roost_sets_V06,
                                  ml_split_pd[ml_split_pd.Roost],
                                  validate_k_index, test_k_index)
    def __set_ml_sets_helper(self, ml_sets, ml_sets_V06, ml_split_pd, val_k,
                             test_k):
        """Fill train/validation/test lists plus their V06 subsets, shuffled."""
        no_val_pd = ml_split_pd[ml_split_pd.split_index != val_k]
        ml_sets[utils.ML_Set.training] = list(
            no_val_pd[no_val_pd.split_index != test_k]['AWS_file'])
        ml_sets[utils.ML_Set.validation] = list(
            ml_split_pd[ml_split_pd.split_index == val_k]['AWS_file'])
        ml_sets[utils.ML_Set.testing] = list(
            ml_split_pd[ml_split_pd.split_index == test_k]['AWS_file'])
        for key in ml_sets.keys():
            # Filenames ending in a version digit >= 6 carry dual pol data.
            ml_sets_V06[key] = [item for item in ml_sets[key]
                                if int(item[-1]) >= 6]
            np.random.shuffle(ml_sets[key])
            np.random.shuffle(ml_sets_V06[key])
    def get_batch_indices(self, ml_sets, ml_set,
                          num_temporal_data=0):
        """Draw half a batch worth of random indices into ml_sets[ml_set]."""
        # Floor division: numpy requires an integer size, and under
        # Python 3 "/" would yield a float here ("/" on ints floors in
        # Python 2, so "//" is behaviorally identical there).
        indices = np.random.randint(low=0,
                                    high=len(ml_sets[ml_set]),
                                    size=self.batch_size // 2)
        return indices
    def get_batch(self, ml_set, dualPol, radar_product=None):
        """Return empty batch accumulators plus the file sets to draw from.

        When dualPol is True the V06-restricted file lists are returned
        instead of the full lists. Subclasses fill the accumulators.
        """
        ground_truths = []
        train_data = []
        filenames = []
        roost_sets = self.roost_sets
        no_roost_sets = self.no_roost_sets
        if dualPol:
            roost_sets = self.roost_sets_V06
            no_roost_sets = self.no_roost_sets_V06
        return ground_truths, train_data, filenames, roost_sets, no_roost_sets
class Small_Image_Batch_Generator(Batch_Generator):
    """Batch generator that tiles each radar image into a 5x5 grid of
    80x80 crops (40 px stride) and labels each crop by whether the roost
    center falls inside it.
    """
    def __init__(self,
                 ml_label_csv,
                 ml_split_csv,
                 validate_k_index=3,
                 test_k_index=4,
                 default_batch_size=32,
                 root_dir=utils.RADAR_IMAGE_DIR,
                 high_memory_mode=False):
        Batch_Generator.__init__(self, ml_split_csv, validate_k_index,
                                 test_k_index, default_batch_size, root_dir)
        ml_label_pd = pandas.read_csv(ml_label_csv)
        for index, row in ml_label_pd.iterrows():
            self.label_dict[row['AWS_file']] = Labels.ML_Label(row['AWS_file'],
                                                               row,
                                                               self.root_dir,
                                                               high_memory_mode)
    def _append_small_images(self, filename, radar_product, ground_truths,
                             train_data, filenames):
        """Tile one radar image into 25 80x80 crops and append each crop,
        its [roost, no_roost] label, and the filename to the accumulators.
        The roost pixel position is derived from the radar location and the
        labeled lat/long; a crop is positive when that position falls
        inside it (overlapping crops can both be positive).
        """
        label = self.label_dict[filename]
        image = self.label_dict[filename].get_image(radar_product)
        radar_loc = NexradUtils.getRadarLocation(filename[0:4])
        # Lat/long offset from the radar, scaled to pixels and centered at 120.
        # NOTE(review): assumes the image is at least 240x240 — confirm.
        y = (radar_loc[0] - label.latitude) * 89 + 120
        x = (radar_loc[1] - label.longitude) * 72.8 + 120
        for i in range(5):
            for j in range(5):
                is_small_roost = 0
                x_start = i * 40
                x_end = i * 40 + 80
                y_start = j * 40
                y_end = j * 40 + 80
                if x >= x_start and x <= x_end \
                        and y >= y_start and y <= y_end:
                    is_small_roost += 1
                small_image = image[x_start:x_end, y_start:y_end]
                ground_truths.append([is_small_roost, 1 - is_small_roost])
                filenames.append(filename)
                train_data.append(small_image)
    def get_batch(self, ml_set, dualPol, radar_product=None,
                  num_temporal_data=0):
        """Get a batch of tiled data for machine learning.

        Only roost_sets is sampled (the original loop over both sets was
        left disabled). For the testing set, 750 files are drawn at random;
        otherwise the usual half-batch of indices is used.

        Args:
            ml_set: ML_Set enum value, train, test, or validation.
            radar_product: Radar_Product enum value, reflectivity, velocity,
                zdr, or rho_hv.
        Returns:
            train_data, ground_truth, filenames: arrays of 80x80x1 crops,
            [roost, no_roost] labels, and the source filename per crop.
        """
        ground_truths, train_data, filenames, roost_sets, no_roost_sets = \
            Batch_Generator.get_batch(self, ml_set, dualPol, radar_product)
        if ml_set is utils.ML_Set.testing:
            # Debug output retained; parenthesized so it works on both
            # Python 2 (print statement) and Python 3 (print function).
            print(len(roost_sets[ml_set]))
            indices = np.random.randint(low=0,
                                        high=len(roost_sets[ml_set]),
                                        size=750)
        else:
            indices = Batch_Generator.get_batch_indices(self, roost_sets,
                                                        ml_set)
        # The tiling logic was previously duplicated verbatim in both
        # branches; it now lives in _append_small_images.
        for index in indices:
            filename = roost_sets[ml_set][index]
            self._append_small_images(filename, radar_product,
                                      ground_truths, train_data, filenames)
        train_data_np = np.array(train_data)
        shape = train_data_np.shape
        train_data_np = train_data_np.reshape(shape[0], shape[1], shape[2],
                                              1)
        return train_data_np, np.array(ground_truths), np.array(filenames)
class Single_Product_Batch_Generator(Batch_Generator):
    """Batch generator yielding one full radar image per example, for a
    single radar product."""
    def __init__(self,
                 ml_label_csv,
                 ml_split_csv,
                 validate_k_index=3,
                 test_k_index=4,
                 default_batch_size=32,
                 root_dir=utils.RADAR_IMAGE_DIR,
                 high_memory_mode=False):
        Batch_Generator.__init__(self, ml_split_csv, validate_k_index,
                                 test_k_index, default_batch_size, root_dir)
        label_rows = pandas.read_csv(ml_label_csv)
        for _, row in label_rows.iterrows():
            aws_file = row['AWS_file']
            self.label_dict[aws_file] = Labels.ML_Label(aws_file, row,
                                                        self.root_dir,
                                                        high_memory_mode)
    def get_batch(self, ml_set, dualPol, radar_product=None,
                  num_temporal_data=0):
        """Get a batch of data for machine learning. As a default a batch
        contains data from for a single radar product.

        Args:
            ml_set: ML_Set enum value, train, test, or validation.
            radar_product: Radar_Product enum value, reflectivity, velocity,
                zdr, or rho_hv.
        Returns:
            train_data, ground_truth, filenames:
                train_data is an array of images shaped (n, h, w, 1);
                ground_truth holds one [roost, no_roost] label per image;
                filenames holds the corresponding source file names.
        """
        ground_truths, train_data, filenames, roost_sets, no_roost_sets = \
            Batch_Generator.get_batch(self, ml_set, dualPol, radar_product)
        # Draw half a batch from the roost files and half from the rest.
        for file_pool in (roost_sets, no_roost_sets):
            for idx in Batch_Generator.get_batch_indices(self, file_pool,
                                                         ml_set):
                name = file_pool[ml_set][idx]
                filenames.append(name)
                entry = self.label_dict[name]
                roost_flag = int(entry.is_roost)
                ground_truths.append([roost_flag, 1 - roost_flag])
                train_data.append(entry.get_image(radar_product))
        batch = np.array(train_data)
        # Add a trailing channel axis for the network input.
        batch = batch.reshape(batch.shape[0], batch.shape[1],
                              batch.shape[2], 1)
        return batch, np.array(ground_truths), np.array(filenames)
class Multiple_Product_Batch_Generator(Batch_Generator):
    """Batch generator yielding one multi-channel example per file, with a
    channel per radar product."""
    def __init__(self,
                 ml_label_csv,
                 ml_split_csv,
                 validate_k_index=3,
                 test_k_index=4,
                 default_batch_size=32,
                 root_dir=utils.RADAR_IMAGE_DIR,
                 high_memory_mode=False):
        Batch_Generator.__init__(self, ml_split_csv, validate_k_index,
                                 test_k_index, default_batch_size, root_dir)
        ml_label_pd = pandas.read_csv(ml_label_csv)
        for index, row in ml_label_pd.iterrows():
            self.label_dict[row['AWS_file']] = Labels.ML_Label(row['AWS_file'],
                                                               row,
                                                               self.root_dir,
                                                               high_memory_mode)
    def get_batch(self, ml_set, dualPol, radar_product=None,
                  num_temporal_data=0):
        """Get a batch of data for machine learning. This batch contains data
        with four channels in it, one for each radar product. For dualPol data
        this will be four radar products, and for legacy data this will be two
        radar products.

        Args:
            ml_set: ML_Set enum value, train, test, or validation.
            dualPol: Boolean, true if the data is dual pol, false if the radar
                data is legacy.
        Returns:
            train_data, ground_truth, filenames:
                train_data is channel-last (n, h, w, products);
                ground_truth holds one [roost, no_roost] label per example;
                filenames holds the corresponding source file names.
        """
        ground_truths, train_data, filenames, roost_sets, no_roost_sets = \
            Batch_Generator.get_batch(self, ml_set, dualPol, radar_product)
        # The product list depends only on dualPol; previously it was
        # re-selected inside the per-file loop on every iteration.
        if dualPol:
            radar_products = utils.Radar_Products
        else:
            radar_products = utils.Legacy_radar_products
        for ml_sets in [roost_sets, no_roost_sets]:
            indices = Batch_Generator.get_batch_indices(self, ml_sets, ml_set)
            for index in indices:
                filename = ml_sets[ml_set][index]
                filenames.append(filename)
                is_roost = int(self.label_dict[filename].is_roost)
                images = []
                # "product" (not "radar_product"): the original loop
                # variable shadowed the method parameter.
                for product in radar_products:
                    images.append(
                        self.label_dict[filename].get_image(product))
                ground_truths.append([is_roost, 1 - is_roost])
                train_data.append(images)
        # Update to channel last ordering
        train_data = np.rollaxis(np.array(train_data), 1, 4)
        return train_data, np.array(ground_truths), np.array(
            filenames)
class Temporal_Batch_Generator(Batch_Generator):
    def __init__(self,
                 ml_label_csv,
                 ml_split_csv,
                 validate_k_index=3,
                 test_k_index=4,
                 default_batch_size=32,
                 root_dir=utils.RADAR_IMAGE_DIR,
                 high_memory_mode=False):
        """Load temporal labels from ml_label_csv into self.label_dict."""
        Batch_Generator.__init__(self, ml_split_csv, validate_k_index,
                                 test_k_index, default_batch_size, root_dir)
        ml_label_pd = pandas.read_csv(ml_label_csv)
        for index, row in ml_label_pd.iterrows():
            # The constructor's return value is discarded: Temporal_ML_Label
            # is handed self.label_dict and presumably registers its entries
            # into it as a side effect — confirm in Labels module.
            # NOTE(review): passes the raw root_dir argument, while the other
            # generators pass self.root_dir (same value here).
            Labels.Temporal_ML_Label(
                row['AWS_file'],
                row,
                root_dir,
                high_memory_mode,
                self.label_dict)
def get_batch(self, ml_set, dualPol, radar_product=None,
num_temporal_data=0):
ground_truths, train_data, filenames, roost_sets, no_roost_sets = \
Batch_Generator.get_batch(self, ml_set, dualPol, radar_product)
for ml_sets in [roost_sets, no_roost_sets]:
indices = Batch_Generator.get_batch_indices(self, ml_sets, ml_set)
for index in indices:
filename = ml_sets[ml_set][index]
filenames.append(filename)
is_roost = int(self.label_dict[filename].is_roost)
images = []
channel_files = self.label_dict[filename].fileNames[
3 - num_temporal_data: 4 + num_temporal_data]
for image_name | |
end)]
if 'STRAIGHT' in self.nose_type or 'OPEN' in self.steps_type:
# face bottom
matids.append(self.idmat_bottom)
faces.append((f + end, f + start, f + offset + start, f + offset + end))
uvs.append([(u, v), (u, 0), (0, 0), (0, v)])
if self.steps_type != 'OPEN':
if 'STRAIGHT' in self.nose_type:
# front face bottom straight
matids.append(self.idmat_raise)
faces.append((f + 12, f + 17, f + 16, f + 13))
uvs.append([(0, w), (v, w), (v, 0), (0, 0)])
elif 'OBLIQUE' in self.nose_type:
# front face bottom oblique
matids.append(self.idmat_raise)
faces.append((f + 12, f + 17, f + 6, f + 3))
uvs.append([(0, w), (v, w), (v, 0), (0, 0)])
matids.append(self.idmat_side)
faces.append((f + 3, f + 13, f + 12))
uvs.append([(0, 0), (u, 0), (u, w)])
matids.append(self.idmat_side)
faces.append((f + 6, f + 17, f + 16))
uvs.append([(0, 0), (u, w), (u, 0)])
# front face top
w = verts[f + 3][2] - verts[f + 4][2]
matids.append(self.idmat_step_front)
faces.append((f + 4, f + 3, f + 6, f + 5))
uvs.append([(0, 0), (0, w), (v, w), (v, 0)])
return rM
    def make_faces(self, f, rM, verts, faces, matids, uvs):
        """
        Append the quad faces, material indices and UVs joining one step's
        vertex ring (starting at index f) to the next ring (at f + offset).

        The (start, end, offset) vertex-range and the material list are
        chosen from z_mode / steps_type: LINEAR uses a 4-vertex profile,
        OPEN steps a 10-vertex profile with 3 side faces, closed steps a
        10-vertex profile with 9 side faces. Left/right side UVs are unrolled
        along the stair length; top and bottom UVs are projected through rM.
        """
        if self.z_mode == 'LINEAR':
            start = 0
            end = 3
            offset = 4
            matids.extend([self.idmat_side,
                self.idmat_top,
                self.idmat_side,
                self.idmat_bottom])
        elif "OPEN" in self.steps_type:
            # top / bottom / side faces (translated: "faces dessus-dessous-lateral marches fermees")
            start = 3
            end = 6
            offset = 10
            matids.extend([self.idmat_step_side,
                self.idmat_top,
                self.idmat_step_side,
                self.idmat_bottom])
        else:
            # top / bottom / side faces for closed steps (translated: "faces dessus-dessous-lateral marches fermees")
            start = 0
            end = 9
            offset = 10
            matids.extend([self.idmat_side,
                self.idmat_side,
                self.idmat_side,
                self.idmat_step_side,
                self.idmat_top,
                self.idmat_step_side,
                self.idmat_side,
                self.idmat_side,
                self.idmat_side,
                self.idmat_bottom])
        # u runs along the stair; v is taken from the vertex z coordinate.
        u_l0 = 0
        u_l1 = self.t_step * self.left_length
        u_r0 = 0
        u_r1 = self.t_step * self.right_length
        # s splits the profile: faces below s are the left side, the face at
        # s is the top (projected), faces above are the right side.
        s = int((end - start) / 2)
        uvs += [[(u_l0, verts[f + j][2]), (u_l0, verts[f + j + 1][2]),
            (u_l1, verts[f + j + offset + 1][2]), (u_l1, verts[f + j + offset][2])] for j in range(start, start + s)]
        self.project_uv(rM, uvs, verts, [f + start + s, f + start + s + 1,
            f + start + s + offset + 1, f + start + s + offset])
        uvs += [[(u_r0, verts[f + j][2]), (u_r0, verts[f + j + 1][2]),
            (u_r1, verts[f + j + offset + 1][2]), (u_r1, verts[f + j + offset][2])] for j in range(start + s + 1, end)]
        self.project_uv(rM, uvs, verts, [f + end, f + start, f + offset + start, f + offset + end])
        faces += [(f + j, f + j + 1, f + j + offset + 1, f + j + offset) for j in range(start, end)]
        # closing face wrapping from the last profile vertex back to the first
        faces.append((f + end, f + start, f + offset + start, f + offset + end))
class StraightStair(Stair, Line):
    """A straight flight of stairs: Stair profile logic swept along a Line,
    with parallel left/right offset lines bounding the tread width."""
    def __init__(self, p, v, left_offset, right_offset, steps_type, nose_type, z_mode, nose_z, bottom_z):
        Stair.__init__(self, left_offset, right_offset, steps_type, nose_type, z_mode, nose_z, bottom_z)
        Line.__init__(self, p, v)
        # left/right boundary lines, offset from the stair center line
        self.l_line = self.offset(-left_offset)
        self.r_line = self.offset(right_offset)
    def make_step(self, i, verts, faces, matids, uvs, nose_y=0):
        """Emit geometry for step i: nose, then the step's four corner
        columns at t0 and t0 + t_step, then the connecting faces."""
        rM = self._make_nose(i, i, verts, faces, matids, uvs, nose_y)
        t0 = self.t_step * i
        f = len(verts)
        # leading edge (left then right), trailing edge (left then right)
        p = self.l_line.lerp(t0)
        self.p3d_left(verts, p, i, t0)
        p = self.r_line.lerp(t0)
        self.p3d_right(verts, p, i, t0)
        t1 = t0 + self.t_step
        p = self.l_line.lerp(t1)
        self.p3d_left(verts, p, i, t1)
        p = self.r_line.lerp(t1)
        self.p3d_right(verts, p, i, t1)
        self.make_faces(f, rM, verts, faces, matids, uvs)
        if "OPEN" in self.steps_type:
            # riser gap face of an open step
            faces.append((f + 13, f + 14, f + 15, f + 16))
            matids.append(self.idmat_step_front)
            uvs.append([(0, 0), (0, 1), (1, 1), (1, 0)])
    def get_length(self, side):
        # both sides of a straight stair have the same length
        return self.length
    def get_lerp_vect(self, posts, side, i, t_step, respect_edges, z_offset=0, t0_abs=None):
        """Append one post sample (normal, slope, step z, linear z) at
        parameter i * t_step (or the absolute t0_abs when given)."""
        if t0_abs is not None:
            t0 = t0_abs
        else:
            t0 = i * t_step
        t, part, dz, shape = self.get_part(t0, side)
        dz /= part.length
        n = part.normal(t)
        z0 = self.get_z(t0, 'STEP')
        z1 = self.get_z(t0, 'LINEAR')
        posts.append((n, dz, z0, z1 + t0 * z_offset))
        return [t0]
    def n_posts(self, post_spacing, side, respect_edges):
        # one post per step interval for a straight stair
        return self.steps(post_spacing)
    def get_part(self, t, side):
        """Return (t, boundary line for side, total height, 'LINE')."""
        if side == 'LEFT':
            part = self.l_line
        else:
            part = self.r_line
        return t, part, self.height, 'LINE'
class CurvedStair(Stair, Arc):
    def __init__(self, c, radius, a0, da, left_offset, right_offset, steps_type, nose_type,
            z_mode, nose_z, bottom_z, left_shape, right_shape, double_limit=pi):
        """Curved stair: Stair profile swept along an Arc (center c, radius,
        start angle a0, sweep da). left_shape/right_shape choose between a
        circular boundary and straight tangeant edges per side; arcs wider
        than double_limit are split into two straight edges."""
        Stair.__init__(self, left_offset, right_offset, steps_type, nose_type, z_mode, nose_z, bottom_z)
        Arc.__init__(self, c, radius, a0, da)
        self.l_shape = left_shape
        self.r_shape = right_shape
        # rounded to avoid float noise right at the double_limit threshold
        self.edges_multiples = round(abs(da), 6) > double_limit
        # left arc, tangeant at start and end
        self.l_arc, self.l_t0, self.l_t1, self.l_tc = self.set_offset(-left_offset, left_shape)
        self.r_arc, self.r_t0, self.r_t1, self.r_tc = self.set_offset(right_offset, right_shape)
    def set_offset(self, offset, shape):
        """
        Build one side's boundary from the stair arc offset by *offset*.

        Returns (arc, t0, t1, tc): the offset arc plus its tangeant lines at
        the start (t0), end (t1) and middle (tc). When edges_multiples is
        set, t0/tc/t1 are trimmed at their mutual intersections to form two
        straight edges; otherwise t0 and t1 meet at a single corner.
        The *shape* argument is not used here; callers consult the stored
        l_shape/r_shape when deciding between the arc and the tangeants.
        """
        arc = self.offset(offset)
        t0 = arc.tangeant(0, 1)
        t1 = arc.tangeant(1, 1)
        tc = arc.tangeant(0.5, 1)
        if self.edges_multiples:
            # trim the middle tangeant between its intersections with t0 and t1
            i, p, t = t0.intersect(tc)
            tc.v *= 2 * t
            tc.p = p
            i, p, t2 = tc.intersect(t1)
        else:
            i, p, t = t0.intersect(t1)
        # scale t0 to reach the corner; anchor and scale t1 from it
        t0.v *= t
        t1.p = p
        t1.v *= t
        return arc, t0, t1, tc
def get_length(self, side):
    """Return the developed length of one side of the curved stair.

    A 'CIRCLE' side uses the true arc length; a straight-edged side is made
    of equal tangent segments (two of them, or four when edges_multiples).
    """
    if side == 'RIGHT':
        arc, shape, t0 = self.r_arc, self.r_shape, self.r_t0
    else:
        arc, shape, t0 = self.l_arc, self.l_shape, self.l_t0
    if shape == 'CIRCLE':
        return arc.length
    segments = 4 if self.edges_multiples else 2
    return segments * t0.length
def _make_step(self, t_step, i, s, verts, landing=False):
    """Emit one row of step vertices (left side then right side) at t = i * t_step.

    Returns the index in verts where this row starts.
    """
    tb = t_step * i
    row_start = len(verts)
    for side, emit in (("LEFT", self.p3d_left), ("RIGHT", self.p3d_right)):
        t, part, dz, shape = self.get_part(tb, side)
        emit(verts, part.lerp(t), s, tb, landing)
    return row_start
def _make_edge(self, t_step, i, j, f, rM, verts, faces, matids, uvs):
    """Emit an extra vertex row where the step span crosses a side "edge".

    Straight-shaped sides have corners at t = 0.5 (single edge) or at
    t = 0.25 and t = 0.75 (two edges, see edges_multiples); when the span
    [tb, tb + t_step] straddles a corner, a row of verts is inserted there
    and faces are closed up to it. Returns the start index of the last
    emitted row (or f unchanged when no corner was crossed).
    """
    tb = t_step * i
    # make edges verts after regular ones
    if self.l_shape != 'CIRCLE' or self.r_shape != 'CIRCLE':
        if self.edges_multiples:
            # edge 1
            if tb < 0.25 and tb + t_step > 0.25:
                f0 = f
                f = len(verts)
                if self.l_shape == 'CIRCLE':
                    self.p3d_left(verts, self.l_arc.lerp(0.25), j, 0.25)
                else:
                    self.p3d_left(verts, self.l_tc.p, j, 0.25)
                if self.r_shape == 'CIRCLE':
                    self.p3d_right(verts, self.r_arc.lerp(0.25), j, 0.25)
                else:
                    self.p3d_right(verts, self.r_tc.p, j, 0.25)
                self.make_faces(f0, rM, verts, faces, matids, uvs)
            # edge 2
            if tb < 0.75 and tb + t_step > 0.75:
                f0 = f
                f = len(verts)
                if self.l_shape == 'CIRCLE':
                    self.p3d_left(verts, self.l_arc.lerp(0.75), j, 0.75)
                else:
                    self.p3d_left(verts, self.l_t1.p, j, 0.75)
                if self.r_shape == 'CIRCLE':
                    self.p3d_right(verts, self.r_arc.lerp(0.75), j, 0.75)
                else:
                    self.p3d_right(verts, self.r_t1.p, j, 0.75)
                self.make_faces(f0, rM, verts, faces, matids, uvs)
        else:
            if tb < 0.5 and tb + t_step > 0.5:
                f0 = f
                f = len(verts)
                # the step goes through the edge
                if self.l_shape == 'CIRCLE':
                    self.p3d_left(verts, self.l_arc.lerp(0.5), j, 0.5)
                else:
                    self.p3d_left(verts, self.l_t1.p, j, 0.5)
                if self.r_shape == 'CIRCLE':
                    self.p3d_right(verts, self.r_arc.lerp(0.5), j, 0.5)
                else:
                    self.p3d_right(verts, self.r_t1.p, j, 0.5)
                self.make_faces(f0, rM, verts, faces, matids, uvs)
    return f
def make_step(self, i, verts, faces, matids, uvs, nose_y=0):
    """Build geometry for step i of the curved stair.

    Circle-shaped sides are subdivided into n_subs sub-rows so the mesh
    follows the arc; straight-shaped sides get one row per step plus the
    extra edge rows from _make_edge.
    """
    # open stair with closed face
    # step nose
    rM = self._make_nose(i, i, verts, faces, matids, uvs, nose_y)
    f = 0
    if self.l_shape == 'CIRCLE' or self.r_shape == 'CIRCLE':
        # subdivide so rows land roughly every 6 degrees of arc
        n_subs = max(1, int(abs(self.da) / pi * 30 / self.n_step))
        t_step = self.t_step / n_subs
        for j in range(n_subs):
            f0 = f
            f = self._make_step(t_step, n_subs * i + j, i, verts)
            if j > 0:
                self.make_faces(f0, rM, verts, faces, matids, uvs)
            f = self._make_edge(t_step, n_subs * i + j, i, f, rM, verts, faces, matids, uvs)
    else:
        f = self._make_step(self.t_step, i, i, verts)
        f = self._make_edge(self.t_step, i, i, f, rM, verts, faces, matids, uvs)
    # closing row at the start of the next step
    self._make_step(self.t_step, i + 1, i, verts)
    self.make_faces(f, rM, verts, faces, matids, uvs)
    if "OPEN" in self.steps_type and self.z_mode != 'LINEAR':
        # back face top
        faces.append((f + 13, f + 14, f + 15, f + 16))
        matids.append(self.idmat_step_front)
        uvs.append([(0, 0), (0, 1), (1, 1), (1, 0)])
def get_part(self, t, side):
if side == 'RIGHT':
arc = self.r_arc
shape = self.r_shape
t0, t1, tc = self.r_t0, self.r_t1, self.r_tc
else:
arc = self.l_arc
shape = self.l_shape
t0, t1, tc = self.l_t0, self.l_t1, self.l_tc
if shape == 'CIRCLE':
return t, arc, self.height, shape
else:
if self.edges_multiples:
# two edges
if t <= 0.25:
return 4 * t, t0, 0.25 * self.height, shape
elif t <= 0.75:
return 2 * (t - 0.25), tc, 0.5 * self.height, shape
| |
# Repository: netMedi/hl7apy
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import os
import unittest
import hl7apy
from hl7apy import DEFAULT_ENCODING_CHARS
from hl7apy.core import Message, Segment, Field, Group, Component, SubComponent, ElementProxy
from hl7apy.exceptions import ChildNotValid, ChildNotFound, OperationNotAllowed, InvalidName, \
MaxChildLimitReached, UnsupportedVersion, InvalidEncodingChars, \
MaxLengthReached, MessageProfileNotFound, LegacyMessageProfile
from hl7apy.v2_5 import ST, SI
from hl7apy.validation import VALIDATION_LEVEL
from hl7apy.parser import parse_message, parse_segment
def _get_invalid_encoding_chars():
return {'COMPONENT': '$',
'SUBCOMPONENT': '@',
'REPETITION': 'r',
'ESCAPE': '@'}
def _get_test_msg():
return 'MSH|^~\&|SENDING APP|SENDING FAC|REC APP|REC FAC|20110708162817||OML^O33^OML_O33|978226056138290600|D|2.5|||||USA||EN\r' \
'PID|||1010110909194822^^^GATEWAY_IL&1.3.6.1.4.1.21367.2011.2.5.17&ISO^PK||PIPPO^PLUTO^^^^^L||19790515|M|||VIA DI TOPOLINO^CAGLIARI^CAGLIARI^^09100^100^H^^092009^^~^^^^^^L^^^|||||||PPPPPP79E15B354I^^^CF|||||CAGLIARI|||100|||||||||||\r' \
'PV1||O|||||||||||||||||1107080001^^^LIS\r' \
'SPM|1|100187400201^||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC\r' \
'ORC|NW|83428|83428|18740|SC||||20110708162817||||||||^\r' \
'TQ1|||||||||R\r' \
'OBR||83428|83428|TPO^ANTI THYROPEROXIDASE ANTIBODIES(TPO)^^TPO||||||||||||ND^UNKNOWN^UNKNOWN\r'
def _get_test_msg_2():
return 'MSH|^~\\&|SENDING APP|SENDING FAC|REC APP|REC FAC|20110708162817||OML^O33|978226056138290600|D|2.5|||||USA||EN\r' \
'PID|1||566-554-3423^^^GHH^MR||SURNAME^NAME^A|||M|||1111 SOMEWHERE STREET^^SOMEWHERE^^^USA||555-555-2004~444-333-222|||M\r' \
'PV1||O|||||||||||||||||1107080001^^^LIS\r' \
'SPM|1|100187400201||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC\r' \
'ORC|NW|83428|83428|18740|SC||||20110708162817||||||||\r' \
'TQ1|||||||||R\r' \
'OBR||83428|83428|TPO^ANTI THYROPEROXIDASE ANTIBODIES(TPO)^^TPO||||||||||||ND^UNKNOWN^UNKNOWN\r' \
'SPM|2|100187400101||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC\r' \
'ORC|NW|83425|83425|18740|SC||||20110708162817||||||||\rTQ1|||||||||R\r' \
'OBR||83425|83425|CA^S-CALCIUM^^CA||||||||||||ND^Sconosciuto^Sconosciuto\rORC|NW|83426|83426|18740|SC||||20110708162817||||||||\r' \
'TQ1|||||||||R\rOBR||83426|83426|HDL^HDL CHOLESTEROL^^HDL||||||||||||ND^UNKNOWN^UNKNOWN\r' \
'ORC|NW|83427|83427|18740|SC||||20110708162817||||||||\r' \
'TQ1|||||||||R\rOBR||83427|83427|LDL^LDL CHOLESTEROL^^LDL||||||||||||ND^UNKNOWN^UNKNOWN'
def _get_fail_test_msg():
# This message will fail validation because of the OML_O33 message structure
return 'MSH|^~\\&|SENDING APP|SENDING FAC|REC APP|REC FAC|20110708162817||OML^O33^OML_O33|978226056138290600|D|2.5|||||USA||EN\r' \
'PID|1||566-554-3423^^^GHH^MR||SURNAME^NAME^A|||M|||1111 SOMEWHERE STREET^^SOMEWHERE^^^USA||555-555-2004~444-333-222|||M\r' \
'PV1||O|||||||||||||||||1107080001^^^LIS\r' \
'SPM|1|100187400201||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC\r' \
'ORC|NW|83428|83428|18740|SC||||20110708162817||||||||\r' \
'TQ1|||||||||R\r' \
'OBR||83428|83428|TPO^ANTI THYROPEROXIDASE ANTIBODIES(TPO)^^TPO||||||||||||ND^UNKNOWN^UNKNOWN\r' \
'SPM|2|100187400101||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC\r' \
'ORC|NW|83425|83425|18740|SC||||20110708162817||||||||\rTQ1|||||||||R\r' \
'OBR||83425|83425|CA^S-CALCIUM^^CA||||||||||||ND^Sconosciuto^Sconosciuto\rORC|NW|83426|83426|18740|SC||||20110708162817||||||||\r' \
'TQ1|||||||||R\rOBR||83426|83426|HDL^HDL CHOLESTEROL^^HDL||||||||||||ND^UNKNOWN^UNKNOWN\r' \
'ORC|NW|83427|83427|18740|SC||||20110708162817||||||||\r' \
'TQ1|||||||||R\rOBR||83427|83427|LDL^LDL CHOLESTEROL^^LDL||||||||||||ND^UNKNOWN^UNKNOWN'
def _get_rsp_k21_mp_msg():
return 'MSH|^~\&|SENDING APP|SENDING FAC|RECEIVING APP|RECEIVING FAC|20140410170011||RSP^K22^RSP_K21|11111111|P|2.5\r' \
'MSA|AA|20140410170015\r' \
'QAK|222222222|OK\r' \
'QPD|IHE PDQ Query|222222222|@PID.3.1.1^3333333|||||^^^IHEFACILITY&1.3.6.1.4.1.21367.3000.1.6&ISO|\r' \
'PID|1||10101109091948^^^GATEWAY&1.3.6.1.4.1.21367.2011.2.5.17&ISO||JOHN^SMITH^^^^^A||19690113|M|||VIA DELLE VIE^^CAGLIARI^^^ITA^H^^092009||||||||||||CAGLIARI|||\r'
class TestMessage(unittest.TestCase):
    """Unit tests for hl7apy.core.Message.

    Covers creation (known/unknown/Z messages, versions, encoding chars),
    child management (groups, segments, validation-level and version
    mixing), ER7 value assignment, and message-profile handling. setUp
    loads the ITI-21 profile and a legacy profile from the test data dir.
    """

    def setUp(self):
        base_path = os.path.abspath(os.path.dirname(__file__))
        mp_path = os.path.join(base_path, 'profiles/iti_21')
        self.rsp_k21_mp = hl7apy.load_message_profile(mp_path)
        legacy_mp = os.path.join(base_path, 'profiles/old_pharm_h4')
        self.legacy_mp = hl7apy.load_message_profile(legacy_mp)

    # Message test cases
    def test_create_empty_message(self):
        """An unnamed Message is allowed only in tolerant mode."""
        e = Message()
        self.assertEqual(e.classname, 'Message')
        self.assertRaises(OperationNotAllowed, Message, validation_level=VALIDATION_LEVEL.STRICT)

    def test_create_unknown_message(self):
        """An unknown structure name is rejected for every supported version."""
        self.assertRaises(InvalidName, Message, 'AAA_A01')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.2')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.3')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.3.1')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.4')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.5')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.5.1')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.6')
        self.assertRaises(InvalidName, Message, 'AAA_A01', version='2.7')
        self.assertRaises(InvalidName, Message, 'AAA_A01', validation_level=VALIDATION_LEVEL.STRICT)

    def test_create_unsupported_version_message(self):
        self.assertRaises(UnsupportedVersion, Message, version='2.0')

    def test_create_invalid_encoding_chars_message(self):
        self.assertRaises(InvalidEncodingChars, Message, encoding_chars=_get_invalid_encoding_chars())
        self.assertRaises(InvalidEncodingChars, Message, 'ADT_A01',
                          encoding_chars=_get_invalid_encoding_chars(),
                          validation_level=VALIDATION_LEVEL.STRICT)

    def test_create_insensitive(self):
        """Message names are case-insensitive on input."""
        e = Message('oml_o35')
        self.assertEqual(e.classname, 'Message')
        self.assertTrue(e.is_named('OML_O35'))

    def test_add_group_to_message(self):
        e = Message('OML_O35')
        self.assertRaises(ChildNotFound, e.add_group, 'UNKNOWN_GROUP')
        g = e.add_group('OML_O35_PATIENT')
        self.assertTrue(g.is_named('OML_O35_PATIENT'))
        self.assertEqual(g.classname, 'Group')
        self.assertIn(g, e.children)
        # group names are case-insensitive for profile-based messages too
        m = Message('RSP_K21', reference=self.rsp_k21_mp)
        g = m.add_group('rsp_k21_query_response')
        self.assertTrue(g.is_named('RSP_K21_QUERY_RESPONSE'))
        self.assertIn(g, m.children)

    def test_add_child_with_different_validation_level(self):
        """Parent and child must share the same validation level, both ways."""
        m = Message('RSP_K21', validation_level=VALIDATION_LEVEL.STRICT)
        g = Group('RSP_K21_QUERY_RESPONSE', validation_level=VALIDATION_LEVEL.TOLERANT)
        self.assertRaises(OperationNotAllowed, m.add, g)
        m = Message('RSP_K21', validation_level=VALIDATION_LEVEL.TOLERANT)
        g = Group('RSP_K21_QUERY_RESPONSE', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(OperationNotAllowed, m.add, g)
        m = Message('RSP_K21', validation_level=VALIDATION_LEVEL.STRICT)
        s = Segment('QPD', validation_level=VALIDATION_LEVEL.TOLERANT)
        self.assertRaises(OperationNotAllowed, m.add, s)
        m = Message('RSP_K21', validation_level=VALIDATION_LEVEL.TOLERANT)
        s = Segment('QPD', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(OperationNotAllowed, m.add, s)

    def test_add_child_with_different_version(self):
        """Parent and child must share the same HL7 version."""
        m = Message('RSP_K21', version='2.4')
        g = Group('RSP_K21_QUERY_RESPONSE', version='2.5')
        self.assertRaises(OperationNotAllowed, m.add, g)
        m = Message('RSP_K21', version='2.4')
        s = Segment('QPD', version='2.5')
        self.assertRaises(OperationNotAllowed, m.add, s)

    def test_add_empty_children_to_message(self):
        a = Message('OML_O33', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(ChildNotValid, a.add, Group())
        b = Message('OML_O33')
        b.add(Group())
        # NOTE(review): the profile is passed positionally here, unlike the
        # keyword form reference=... used elsewhere in this class — confirm
        # the second positional parameter of Message is the reference.
        c = Message('RSP_K21', self.rsp_k21_mp)
        c.add(Group())

    def test_add_not_allowed_segment_to_known_message(self):
        a = Message('OML_O33', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(ChildNotValid, a.add, Segment('MSA'))
        b = Message('OML_O33')
        b.add(Segment('MSA'))
        a = Message('RSP_K21', validation_level=VALIDATION_LEVEL.STRICT, reference=self.rsp_k21_mp)
        self.assertRaises(ChildNotValid, a.add, Segment('SPM'))
        self.assertRaises(ChildNotValid, a.add_segment, 'SPM')
        b = Message('RSP_K21', reference=self.rsp_k21_mp)
        b.add(Segment('SPM'))
        # NOTE(review): add_group is called with a segment name here —
        # presumably exercising tolerant-mode behavior; confirm intent.
        b.add_group('SPM')

    def test_create_z_message(self):
        """Z-messages need the ZNN_ZNN naming pattern (N alphanumeric)."""
        Message('ZDT_ZDT')
        Message('ZA1_ZB2')
        Message('za1_zb2')
        self.assertRaises(InvalidName, Message, 'za1azb2')
        self.assertRaises(InvalidName, Message, 'z##_azb2')
        self.assertRaises(InvalidName, Message, 'zab_zaba')
        self.assertRaises(InvalidName, Message, 'zaba_zab')
        self.assertRaises(InvalidName, Message, 'OML_ZAB')
        self.assertRaises(InvalidName, Message, 'zab_oml')
        Message('ZDT_ZDT', validation_level=VALIDATION_LEVEL.STRICT)
        Message('ZA1_ZB2', validation_level=VALIDATION_LEVEL.STRICT)
        Message('za1_zb2', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(InvalidName, Message, 'za1azb2', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(InvalidName, Message, 'z##_azb2', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(InvalidName, Message, 'zab_zaba', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(InvalidName, Message, 'zaba_zab', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(InvalidName, Message, 'OML_ZAB', validation_level=VALIDATION_LEVEL.STRICT)
        self.assertRaises(InvalidName, Message, 'zab_oml', validation_level=VALIDATION_LEVEL.STRICT)

    def test_add_z_segment(self):
        """Z-segments can be added to any message, even under strict validation."""
        a = Message('OML_O33', validation_level=VALIDATION_LEVEL.STRICT)
        a.add(Segment('ZIN', validation_level=VALIDATION_LEVEL.STRICT))
        a.add_segment('zap')
        a.zbe = 'ZBE||ab|ab|'
        b = Message('OML_O33', validation_level=VALIDATION_LEVEL.TOLERANT)
        b.add(Segment('ZIN', validation_level=VALIDATION_LEVEL.TOLERANT))
        b.add_segment('zap')
        b.zbe = 'ZBE||ab|ab|'
        a = Message('RSP_K21', validation_level=VALIDATION_LEVEL.STRICT, reference=self.rsp_k21_mp)
        a.add(Segment('ZIN', validation_level=VALIDATION_LEVEL.STRICT))
        a.add_segment('zap')
        a.zbe = 'ZBE||ab|ab|'
        a = Message('RSP_K21', validation_level=VALIDATION_LEVEL.TOLERANT, reference=self.rsp_k21_mp)
        a.add(Segment('ZIN', validation_level=VALIDATION_LEVEL.TOLERANT))
        a.add_segment('zap')
        a.zbe = 'ZBE||ab|ab|'

    def test_add_to_z_message(self):
        """A Z-message accepts standard segments and groups as children."""
        m = Message('ZDT_ZDT')
        m.add(Segment('PID'))
        m.add_segment('ZIN')
        m.zap = 'ZAP||21||'
        m.add_group('OML_O33_PATIENT')
        m = Message('ZDT_ZDT', validation_level=VALIDATION_LEVEL.STRICT)
        m.add(Segment('PID', validation_level=VALIDATION_LEVEL.STRICT))
        m.add_segment('ZIN')
        m.zap = 'ZAP||21||'
        m.add_group('OML_O33_PATIENT')

    def test_add_known_segment_to_empty_message(self):
        a = Message('OML_O33')
        a.add(Segment('MSA'))

    def test_add_known_group_to_empty_message(self):
        a = Message('OML_O33')
        a.add(Group('OML_O33_PATIENT'))

    def test_assign_wrong_segment_to_known_position(self):
        """Assigning a mismatched segment (object or ER7) to a named slot fails."""
        a = Message('OML_O33', validation_level=VALIDATION_LEVEL.STRICT)
        b = Message('OML_O33')
        with self.assertRaises(ChildNotValid):
            a.msh = Segment('SPM')
        with self.assertRaises(ChildNotValid):
            a.pid = 'EVN||20080115153000||||20080114003000'
        with self.assertRaises(InvalidName):
            a.zin = 'PAP||abc||'
        with self.assertRaises(InvalidName):
            a.msh = 'PAP||abc||'
        with self.assertRaises(ChildNotValid):
            b.msh = Segment('SPM')
        with self.assertRaises(ChildNotValid):
            b.pid = 'EVN||20080115153000||||20080114003000'
        with self.assertRaises(InvalidName):
            b.zin = 'PAP||abc||'
        with self.assertRaises(InvalidName):
            b.pid = 'PAP||abc||'

    def test_add_segment_to_message_mix(self):
        a = Message('OML_O33', validation_level=VALIDATION_LEVEL.TOLERANT)
        msh = Segment('MSH', validation_level=VALIDATION_LEVEL.TOLERANT)
        pid = Segment('PID', validation_level=VALIDATION_LEVEL.TOLERANT)
        g = Group('OML_O33_PATIENT')
        g.add(pid)
        a.add(msh)
        a.add(g)

    def test_assign_value(self):
        """message.value = er7 must behave like parse_message for matching structures."""
        msg = _get_test_msg()
        a = Message('OML_O33', validation_level=VALIDATION_LEVEL.TOLERANT)
        parsed_a = parse_message(msg, validation_level=VALIDATION_LEVEL.TOLERANT)
        a.value = msg
        self.assertEqual(a.to_er7(), parsed_a.to_er7())
        b = Message('OML_O33', validation_level=VALIDATION_LEVEL.STRICT)
        b.value = msg
        parsed_b = parse_message(msg, validation_level=VALIDATION_LEVEL.STRICT)
        self.assertEqual(b.to_er7(), parsed_b.to_er7())
        self.assertEqual(list(b.children.indexes.keys()), list(parsed_b.children.indexes.keys()))
        # structure mismatch is rejected
        c = Message('ADT_A01', validation_level=VALIDATION_LEVEL.TOLERANT)
        with self.assertRaises(OperationNotAllowed):
            c.value = msg
        # corrupt the separators so the content no longer matches
        msg = msg.replace('^', 'x')
        with self.assertRaises(OperationNotAllowed):
            a.value = msg
        # version mismatch is rejected
        c = Message('OML_O33', version='2.6')
        with self.assertRaises(OperationNotAllowed):
            c.value = msg
        msg = _get_rsp_k21_mp_msg()
        a = Message('RSP_K21', validation_level=VALIDATION_LEVEL.TOLERANT, reference=self.rsp_k21_mp)
        parsed_a = parse_message(msg, message_profile=self.rsp_k21_mp,
                                 validation_level=VALIDATION_LEVEL.TOLERANT)
        a.value = msg
        self.assertEqual(a.to_er7(), parsed_a.to_er7())
        msg = _get_rsp_k21_mp_msg()
        a = Message('RSP_K21', validation_level=VALIDATION_LEVEL.STRICT, reference=self.rsp_k21_mp)
        parsed_a = parse_message(msg, message_profile=self.rsp_k21_mp,
                                 validation_level=VALIDATION_LEVEL.STRICT)
        a.value = msg
        self.assertEqual(a.to_er7(), parsed_a.to_er7())

    def test_assign_value_unknown_message(self):
        """An unnamed Message infers its name from the assigned ER7 content."""
        msg = _get_test_msg_2()
        a = Message()
        parsed_a = parse_message(msg, validation_level=VALIDATION_LEVEL.TOLERANT)
        a.value = msg
        self.assertEqual(a.name, 'OML_O33')
        self.assertEqual(a.to_er7(), parsed_a.to_er7())

    def test_message_profile(self):
        m = Message('RSP_K21', reference=self.rsp_k21_mp)
        # The original qpd_3 is varies
        self.assertEqual(m.qpd.qpd_3.datatype, 'QIP')
        self.assertFalse(m.qpd.allow_infinite_children)

    def test_message_profile_not_found(self):
        self.assertRaises(MessageProfileNotFound, Message, 'ADT_A01', reference=self.rsp_k21_mp)

    # def test_message_ordered_children(self):
    #     m = Message('OML_O33')
    #     m.add(Group('OML_O33_PATIENT'))
    #     ordered_children = m.children.get_ordered_children()
    #     self.assertEqual(ordered_children[0][0].name, 'MSH' )
    #     self.assertIsNone(ordered_children[1])
    #     self.assertEqual(ordered_children[3][0].name, 'OML_O33_PATIENT')
    #     self.assertIsNone(ordered_children[2])
    #     self.assertIsNone(ordered_children[4])

    # def test_message_get_children(self):
    #     m = Message('OML_O33')
    #     children = m.children.get_children()
    #     self.assertEqual(len(children), 1)
    #     m.pid = 'PID|||||bianchi^mario|||'
    #     children = m.children.get_children()
    #     self.assertEqual(len(children), 2)

    def test_bug_13(self):
        """Regression test: ElementProxy identity and traversal index invalidation."""
        m = Message("RSP_K21")
        g = m.rsp_k21_query_response
        self.assertEqual(id(m.rsp_k21_query_response), id(g))  # test that the ElementProxy is the same
        # tests the creation of traversal_indexes item
        pid1 = m.rsp_k21_query_response.pid
        self.assertIn("RSP_K21_QUERY_RESPONSE", m.children.traversal_indexes)
        pid2 = m.rsp_k21_query_response.pid
        pid1.value = 'PID|a|b|'
        # tests that assigning a child to one occurrence affect also the others
        self.assertEqual(pid1.children, pid2.children)
        self.assertEqual(pid1.children, m.rsp_k21_query_response.pid.children)
        self.assertNotIn("RSP_K21_QUERY_RESPONSE", m.children.traversal_indexes)
        # NOTE(review): 'sub' is unused — presumably this only checks that the
        # deep traversal below does not raise; confirm.
        sub = m.rsp_k21_query_response.pid.pid_3.cx_10.cwe_1

    def test_create_v27_message(self):
        """v2.7 messages get the truncation char '#' appended to MSH-2 by default."""
        m = Message('RSP_K21', version='2.7')
        self.assertEqual(m.encoding_chars['TRUNCATION'], '#')
        self.assertEqual(m.msh.msh_2.to_er7(), '^~\\&#')

    def test_create_v27_message_no_truncation(self):
        m = Message('RSP_K21', encoding_chars=DEFAULT_ENCODING_CHARS, version='2.7')
        self.assertNotIn('TRUNCATION', m.encoding_chars)
        self.assertEqual(m.msh.msh_2.to_er7(), '^~\\&')

    def test_legacy_message_profile(self):
        self.assertRaises(LegacyMessageProfile, Message, 'RAS_O17', reference=self.legacy_mp)
class TestGroup(unittest.TestCase):
# Group test cases
def setUp(self):
    """Prepare reference ER7 fragments and load the ITI-21 message profile."""
    self.oml_o33_specimen = 'SPM|1|100187400201||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC\r' \
                            'ORC|NW|83428|83428|18740|SC||||20110708162817\r' \
                            'TQ1|||||||||R\r' \
                            'OBR||83428|83428|TPO^ANTI THYROPEROXIDASE ANTIBODIES(TPO)^^TPO||||||||||||ND^UNKNOWN^UNKNOWN'
    self.rsp_k21_query_response = 'PID|1||10101109091948^^^GATEWAY&1.3.6.1.4.1.21367.2011.2.5.17&ISO||JOHN^SMITH^^^^^A||19690113|M|||VIA DELLE VIE^^CAGLIARI^^^ITA^H^^092009||||||||||||CAGLIARI'
    base_path = os.path.abspath(os.path.dirname(__file__))
    path = os.path.join(base_path, 'profiles/iti_21')
    self.rsp_k21_mp = hl7apy.load_message_profile(path)
def test_create_unknown_group(self):
    """An unknown group name is rejected in both tolerant and strict mode."""
    self.assertRaises(InvalidName, Group, 'UNKNOWN')
    self.assertRaises(InvalidName, Group, 'UNKNOWN', validation_level=VALIDATION_LEVEL.STRICT)
def test_create_unamed_group_strict(self):
    """Strict validation requires a named group."""
    self.assertRaises(OperationNotAllowed, Group, validation_level=VALIDATION_LEVEL.STRICT)
def test_add_child_with_different_validation_level(self):
    """A child must share its parent group's validation level, both ways."""
    g = Group('RSP_K21_QUERY_RESPONSE', validation_level=VALIDATION_LEVEL.STRICT)
    s = Segment('PID', validation_level=VALIDATION_LEVEL.TOLERANT)
    self.assertRaises(OperationNotAllowed, g.add, s)
    g = Group('RSP_K21_QUERY_RESPONSE', validation_level=VALIDATION_LEVEL.TOLERANT)
    s = Segment('PID', validation_level=VALIDATION_LEVEL.STRICT)
    self.assertRaises(OperationNotAllowed, g.add, s)
def test_add_child_with_different_version(self):
    """A child must share its parent group's HL7 version."""
    g = Group('RSP_K21_QUERY_RESPONSE', version='2.5')
    s = Segment('QPD', version='2.4')
    self.assertRaises(OperationNotAllowed, g.add, s)
def test_add_unexpected_child_to_group(self):
    """Only segments/groups may be group children; messages, fields, components are not."""
    g = Group()
    m = Message('OML_O33')
    f = Field()
    c = Component(datatype='ST')
    sub = SubComponent(datatype='ST')
    self.assertRaises(ChildNotValid, g.add, m)
    self.assertRaises(ChildNotValid, g.add, f)
    self.assertRaises(ChildNotValid, g.add, c)
    self.assertRaises(ChildNotValid, g.add, sub)
def test_delete_group(self):
    """del message.<group> removes the group (tolerant, strict, and profile-based)."""
    m = Message('OML_O33', validation_level=VALIDATION_LEVEL.TOLERANT)
    g = Group('OML_O33_PATIENT', validation_level=VALIDATION_LEVEL.TOLERANT)
    m.add(g)
    self.assertTrue(g in m.children)
    del m.oml_o33_patient
    self.assertFalse(g in m.children)
    m = Message('OML_O33', validation_level=VALIDATION_LEVEL.STRICT)
    g = Group('OML_O33_PATIENT', validation_level=VALIDATION_LEVEL.STRICT)
    m.add(g)
    self.assertTrue(g in m.children)
    del m.oml_o33_patient
    self.assertFalse(g in m.children)
    m = Message('RSP_K21', validation_level=VALIDATION_LEVEL.TOLERANT, reference=self.rsp_k21_mp)
    g = m.add_group('RSP_K21_QUERY_RESPONSE')
    self.assertTrue(g in m.children)
    del m.rsp_k21_query_response
    self.assertFalse(g in m.children)
    m = Message('RSP_K21', validation_level=VALIDATION_LEVEL.STRICT, reference=self.rsp_k21_mp)
    g = m.add_group('RSP_K21_QUERY_RESPONSE')
    self.assertTrue(g in m.children)
    del m.rsp_k21_query_response
    self.assertFalse(g in m.children)
def test_create_supported_version_group(self):
    """Creating an unnamed group with a supported version succeeds."""
    Group(version='2.5')
def test_create_unsupported_version_group(self):
    """An unsupported HL7 version is rejected for groups."""
    self.assertRaises(UnsupportedVersion, Group, version='2.0')
def test_add_z_segment(self):
a = Group('OML_O33_PATIENT', validation_level=VALIDATION_LEVEL.STRICT)
a.add(Segment('ZIN', validation_level=VALIDATION_LEVEL.STRICT))
a.add_segment('zap')
a.zbe = 'ZBE||ab|ab|'
b | |
points.append(seg.c1)
shape = Shape(points)
shapes.append(shape)
points = []
if len(points) > 0:
points.append(self.c1)
shape = Shape(points)
shapes.append(shape)
def add_points(self):
    """
    Rebuild segments from intersection data: gather each segment's start
    point plus its split points, close with this element's end point, then
    recreate the segment list from those points.
    """
    points = []
    if self.nbsegs > 0:
        for segment in self._segs:
            points.append(segment.c0)
            points.extend(split[1] for split in segment.splits)
        points.append(self.c1)
    self._create_segments(points)
def set_users(self):
    """
    Register a user on every segment (and, through them, on their points).
    """
    for segment in self._segs:
        segment.add_user()
def consume(self):
    """Mark this element as no longer available for further processing."""
    self.available = False
class Qtree(_QuadTree):
    """
    Top-level spatial index, created by the user and populated with
    geographically placed members that can later be tested for
    intersection against a geographic bounding box.
    """
    def __init__(self, coordsys, extend=EPSILON, max_items=MAX_ITEMS, max_depth=MAX_DEPTH):
        """
        objs may be blender objects or shapely geoms
        extend: how much seek arround
        """
        self._extend = extend
        self._geoms = []
        # keep the input coordsys so outputs can be mapped back later
        self.coordsys = coordsys
        super(Qtree, self).__init__(0, 0, coordsys.width, coordsys.height, max_items, max_depth)

    @property
    def ngeoms(self):
        """Number of geoms currently held by the index."""
        return len(self._geoms)

    def build(self, geoms):
        """
        Build a spacial index from shapely geoms
        """
        t = time.time()
        self._geoms = geoms
        for idx, geom in enumerate(geoms):
            self._insert(idx, geom.bounds)
        print("Qtree.build() :%.2f seconds" % (time.time() - t))

    def insert(self, id, geom):
        """Store geom and index it under id by its bounds."""
        self._geoms.append(geom)
        self._insert(id, geom.bounds)

    def newPoint(self, co):
        """Return an already-indexed point near co, or index and return a new one."""
        candidate = Point(co, self._extend)
        count, found = self.intersects(candidate)
        for idx in found:
            # reuse the first point already stored at this location
            return self._geoms[idx]
        self.insert(self.ngeoms, candidate)
        return candidate

    def newSegment(self, c0, c1):
        """
        allow "opposite" segments,
        those segments are not found by intersects
        and not stored in self.geoms
        """
        candidate = Segment(c0, c1, self._extend)
        count, found = self.intersects(candidate)
        for idx in found:
            existing = self._geoms[idx]
            if existing.c0 == c0 and existing.c1 == c1:
                return existing
            if existing.c0 == c1 and existing.c1 == c0:
                # reversed twin: link it once, then always hand out the twin
                if not existing.opposite:
                    existing.opposite = candidate
                    candidate.original = existing
                return existing.opposite
        self.insert(self.ngeoms, candidate)
        return candidate

    def intersects(self, geom):
        """Return (count, sorted ids) of indexed geoms whose bounds hit geom's bounds."""
        hits = list(self._intersect(geom.bounds))
        return len(hits), sorted(hits)
class Io():
    """Conversion helpers between blender curves, Shape objects and shapely geoms.

    Fix: the output-collector parameters (geoms/shapes/walls) previously used
    mutable default arguments ([]), so results silently accumulated across
    calls; they now default to None and a fresh list is created when omitted.
    """

    @staticmethod
    def ensure_iterable(obj):
        """Return obj unchanged when it is iterable, else wrap it in a one-item list."""
        try:
            iter(obj)
        except TypeError:
            obj = [obj]
        return obj

    # Conversion methods

    @staticmethod
    def _to_geom(shape):
        """Convert a valid Shape to a shapely LineString.

        :raises RuntimeError: when the shape is not valid
        """
        if not shape.valid:
            raise RuntimeError('Cant convert invalid shape to Shapely LineString')
        return shapely.geometry.LineString(shape.coords)

    @staticmethod
    def shapes_to_geoms(shapes):
        """Convert a collection of Shape objects to shapely LineStrings."""
        return [Io._to_geom(shape) for shape in shapes]

    @staticmethod
    def _to_shape(geometry, shapes):
        """Recursively convert a shapely geometry into Shape objects appended to shapes.

        Points are deduplicated through the global 'point_tree' spatial index.
        :raises RuntimeError: when the global point_tree is not set up
        """
        if vars_dict['point_tree'] is None:
            raise RuntimeError("geoms to shapes require a global point_tree spacial index")
        if hasattr(geometry, 'exterior'):
            # Polygon: outer boundary first, then holes
            Io._to_shape(geometry.exterior, shapes)
            for geom in geometry.interiors:
                Io._to_shape(geom, shapes)
        elif hasattr(geometry, 'geoms'):
            # Multi and Collections
            for geom in geometry.geoms:
                Io._to_shape(geom, shapes)
        else:
            # LineString / LinearRing
            points = [vars_dict['point_tree'].newPoint(p) for p in list(geometry.coords)]
            shapes.append(Shape(points))

    @staticmethod
    def geoms_to_shapes(geoms, shapes=None):
        """Convert shapely geoms to Shape objects.

        :param shapes: optional output list; a fresh list is created when omitted
        :return: the output list
        """
        if shapes is None:
            shapes = []
        for geom in geoms:
            Io._to_shape(geom, shapes)
        return shapes

    # Input methods

    @staticmethod
    def _interpolate_bezier(pts, wM, p0, p1, resolution):
        """Append world-space samples of the bezier span p0 -> p1 (end point excluded).

        A straight span is emitted as a single point, which can lower the point
        count by a resolution factor; directions are compared normalized to
        handle the non linear t of bezier segments.
        """
        if resolution == 0:
            pts.append(wM * p0.co.to_3d())
        else:
            v = (p1.co - p0.co).normalized()
            d1 = (p0.handle_right - p0.co).normalized()
            d2 = (p1.co - p1.handle_left).normalized()
            if d1 == v and d2 == v:
                # both handles aligned on the chord: effectively straight
                pts.append(wM * p0.co.to_3d())
            else:
                seg = interpolate_bezier(wM * p0.co,
                                         wM * p0.handle_right,
                                         wM * p1.handle_left,
                                         wM * p1.co,
                                         resolution)
                for i in range(resolution - 1):
                    pts.append(seg[i].to_3d())

    @staticmethod
    def _coords_from_spline(wM, resolution, spline):
        """Return world-space coords for a POLY or BEZIER spline, closing cyclic ones."""
        pts = []
        if spline.type == 'POLY':
            pts = [wM * p.co.to_3d() for p in spline.points]
            if spline.use_cyclic_u:
                pts.append(pts[0])
        elif spline.type == 'BEZIER':
            points = spline.bezier_points
            for i in range(1, len(points)):
                p0 = points[i - 1]
                p1 = points[i]
                Io._interpolate_bezier(pts, wM, p0, p1, resolution)
            pts.append(wM * points[-1].co)
            if spline.use_cyclic_u:
                p0 = points[-1]
                p1 = points[0]
                Io._interpolate_bezier(pts, wM, p0, p1, resolution)
                pts.append(pts[0])
        return pts

    @staticmethod
    def _add_geom_from_curve(curve, invert_world, resolution, geoms):
        """Append one LineString per spline of a blender curve to geoms."""
        wM = invert_world * curve.matrix_world
        for spline in curve.data.splines:
            pts = Io._coords_from_spline(wM, resolution, spline)
            geoms.append(shapely.geometry.LineString(pts))

    @staticmethod
    def curves_to_geoms(curves, resolution, geoms=None):
        """
        @curves : blender curves collection
        Fill geoms (a fresh list when omitted) with one LineString per spline.
        Return coordsys for outputs
        """
        if geoms is None:
            geoms = []
        curves = Io.ensure_iterable(curves)
        coordsys = CoordSys(curves)
        t = time.time()
        for curve in curves:
            Io._add_geom_from_curve(curve, coordsys.invert, resolution, geoms)
        print("Io.curves_as_line() :%.2f seconds" % (time.time() - t))
        return coordsys

    @staticmethod
    def _add_shape_from_curve(curve, invert_world, resolution, shapes):
        """Append one Shape per spline of a blender curve to shapes.

        Points go through the global point_tree index so they are shared.
        """
        wM = invert_world * curve.matrix_world
        for spline in curve.data.splines:
            pts = Io._coords_from_spline(wM, resolution, spline)
            pts = [vars_dict['point_tree'].newPoint(pt) for pt in pts]
            shapes.append(Shape(points=pts))

    @staticmethod
    def curves_to_shapes(curves, coordsys, resolution, shapes=None):
        """
        @curves : blender curves collection
        Fill and return simple shapes (a fresh list when omitted).
        """
        if shapes is None:
            shapes = []
        curves = Io.ensure_iterable(curves)
        t = time.time()
        for curve in curves:
            Io._add_shape_from_curve(curve, coordsys.invert, resolution, shapes)
        print("Io.curves_to_shapes() :%.2f seconds" % (time.time() - t))
        return shapes

    # Output methods

    @staticmethod
    def _poly_to_wall(scene, matrix_world, poly, height, name):
        """Create, link and select a 2D extruded curve object for a polygon wall.

        Return (exterior coord count, hole count, the new object).
        """
        curve = bpy.data.curves.new(name, type='CURVE')
        curve.dimensions = "2D"
        curve.fill_mode = 'BOTH'
        curve.extrude = height
        n_ext = len(poly.exterior.coords)
        n_int = len(poly.interiors)
        Io._add_spline(curve, poly.exterior)
        for geom in poly.interiors:
            Io._add_spline(curve, geom)
        curve_obj = bpy.data.objects.new(name, curve)
        curve_obj.matrix_world = matrix_world
        scene.objects.link(curve_obj)
        curve_obj.select = True
        scene.objects.active = curve_obj
        return n_ext, n_int, curve_obj

    @staticmethod
    def wall_uv(me, bm):
        """UV-map wall faces: cube projection for caps, smart projection for sides."""
        for face in bm.faces:
            face.select = face.material_index > 0
        bmesh.update_edit_mesh(me, True)
        bpy.ops.uv.cube_project(scale_to_bounds=False, correct_aspect=True)
        for face in bm.faces:
            face.select = face.material_index < 1
        bmesh.update_edit_mesh(me, True)
        bpy.ops.uv.smart_project(use_aspect=True, stretch_to_bounds=False)

    @staticmethod
    def to_wall(scene, coordsys, geoms, height, name, walls=None):
        """
        use curve extrude as it does respect vertices number and is not removing doubles
        so it is easy to set material index
        cap faces are tri, sides faces are quads
        :param walls: optional output list; a fresh list is created when omitted
        :return: the list of created wall objects
        """
        if walls is None:
            walls = []
        bpy.ops.object.select_all(action='DESELECT')
        geoms = Io.ensure_iterable(geoms)
        for poly in geoms:
            if hasattr(poly, 'exterior'):
                half_height = height / 2.0
                n_ext, n_int, obj = Io._poly_to_wall(scene, coordsys.world, poly, half_height, name)
                bpy.ops.object.convert(target="MESH")
                bpy.ops.object.mode_set(mode='EDIT')
                me = obj.data
                bm = bmesh.from_edit_mesh(me)
                bm.verts.ensure_lookup_table()
                bm.faces.ensure_lookup_table()
                # the curve was extruded symmetrically; shift so the wall sits on z=0
                for v in bm.verts:
                    v.co.z += half_height
                nfaces = 0
                for i, f in enumerate(bm.faces):
                    bm.faces[i].material_index = 2
                    if len(f.verts) > 3:
                        # first quad marks the start of the side faces
                        nfaces = i
                        break
                # walls without holes are inside
                mat_index = 0 if n_int > 0 else 1
                for i in range(nfaces, nfaces + n_ext - 1):
                    bm.faces[i].material_index = mat_index
                for i in range(nfaces + n_ext - 1, len(bm.faces)):
                    bm.faces[i].material_index = 1
                bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.003)
                bmesh.update_edit_mesh(me, True)
                Io.wall_uv(me, bm)
                bpy.ops.mesh.dissolve_limited(angle_limit=0.00349066, delimit={'NORMAL'})
                bpy.ops.mesh.dissolve_degenerate()
                bpy.ops.object.mode_set(mode='OBJECT')
                bpy.ops.object.shade_flat()
                MaterialUtils.add_wall_materials(obj)
                walls.append(obj)
        return walls

    @staticmethod
    def _add_spline(curve, geometry):
        """Append the geometry coords as a POLY spline (cyclic when first == last)."""
        coords = list(geometry.coords)
        spline = curve.splines.new('POLY')
        spline.use_endpoint_u = False
        spline.use_cyclic_u = coords[0] == coords[-1]
        spline.points.add(len(coords) - 1)
        for i, coord in enumerate(coords):
            x, y, z = Vector(coord).to_3d()
            spline.points[i].co = (x, y, z, 1)

    @staticmethod
    def _as_spline(curve, geometry):
        """
        add a spline into a blender curve
        @curve : blender curve
        """
        if hasattr(geometry, 'exterior'):
            # Polygon
            Io._add_spline(curve, geometry.exterior)
            for geom in geometry.interiors:
                Io._add_spline(curve, geom)
        elif hasattr(geometry, 'geoms'):
            # Multi and Collections
            for geom in geometry.geoms:
                Io._as_spline(curve, geom)
        else:
            # LinearRing, LineString and Shape
            Io._add_spline(curve, geometry)

    @staticmethod
    def to_curve(scene, coordsys, geoms, name, dimensions='3D'):
        """Create and link one curve object holding a spline per geom; return it."""
        t = time.time()
        geoms = Io.ensure_iterable(geoms)
        curve = bpy.data.curves.new(name, type='CURVE')
        curve.dimensions = dimensions
        for geom in geoms:
            Io._as_spline(curve, geom)
        curve_obj = bpy.data.objects.new(name, curve)
        curve_obj.matrix_world = coordsys.world
        scene.objects.link(curve_obj)
        curve_obj.select = True
        print("Io.to_curves() :%.2f seconds" % (time.time() - t))
        return curve_obj

    @staticmethod
    def to_curves(scene, coordsys, geoms, name, dimensions='3D'):
        """Create one curve object per geom; return the list of objects."""
        geoms = Io.ensure_iterable(geoms)
        return [Io.to_curve(scene, coordsys, geom, name, dimensions) for geom in geoms]
class ShapelyOps():
@staticmethod
def min_bounding_rect(geom):
""" min_bounding_rect
minimum area oriented bounding rect
"""
# Compute edges (x2-x1,y2-y1)
if geom.convex_hull.geom_type == 'Polygon':
hull_points_2d = [list(coord[0:2]) for coord in list(geom.convex_hull.exterior.coords)]
else:
hull_points_2d = [list(coord[0:2]) for coord in list(geom.convex_hull.coords)]
edges = np.zeros((len(hull_points_2d) - 1, 2))
# empty 2 column array
for i in range(len(edges)):
edge_x = hull_points_2d[i + 1][0] - hull_points_2d[i][0]
edge_y = hull_points_2d[i + 1][1] - hull_points_2d[i][1]
edges[i] = [edge_x, edge_y]
# Calculate edge angles atan2(y/x)
edge_angles = np.zeros((len(edges))) # empty 1 column array
for i in range(len(edge_angles)):
edge_angles[i] = atan2(edges[i, 1], edges[i, 0])
# Check for angles in 1st quadrant
for i in range(len(edge_angles)):
edge_angles[i] = abs(edge_angles[i] % (pi / 2)) # want strictly positive answers
# Remove duplicate angles
edge_angles = np.unique(edge_angles)
# Test each angle to find bounding box with smallest area
min_bbox = (0, sys.maxsize, 0, 0, 0, 0, 0, 0) # rot_angle, area, width, height, min_x, max_x, min_y, max_y
# print "Testing", len(edge_angles), "possible rotations for bounding box... \n"
for i in range(len(edge_angles)):
# Create rotation matrix to shift points to baseline
# R = [ cos(theta) , cos(theta-PI/2)
# cos(theta+PI/2) , cos(theta) ]
| |
null=True, on_delete=models.SET_NULL, help_text="Цель посещения")
fin_source = models.ForeignKey(IstochnikiFinansirovaniya, default=None, blank=True, null=True, on_delete=models.SET_NULL, help_text="Перезаписать источник финансирования из направления")
first_time = models.BooleanField(default=False, help_text="Впервые")
result_reception = models.ForeignKey(ResultOfTreatment, default=None, blank=True, null=True, on_delete=models.SET_NULL, help_text="Результат обращения")
outcome_illness = models.ForeignKey(Outcomes, default=None, blank=True, null=True, on_delete=models.SET_NULL, help_text="Исход")
place = models.ForeignKey(Place, default=None, blank=True, null=True, on_delete=models.SET_NULL, help_text="Условие оказание помощи")
diagnos = models.CharField(blank=True, help_text="Заключительный Диагноз приема", default="", max_length=255)
maybe_onco = models.BooleanField(default=False, help_text="Подозрение на онко")
creator = models.ForeignKey(
DoctorProfile,
null=True,
blank=True,
default=None,
related_name="doc_add_research",
db_index=True,
help_text='Профиль пользователя, добавившего услуги к созданному направлению',
on_delete=models.SET_NULL,
)
parent = models.ForeignKey('self', related_name='parent_issledovaniye', help_text="Исследование основание", blank=True, null=True, default=None, on_delete=models.SET_NULL)
medical_examination = models.DateField(blank=True, null=True, default=None, help_text="Дата осмотра")
localization = models.ForeignKey(directory.Localization, blank=True, null=True, default=None, help_text="Локализация", on_delete=models.SET_NULL)
service_location = models.ForeignKey(directory.ServiceLocation, blank=True, null=True, default=None, help_text="Место оказания услуги", on_delete=models.SET_NULL)
link_file = models.CharField(max_length=255, blank=True, null=True, default=None, help_text="Ссылка на файл")
study_instance_uid = models.CharField(max_length=64, blank=True, null=True, default=None, help_text="uuid снимка - экземпляр")
study_instance_uid_tag = models.CharField(max_length=64, blank=True, null=True, default=None, help_text="study instance_uid tag")
acsn_id = models.CharField(max_length=55, blank=True, null=True, default=None, help_text="N3-ОДИИ уникальный идентификатор заявки")
n3_odii_task = models.CharField(max_length=55, blank=True, null=True, default=None, help_text="N3-ОДИИ идентификатор Task заявки")
n3_odii_service_request = models.CharField(max_length=55, blank=True, null=True, default=None, help_text="N3-ОДИИ идентификатор ServiceRequest заявки")
n3_odii_patient = models.CharField(max_length=55, blank=True, null=True, default=None, help_text="N3-ОДИИ идентификатор пациента заявки")
n3_odii_uploaded_task_id = models.CharField(max_length=55, blank=True, null=True, default=None, help_text="N3-ОДИИ идентификатор Task результата")
gen_direction_with_research_after_confirm = models.ForeignKey(
directory.Researches, related_name='research_after_confirm', null=True, blank=True, help_text='Авто назначаемое при подтверждении', on_delete=models.SET_NULL
)
aggregate_lab = JSONField(null=True, blank=True, default=None, help_text='ID направлений лаборатории, привязаных к стационарному случаю')
aggregate_desc = JSONField(null=True, blank=True, default=None, help_text='ID направлений описательных, привязаных к стационарному случаю')
microbiology_conclusion = models.TextField(default=None, null=True, blank=True, help_text='Заключение по микробиологии')
hospital_department_override = models.ForeignKey(Podrazdeleniya, blank=True, null=True, default=None, help_text="Отделение стационара", on_delete=models.SET_NULL)
doc_add_additional = models.ForeignKey(
DoctorProfile, null=True, blank=True, related_name="doc_add_additional", db_index=True, help_text='Профиль-добавил исполнитель дополнительные услуги', on_delete=models.SET_NULL
)
    @property
    def time_save_local(self):
        """`time_save` converted to the active local timezone."""
        return localtime(self.time_save)
    @property
    def time_confirmation_local(self):
        """`time_confirmation` converted to the active local timezone."""
        return localtime(self.time_confirmation)
    def get_stat_diagnosis(self):
        # Stub: statistical diagnosis retrieval is not implemented yet.
        pass
@property
def hospital_department_replaced_title(self):
if not self.research or not self.research.is_hospital:
return None
if self.hospital_department_override:
return self.hospital_department_override.get_title()
return None
@property
def doc_confirmation_fio(self):
if self.doc_confirmation_string:
return self.doc_confirmation_string
if self.doc_confirmation:
return self.doc_confirmation.get_fio()
return ''
@property
def doc_confirmation_full_fio(self):
if self.doc_confirmation:
return self.doc_confirmation.get_fio()
return ''
@property
def doc_position(self):
if self.doc_confirmation:
return self.doc_confirmation.position.title
return ''
    def gen_after_confirm(self, user: User):
        """Auto-create the follow-up direction configured for this research.

        No-op unless the study is confirmed and a follow-up research is set.
        The financing source is taken from the parent direction when one exists.
        """
        if not self.time_confirmation or not self.gen_direction_with_research_after_confirm:
            return
        Napravleniya.gen_napravleniya_by_issledovaniya(
            self.napravleniye.client_id,
            "",
            # inherit the financing source from the parent direction when present
            self.napravleniye.parent.napravleniye.istochnik_f_id if self.napravleniye.parent else self.napravleniye.istochnik_f_id,
            "",
            None,
            user.doctorprofile,
            {-1: [self.gen_direction_with_research_after_confirm_id]},
            {},
            False,
            {},
            vich_code="",
            count=1,
            discount=0,
            parent_iss=self.napravleniye.parent_id or self.pk,
            parent_auto_gen=None,
        )
    def __str__(self):
        """Direction representation followed by the research title."""
        return "%s %s" % (str(self.napravleniye), self.research.title)
def is_get_material(self):
"""
Осуществлен ли забор всего материала для исследования
:return: True, если весь материал взят
"""
return self.tubes.filter().exists() and all([x.doc_get is not None for x in self.tubes.filter()])
@property
def material_date(self):
dt = self.time_confirmation
if self.tubes.filter(time_get__isnull=False).exists():
t = self.tubes.filter(time_get__isnull=False)[0]
dt = t.time_get
return strfdatetime(dt, '%Y-%m-%d')
    def get_visit_date(self, force=False):
        """Return the visit date as a display string, stamping it on first access.

        Side effect: if the linked direction has no visit date or marker yet,
        both are filled in (current time + confirming doctor) and the
        direction is saved. Returns '' for unconfirmed studies unless *force*.
        """
        if not self.time_confirmation and not force:
            return ""
        if not self.napravleniye.visit_date or not self.napravleniye.visit_who_mark:
            self.napravleniye.visit_date = timezone.now()
            self.napravleniye.visit_who_mark = self.doc_confirmation
            self.napravleniye.save()
        return strdate(self.napravleniye.visit_date)
    def get_medical_examination(self):
        """Return the examination date, deriving and persisting it when missing.

        Fallback chain: direction's visit date -> confirmation time -> today.
        The derived value is saved back onto the model.
        """
        if not self.medical_examination:
            if self.napravleniye.visit_date or self.time_confirmation:
                self.medical_examination = (self.napravleniye.visit_date or self.time_confirmation).date()
            else:
                self.medical_examination = current_time(only_date=True)
            self.save(update_fields=['medical_examination'])
        return self.medical_examination
def is_receive_material(self):
"""
Осуществлен ли прием материала лабораторией
:return: True, если весь материал принят
"""
return self.is_get_material() and all([x.doc_recive is not None for x in self.tubes.filter()])
def get_analyzer(self):
return "" if not self.api_app else self.api_app.name
    def allow_reset_confirm(self, user: User):
        """Decide whether *user* may reset this study's confirmation.

        The order of checks is load-bearing: transfer/extract researches are
        gated purely by group membership; locked directions always deny;
        fully EDS-signed directions require the global reset group; otherwise
        the confirming/executing doctor may reset within the configured time
        window, or anyone in the global reset group may.
        """
        from api.stationar.stationar_func import forbidden_edit_dir
        if not self.time_confirmation:
            # nothing to reset
            return False
        if user.is_superuser:
            return True
        groups = [str(x) for x in user.groups.all()]
        if self.research.can_transfer:
            return "Сброс подтверждения переводного эпикриза" in groups
        if self.research.is_extract:
            return "Сброс подтверждения выписки" in groups
        if forbidden_edit_dir(self.napravleniye_id):
            return False
        if self.napravleniye and self.napravleniye.eds_total_signed:
            return "Сброс подтверждений результатов" in groups
        # seconds elapsed since confirmation, compared to the configured window
        ctp = int(0 if not self.time_confirmation else int(time.mktime(timezone.localtime(self.time_confirmation).timetuple())))
        ctime = int(time.time())
        current_doc_confirmation = self.doc_confirmation
        executor_confirmation = self.executor_confirmation
        rt = SettingManager.get("lab_reset_confirm_time_min") * 60
        return (
            ctime - ctp < rt and (current_doc_confirmation == user.doctorprofile or (executor_confirmation is not None and executor_confirmation == user.doctorprofile))
        ) or "Сброс подтверждений результатов" in groups
    class Meta:
        # Human-readable names for the Django admin (Russian).
        verbose_name = 'Назначение на исследование'
        verbose_name_plural = 'Назначения на исследования'
class MonitoringResult(models.Model):
    """Denormalized monitoring value: one field of one monitoring order, bucketed by reporting period."""
    PERIOD_HOUR = 'PERIOD_HOUR'
    PERIOD_DAY = 'PERIOD_DAY'
    PERIOD_WEEK = 'PERIOD_WEEK'
    PERIOD_MONTH = 'PERIOD_MONTH'
    PERIOD_QURTER = 'PERIOD_QURTER'  # NOTE: historical misspelling of "quarter"; the value is persisted — do not rename
    PERIOD_HALFYEAR = 'PERIOD_HALFYEAR'
    PERIOD_YEAR = 'PERIOD_YEAR'
    PERIOD_TYPES = (
        (PERIOD_HOUR, 'Час'),
        (PERIOD_DAY, 'День'),
        (PERIOD_WEEK, 'Неделя'),
        (PERIOD_MONTH, 'Месяц'),
        (PERIOD_QURTER, 'Квартал'),
        (PERIOD_HALFYEAR, 'Полгода'),
        (PERIOD_YEAR, 'Год'),
    )
    napravleniye = models.ForeignKey(Napravleniya, null=True, help_text='Направление', db_index=True, on_delete=models.CASCADE)
    research = models.ForeignKey(directory.Researches, null=True, blank=True, help_text='Вид мониторинга/исследования из справочника', db_index=True, on_delete=models.CASCADE)
    issledovaniye = models.ForeignKey(Issledovaniya, db_index=True, help_text='Заказ на мониторинг, для которого сохранен результат', on_delete=models.CASCADE)
    hospital = models.ForeignKey(Hospitals, default=None, blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    group_id = models.IntegerField(default=None, blank=True, null=True, db_index=True, help_text='Группа результата')
    group_order = models.IntegerField(default=None, blank=True, null=True)
    field_id = models.IntegerField(default=None, blank=True, null=True, db_index=True, help_text='Поле результата')
    field_order = models.IntegerField(default=None, blank=True, null=True)
    field_type = models.SmallIntegerField(default=None, blank=True, choices=directory.ParaclinicInputField.TYPES, null=True)
    value_aggregate = models.DecimalField(max_digits=12, decimal_places=2, default=None, blank=True, null=True)
    value_text = models.TextField(default='', blank=True)
    type_period = models.CharField(max_length=20, db_index=True, choices=PERIOD_TYPES, help_text="Тип периода")
    # Period parameters: presumably only the ones matching type_period are filled — TODO confirm against writers.
    period_param_hour = models.PositiveSmallIntegerField(default=None, blank=True, null=True)
    period_param_day = models.PositiveSmallIntegerField(default=None, blank=True, null=True, db_index=True)
    period_param_week_description = models.CharField(max_length=5, blank=True, null=True, default=None, help_text="Описание недельного периода")
    period_param_week_date_start = models.DateField(blank=True, null=True, default=None, help_text="Дата начала недельного периода")
    period_param_week_date_end = models.DateField(blank=True, null=True, default=None, help_text="Дата окончания недельного периода")
    period_param_month = models.PositiveSmallIntegerField(default=None, blank=True, null=True, db_index=True)
    period_param_quarter = models.PositiveSmallIntegerField(default=None, blank=True, null=True)
    period_param_halfyear = models.PositiveSmallIntegerField(default=None, blank=True, null=True)
    period_param_year = models.PositiveSmallIntegerField(default=None, blank=True, null=True, db_index=True)
    period_date = models.DateField(blank=True, null=True, default=None, help_text="Фактическая дата для периодов")
    class Meta:
        verbose_name = 'Мониторинг результаты'
        verbose_name_plural = 'Мониторинг результаты'
class MonitoringStatus(models.Model):
    """Workflow status (prepared/approved/rejected) of a monitoring direction, with audit info."""
    STATUS_PREPARED = 'PREPARED'
    STATUS_APPROVED = 'APPROVED'
    STATUS_REJECTED = 'REJECTED'
    STATUS_TYPES = (
        (STATUS_PREPARED, 'Подготовлен'),
        (STATUS_APPROVED, 'Утвержден'),
        (STATUS_REJECTED, 'Отклонен'),
    )
    napravleniye = models.ForeignKey(Napravleniya, null=True, help_text='Направление', db_index=True, on_delete=models.CASCADE)
    type_status = models.CharField(max_length=20, db_index=True, choices=STATUS_TYPES, help_text="Cтатус мониторинга")
    time_change_status = models.DateTimeField(null=True, blank=True, db_index=True, help_text='Время изменения статуса')
    comment = models.CharField(max_length=255, default="", blank=True, help_text='Комментарий в случае отклонения')
    who_change_status = models.ForeignKey(DoctorProfile, null=True, blank=True, db_index=True, help_text='Профиль пользователя изменившего статус', on_delete=models.SET_NULL)
class Dashboard(models.Model):
    """Named, orderable, hideable container for dashboard charts."""
    title = models.CharField(max_length=255, default="", help_text='Название дашборда', db_index=True)
    hide = models.BooleanField(default=False, blank=True, help_text='Скрытие дашборда', db_index=True)
    order = models.SmallIntegerField(default=-99, blank=True, null=True)
    def __str__(self):
        return f"{self.title}"
    class Meta:
        verbose_name = 'Дашборд'
        verbose_name_plural = 'Дашборды'
class DashboardCharts(models.Model):
    """A chart belonging to a dashboard, with a default rendering type."""
    COLUMN = 'COLUMN'
    BAR = 'BAR'
    PIE = 'PIE'
    LINE = 'LINE'
    TABLE = 'TABLE'
    DEFAULT_TYPE = (
        (COLUMN, 'Столбцы'),
        (BAR, 'Полоса'),
        (PIE, 'Пирог-куски'),
        (LINE, 'Линейная диаграмма'),
        (TABLE, 'Таблица'),
    )
    # NOTE(review): help_text says "Название дашборда" but this is the chart's own title — confirm before changing.
    title = models.CharField(max_length=255, default="", help_text='Название дашборда', db_index=True)
    dashboard = models.ForeignKey(Dashboard, null=True, help_text='Дашборд', db_index=True, on_delete=models.CASCADE)
    order = models.SmallIntegerField(default=-99, blank=True, null=True)
    hide = models.BooleanField(default=False, blank=True, help_text='Скрытие графика', db_index=True)
    hospitals_group = models.ForeignKey(HospitalsGroup, default=None, blank=True, null=True, db_index=True, help_text="Группа больниц", on_delete=models.CASCADE)
    is_full_width = models.BooleanField(default=False, blank=True, help_text='На всю ширину страницы')
    default_type = models.CharField(max_length=20, db_index=True, choices=DEFAULT_TYPE, default=COLUMN, help_text="Тип графика по умолчанию")
    def __str__(self):
        return f"{self.title} - Дашборд: {self.dashboard.title}"
    class Meta:
        verbose_name = 'Дашборд-Графики'
        verbose_name_plural = 'Дашборд-Графики'
class DashboardChartFields(models.Model):
    """A paraclinic input field plotted on a chart, with optional title override."""
    charts = models.ForeignKey(DashboardCharts, null=True, help_text='График', db_index=True, on_delete=models.CASCADE)
    field = models.ForeignKey(directory.ParaclinicInputField, null=True, help_text='Поле', db_index=True, on_delete=models.CASCADE)
    title_for_field = models.CharField(max_length=255, default="", help_text='Переопределение название поля в графике', db_index=True)
    order = models.SmallIntegerField(default=-99, blank=True, null=True)
    hide = models.BooleanField(default=False, blank=True, help_text='Скрытие поля', db_index=True)
    def __str__(self):
        return f"{self.field.title} - {self.charts.title}"
    class Meta:
        verbose_name = 'Дашборд-Поле для графика'
        verbose_name_plural = 'Дашборд-Поля для графика'
class MonitoringSumFieldByDay(models.Model):
    """Configuration: a field whose values are summed per day in monitoring reports."""
    field = models.ForeignKey(directory.ParaclinicInputField, null=True, help_text='Поле', db_index=True, on_delete=models.CASCADE)
    title = models.CharField(max_length=255, default="", help_text='Заголовок данных', db_index=True)
    order = models.SmallIntegerField(default=None, blank=True, null=True)
    def __str__(self):
        return f"{self.field.title}"
    class Meta:
        verbose_name = 'Поле сумма за день'
        verbose_name_plural = 'Поля сумм за день'
class MonitoringSumFieldTotal(models.Model):
    """Configuration: a field summed over the period starting at date_start."""
    field = models.ForeignKey(directory.ParaclinicInputField, null=True, help_text='Поле', db_index=True, on_delete=models.CASCADE)
    title = models.CharField(max_length=255, default="", help_text='Заголовок данных', db_index=True)
    date_start = models.DateField(blank=True, null=True, default=None, help_text="Дата начала отсчета")
    def __str__(self):
        return f"{self.field.title}"
    class Meta:
        verbose_name = 'Поле сумма за период от даты'
        verbose_name_plural = 'Поля сумм за период от даты'
class MethodsOfTaking(models.Model):
    """Usage counter for (drug prescription, method of taking) pairs."""
    drug_prescription = models.CharField(max_length=128, db_index=True)
    method_of_taking = models.CharField(max_length=128, db_index=True)
    count = models.IntegerField()
    @staticmethod
    def inc(dp, method):
        """Atomically increment the pair's counter, creating the row on first use.

        The previous read-modify-write (fetch, +=1, save) could lose updates
        under concurrent requests; F() pushes the increment into the database.
        NOTE: .update() bypasses save() signals — none are defined on this model here.
        """
        from django.db.models import F  # local import: file header is outside this view
        updated = MethodsOfTaking.objects.filter(drug_prescription=dp, method_of_taking=method).update(count=F('count') + 1)
        if not updated:
            # first occurrence of this pair
            MethodsOfTaking(drug_prescription=dp, method_of_taking=method, count=1).save()
    @staticmethod
    def dec(dp, method):
        """Atomically decrement the pair's counter; no-op when the row does not exist."""
        from django.db.models import F
        MethodsOfTaking.objects.filter(drug_prescription=dp, method_of_taking=method).update(count=F('count') - 1)
class Recipe(models.Model):
    """A drug prescription line attached to a study."""
    issledovaniye = models.ForeignKey(Issledovaniya, db_index=True, help_text='Направление на исследование, для которого сохранен рецепт', on_delete=models.CASCADE)
    drug_prescription = models.CharField(max_length=128, db_index=True)
    method_of_taking = models.CharField(max_length=128)
    comment = models.CharField(max_length=128)
    class Meta:
        verbose_name = 'Рецепт'
        verbose_name_plural = 'Рецепты'
class TypeJob(models.Model):
    """Catalogue of indirect work types with their value (UET or minutes, per the title)."""
    title = models.CharField(max_length=255, db_index=True)
    hide = models.BooleanField(help_text="Скрыть тип", default=False)
    value = models.DecimalField(max_digits=5, decimal_places=2, help_text="Ценность работы (в УЕТ или минутах-зависит от названия работы)")
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = 'Тип работы'
        verbose_name_plural = 'Типы работ'
class EmployeeJob(models.Model):
type_job = models.ForeignKey(TypeJob, db_index=True, help_text='Тип косвенных работ', on_delete=models.CASCADE)
count = models.SmallIntegerField(default=0, help_text="Количество данного типа", | |
ColumnFilter(hxl.data(url), include_tags=['org', 'sector', 'activity'])
# or
filter = hxl.data(url).with_columns(['org', 'sector', 'activity'])
@see: L{RowFilter}
"""
def __init__(self, source, include_tags=[], exclude_tags=[], skip_untagged=False):
"""Construct a column filter.
@param source: a L{hxl.model.Dataset}
@param include_tags: an include list of L{tag patterns<hxl.model.TagPattern>} objects to include
@param exclude_tags: an exclude list of tag patterns objects to exclude
@param skip_untagged: True if all columns without HXL hashtags should be removed
"""
super(ColumnFilter, self).__init__(source)
self.include_tags = hxl.model.TagPattern.parse_list(include_tags)
self.exclude_tags = hxl.model.TagPattern.parse_list(exclude_tags)
self.skip_untagged = skip_untagged
self.indices = [] # saved indices for columns to include
def filter_columns(self):
"""@returns: filtered list of column definitions"""
columns_in = self.source.columns
columns_out = []
for i in range(len(columns_in)):
if self._test_column(columns_in[i]):
columns_out.append(copy.deepcopy(columns_in[i]))
self.indices.append(i) # save index to avoid retesting for data
return columns_out
def filter_row(self, row):
"""@returns: filtered list of row values"""
values = []
for i in self.indices:
try:
values.append(row.values[i])
except IndexError:
pass # don't add anything
return values
def _test_column(self, column):
"""Test whether a column should be included in the output. If there
is an include list, it must be in that list; if there is an
exclude list, it must not be in that list.
@param column: the L{hxl.model.Column} to test
@returns: True if the column should be included
"""
if self.include_tags:
# exclude list
for pattern in self.include_tags:
if pattern.match(column):
# succeed as soon as we match an included pattern
return True
# fail if there was an exclude list and we didn't match
return False
if self.exclude_tags or self.skip_untagged:
# skip untagged columns?
if self.skip_untagged and not column.tag:
return False
# exclude list
for pattern in self.exclude_tags:
if pattern.match(column):
# fail as soon as we match an excluded pattern
return False
# not an include list, and no reason to exclude
return True
@staticmethod
def _load(source, spec):
"""Create a filter object from a JSON-like spec.
Note that there are two JSON filter definitions for this class: "with_columns" and "without_columns".
@param spec: the specification
@returns: a L{ColumnFilter} object
"""
if spec.get('filter') == 'with_columns':
return ColumnFilter(
source=source,
include_tags=req_arg(spec, 'includes')
)
else:
return ColumnFilter(
source=source,
exclude_tags=req_arg(spec, 'excludes'),
skip_untagged=opt_arg(spec, 'skip_untagged')
)
class CountFilter(AbstractCachingFilter):
    """Composable filter class to aggregate rows in a HXL dataset.
    This class supports the L{hxl.model.Dataset.count} convenience
    method and the L{hxl.scripts.hxlcount} command-line script.
    This is a L{caching filter<AbstractCachingFilter>} that performs
    aggregate actions such as counting, summing, and averaging across
    multiple rows of data. For example, it can reduce a dataset to a
    list of the number of times that each organisation or sector
    appears. This is the main filter for producing reports, or the
    data underlying charts and other visualisations; it is also useful
    for anonymising data by rolling it up to higher levels of
    abstraction.
    This example counts the number of rows for each organisation::
    filter = CountFilter(hxl.data(url), 'org')
    # or
    filter = hxl.data(url).count('org')
    You can do multiple levels of counting like this::
    filter = hxl.data(url).count(['org', 'sector'])
    You can also use the I{queries} argument to limit the counting to
    specific fields. This example will count only the rows where C{#adm1} is set to "Coast"::
    filter = hxl.data(url).count('org', queries='adm1=Coast')
    """
    def __init__(self, source, patterns, aggregators=None, queries=None):
        """Construct a new count filter
        If the caller does not supply any aggregators, use "count() as Count#meta+count"
        @param source: a L{hxl.model.Dataset}
        @param patterns: a single L{tag pattern<hxl.model.TagPattern>} or list of tag patterns that, together, form a unique key for counting.
        @param aggregators: one or more Aggregator objects or string representations to define the output.
        @param queries: an optional list of L{row queries<hxl.model.RowQuery>} to filter the rows being counted.
        """
        super().__init__(source)
        self.patterns = hxl.model.TagPattern.parse_list(patterns)
        if not aggregators:
            aggregators = 'count() as Count#meta+count'
        self.aggregators = Aggregator.parse_list(aggregators)
        # BUG FIX: queries previously defaulted to a shared mutable list ([]).
        self.queries = self._setup_queries(queries if queries is not None else [])
    def filter_columns(self):
        """@returns: the filtered columns"""
        columns = []
        # Add columns being counted
        for pattern in self.patterns:
            column = pattern.find_column(self.source.columns)
            if column:
                columns.append(copy.deepcopy(column))
            else:
                # placeholder column for a key pattern missing from the source
                columns.append(hxl.Column())
        # Add generated columns
        for aggregator in self.aggregators:
            columns.append(aggregator.column)
        return columns
    def filter_rows(self):
        """@returns: the filtered row values"""
        raw_data = []
        # each item is a sequence containing a tuple of key values and an _Aggregator object
        for aggregate in self._aggregate_data():
            raw_data.append(
                list(aggregate[0]) + [aggregator.value if aggregator.value is not None else '' for aggregator in aggregate[1]]
            )
        return raw_data
    def _aggregate_data(self):
        """Read the entire source dataset and produce saved aggregate data.
        @returns: the aggregated values as raw data
        """
        aggregators = {}
        # read the whole source dataset at once
        for row in self.source:
            # will always match if there are no queries
            if hxl.model.RowQuery.match_list(row, self.queries):
                # get the values in the order we need them
                values = [hxl.datatypes.normalise_space(row.get(pattern, default='')) for pattern in self.patterns]
                # make a dict key for the aggregator
                key = tuple(values)
                if key not in aggregators:
                    aggregators[key] = [copy.deepcopy(aggregator) for aggregator in self.aggregators]
                for aggregator in aggregators[key]:
                    aggregator.evaluate_row(row)
        # sort the aggregators by their keys
        return sorted(aggregators.items())
    @staticmethod
    def _load(source, spec):
        """Create a new count filter from a dict spec.
        @param spec: the JSON-like spec
        @returns: a new L{CountFilter} object
        """
        return CountFilter(
            source=source,
            patterns=opt_arg(spec, 'patterns'),
            aggregators=opt_arg(spec, 'aggregators', None),
            queries=opt_arg(spec, 'queries', [])
        )
class DeduplicationFilter(AbstractStreamingFilter):
    """Composable filter to deduplicate a HXL dataset.
    Removes duplicate lines from a dataset, where "duplicate" is
    optionally defined by a set of keys specified by the user. As a
    result, not all values in duplicate rows will necessarily be
    identical. The filter will always return the *first* matching row
    of a set of duplicates.
    Supports the hxldedup command-line script.
    TODO: add more-sophisticated matching, edit distance, etc.
    """
    def __init__(self, source, patterns=None, queries=None):
        """
        Constructor
        @param source: the upstream source dataset
        @param patterns: if provided, a list of tag patterns for columns to use for uniqueness testing.
        @param queries: optional list of row queries selecting the rows considered for deduplication.
        """
        super().__init__(source)
        self.patterns = hxl.model.TagPattern.parse_list(patterns)
        self.seen_map = set()  # row signatures seen so far (grows with the number of distinct keys)
        # BUG FIX: queries previously defaulted to a shared mutable list ([]);
        # the docstring also documented a nonexistent "filters" parameter.
        self.queries = self._setup_queries(queries if queries is not None else [])
    def filter_row(self, row):
        """@returns: the row's values, or C{None} if it's a duplicate"""
        # rows not selected by the queries pass through untouched
        if not hxl.model.RowQuery.match_list(row, self.queries):
            return row.values
        if not row:
            return None
        key = row.key(self.patterns)
        if key in self.seen_map:
            return None
        # first time we see this signature: remember it and keep the row
        self.seen_map.add(key)
        return copy.copy(row.values)
    @staticmethod
    def _load(source, spec):
        """Create a dedup filter from a dict spec.
        @param source: the upstream source
        @param spec: the JSON-like spec
        @returns: a L{DeduplicationFilter} object
        """
        return DeduplicationFilter(
            source=source,
            patterns=opt_arg(spec, 'patterns', []),
            queries=opt_arg(spec, 'queries', [])
        )
class ExpandListsFilter(AbstractBaseFilter):
"""Expand in-cell lists by duplicating data rows.
"""
def __init__(self, source, patterns=None, separator="|", correlate=False, queries=[]):
super().__init__(source)
self.separator = str(separator)
self.scan_columns(patterns)
self.correlate = correlate
self.queries = self._setup_queries(queries)
"""The row queries to limit where we expand lists"""
def filter_columns(self):
""" Remove the +list attribute from targeted columns """
columns = list(self.source.columns)
for index in self.column_indices:
column = copy.deepcopy(columns[index])
column.remove_attribute('list')
columns[index] = column
return columns
def scan_columns(self, patterns):
""" Save the indices of the columns containing lists """
self.column_indices = []
if patterns:
patterns = hxl.model.TagPattern.parse_list(patterns)
for i, column in enumerate(self.source.columns):
if hxl.model.TagPattern.match_list(column, patterns):
self.column_indices.append(i)
else:
for i, column in enumerate(self.source.columns):
if column.has_attribute("list"):
self.column_indices.append(i)
def __iter__(self):
# Special case: no columns to expand
if len(self.column_indices) == 0:
for row in self.source:
yield row
return
# Regular case
min_length = max(self.column_indices) + 1
for row in self.source:
# If there are queries, the row must match one of them
if not hxl.model.RowQuery.match_list(row, self.queries):
yield row
continue
# parse the lists
value_lists = []
for index in self.column_indices:
if index < len(row.values):
values = str(row.values[index]).split(self.separator)
value_lists.append(list(map(hxl.datatypes.normalise_space, values)))
else:
value_lists.append([""])
if (self.correlate):
# correlate the lists
nrows = max([len(item) for item in value_lists])
for i in range(0, nrows):
values = copy.deepcopy(row.values)
for j, v in enumerate(value_lists):
index = self.column_indices[j]
if len(v) <= i:
values[index] = ""
else:
values[index] = v[i]
yield hxl.model.Row(self.columns, values)
else:
# generate the cartesian product of all the lists
# generate the cartesian product of the values
row_value_list = list(itertools.product(*value_lists))
# yield all of the resulting rows
for | |
# Source repository: HighEloDevs/atus_tests
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtCore import QObject, QJsonValue, QUrl, QVariant, pyqtSignal, pyqtSlot
from scipy.odr import ODR, Model as SciPyModel, RealData
from lmfit.models import ExpressionModel
from lmfit import Parameters
from copy import deepcopy
from io import StringIO
class Model(QObject):
"""
Class used for fit.
"""
# Signals
fillDataTable = pyqtSignal(str, str, str, str, str, str, arguments=['x', 'y', 'sy', 'sx', 'filename'])
fillParamsTable = pyqtSignal(str, float, float, arguments=['param', 'value', 'uncertainty'])
writeInfos = pyqtSignal(str, arguments='expr')
uploadData = pyqtSignal(QVariant, str, arguments=['data', 'fileName'])
def __init__(self, messageHandler):
    """Initialize the fit model with empty data/fit state.

    :param messageHandler: project object used to surface messages to the
        GUI; must provide ``raiseWarn`` and ``raiseError`` (see usages below).
    """
    super().__init__()
    # Keep pandas from line-wrapping wide frames in the textual report.
    pd.set_option('display.expand_frame_repr', False)
    self._msgHandler = messageHandler
    self._data = None          # numeric DataFrame (x, y, sy, sx) used for fitting
    self._data_json = None     # string copy of the data, as loaded/displayed
    self._exp_model = ""       # fit expression provided by the user
    self._indVar = "x"         # name of the independent variable
    self._model = None         # lmfit ExpressionModel built in fit()
    self._report_fit = ""      # human-readable fit report (returned by __str__)
    self._mat_corr = ""        # correlation matrix, textual form
    self._mat_cov = ""         # covariance matrix, textual form
    self._dict_param = dict()
    self._result = None        # last fit result (lmfit/ODR), None until a fit ran
    self._coef = list()        # parameter names extracted from the model
    self._par_var = list()
    self._params = Parameters()
    self._dict = dict()
    self._dict2 = dict()
    self._p0 = None            # initial guesses as a list of strings (set_p0)
    self.xmin_adj = 0.
    self.xmax_adj = 0.
    self._mode = 0             # len(columns) - 2 after load_data; encodes which columns exist
    self._has_data = False
    self._isvalid = False
    self._has_sx = True        # data carries usable x uncertainties
    self._has_sy = True        # data carries usable y uncertainties
def __str__(self):
    """String form of the model: the latest fit report (empty before a fit)."""
    report = self._report_fit
    return report
@pyqtSlot(QJsonValue)
def loadDataTable(self, data = None):
    """Load data points coming from the GUI table.

    Rows whose 'bool' flag is 0 are discarded.  A zero found among the
    uncertainties of a column disables that uncertainty column; a warning
    is raised only when zeros are mixed with non-zero values.
    """
    frame = pd.DataFrame.from_records(data, columns=['x', 'y', 'sy', 'sx', 'bool'])
    # Keep only the rows the user marked as active, then drop the flag column.
    frame = frame[frame['bool'] == 1]
    del frame['bool']
    # Same zero-check for both uncertainty columns.
    for column, flag, warning in (
            ("sy", "_has_sy",
             "Um valor nulo foi encontrado nas incertezas em y, removendo coluna de sy."),
            ("sx", "_has_sx",
             "Um valor nulo foi encontrado nas incertezas em x, removendo coluna de sx.")):
        unique_vals = frame[column].unique().astype(float)
        if 0. in unique_vals:
            if len(unique_vals) > 1:
                self._msgHandler.raiseWarn(warning)
            setattr(self, flag, False)
    self._data_json = deepcopy(frame)
    # The table delivers text; convert to numbers for fitting.
    frame = frame.astype(float)
    self._data = deepcopy(frame)
    self._has_data = True
@pyqtSlot()
def loadDataClipboard(self):
    """Read a tab-separated data table from the system clipboard."""
    raw_text = QGuiApplication.clipboard().mimeData().text()
    # Parse the clipboard text as a headerless TSV; empty cells become "0".
    frame = pd.read_csv(StringIO(raw_text), sep='\t', header=None, dtype=str).replace(np.nan, "0")
    # Accept decimal commas: normalize every cell to use dots, keep as text.
    for col in frame.columns:
        frame[col] = [cell.replace(',', '.') for cell in frame[col]]
        frame[col] = frame[col].astype(str)
    self.load_data(df=frame)
@pyqtSlot(str)
def load_data(self, data_path='', df=None, df_array=None):
    """Load data from a file path, a prepared DataFrame or a raw array.

    :param data_path: URL of a .csv/.txt/.tsv file; used when non-empty.
    :param df: already-parsed DataFrame of strings (e.g. from clipboard).
    :param df_array: list of records [x, y, sy, sx, bool] from a project file.

    Detects how many columns were given (y only; x/y; x/y/sy; x/y/sy/sx),
    normalizes decimal commas, disables uncertainty columns containing
    zeros, then emits ``uploadData`` with the string table and file name.
    """
    # Display name of the loaded file (placeholder when loading a project).
    fileName = 'Dados Carregados do Projeto'
    # If no dataframe was passed, load data from the given path.
    if len(data_path) > 0:
        # Loading from .csv or (.txt and .tsv)
        data_path = QUrl(data_path).toLocalFile()
        if data_path[-3:] == "csv":
            try:
                df = pd.read_csv(data_path, sep=',', header=None, dtype = str).replace(np.nan, "0")
            except pd.errors.ParserError:
                # BUG FIX: single-quote the literal so the (",") hint stays
                # inside ONE message argument. The previous double-quoted
                # string was split by its embedded quotes into two separate
                # arguments to raiseError, mangling the message.
                self._msgHandler.raiseError('Separação de colunas de arquivos csv são com vírgula (","). Rever dados de entrada.')
                return None
        else:
            try:
                df = pd.read_csv(data_path, sep='\t', header=None, dtype = str).replace(np.nan, "0")
            except pd.errors.ParserError:
                self._msgHandler.raiseError("Separação de colunas de arquivos txt e tsv são com tab. Rever dados de entrada.")
                return None
        # Getting file name
        fileName = data_path.split('/')[-1]
    elif df is None:
        # Project array path: rows of [x, y, sy, sx, bool].
        df = pd.DataFrame.from_records(df_array, columns = ['x', 'y', 'sy', 'sx', 'bool'])
        # NOTE(review): unlike loadDataTable, rows are NOT filtered by the
        # 'bool' flag here — presumably project files only store chosen
        # rows; confirm against the save routine.
        del df['bool']
        uniqueSi = df["sy"].unique().astype(float)
        if 0. in uniqueSi:
            if len(uniqueSi) > 1:
                self._msgHandler.raiseWarn("Um valor nulo foi encontrado nas incertezas em y, removendo coluna de sy.")
            self._has_sy = False
        uniqueSi = df["sx"].unique().astype(float)
        if 0. in uniqueSi:
            if len(uniqueSi) > 1:
                self._msgHandler.raiseWarn("Um valor nulo foi encontrado nas incertezas em x, removendo coluna de sx.")
            self._has_sx = False
    # Saving the (string) dataframe in the class.
    self._data_json = deepcopy(df)
    # Applying some filters over the df.
    for i in df.columns:
        # Replace decimal commas with dots in both copies.
        df[i] = [x.replace(',', '.') for x in df[i]]
        self._data_json[i] = [x.replace(',', '.') for x in self._data_json[i]]
        try:
            df[i] = df[i].astype(float)
        except ValueError:
            self._msgHandler.raiseError("A entrada de dados só permite entrada de números. Rever arquivo de entrada.")
            return None
    # NOTE(review): this reset means the zero checks in the df_array branch
    # above only matter for their warnings; for 4-column data the flags are
    # re-derived in the mode dispatch below.
    self._has_sx = True
    self._has_sy = True
    self._mode = len(df.columns) - 2
    # Name the columns according to how many were provided.
    if self._mode == -1:
        # Single column: treat it as y and synthesize x = 0..n-1.
        self._has_sy = not self._has_sy
        self._has_sx = not self._has_sx
        df["x"] = np.arange(len(df), dtype = float)
        self._data_json = deepcopy(df.astype(str))
        self._data_json.columns = ['y', 'x']
        df["sy"] = 0.
        df["sx"] = 0.
    elif self._mode == 0:
        # Two columns: x and y, no uncertainties.
        self._has_sy = not self._has_sy
        self._has_sx = not self._has_sx
        self._data_json.columns = ['x', 'y']
        df["sy"] = 0.
        df["sx"] = 0.
    elif self._mode == 1:
        # Three columns: x, y and sy.
        self._has_sx = not self._has_sx
        self._data_json.columns = ['x', 'y', 'sy']
        df["sx"] = 0.
    else:
        # Four columns: x, y, sy and sx; zeros disable the affected column.
        try:
            self._data_json.columns = ['x', 'y', 'sy', 'sx']
            uniqueSi = self._data_json["sy"].unique().astype(float)
            if 0. in uniqueSi:
                if len(uniqueSi) > 1:
                    self._msgHandler.raiseWarn("Um valor nulo foi encontrado nas incertezas em y, removendo coluna de sy.")
                self._has_sy = False
            uniqueSi = self._data_json["sx"].unique().astype(float)
            if 0. in uniqueSi:
                if len(uniqueSi) > 1:
                    self._msgHandler.raiseWarn("Um valor nulo foi encontrado nas incertezas em x, removendo coluna de sx.")
                self._has_sx = False
        except ValueError:
            self._msgHandler.raiseError("Há mais do que 4 colunas. Rever entrada de dados.")
            return None
    self._data = deepcopy(df)
    self._has_data = True
    self.uploadData.emit(self._data_json.to_dict(orient='list'), fileName)
def set_p0(self, p0):
    """Store the user's initial parameter guesses.

    :param p0: comma-separated guesses, e.g. ``"1, 2.5, 0"``; kept as a
        list of strings.
    """
    guesses = p0.split(",")
    self._p0 = guesses
def set_expression(self, exp = "", varInd = "x"):
    """Register a new fit expression and the name of its independent variable."""
    self._indVar = varInd
    self._exp_model = exp
def fit(self, **kargs):
    """Dispatch the appropriate fit given which uncertainties are available.

    Keyword args ``wsx``/``wsy`` (default True) mean "ignore the x/y
    uncertainties"; combined with ``self._has_sx``/``self._has_sy`` they
    select between unweighted least squares, y-weighted least squares
    (lmfit) and ODR (whenever x uncertainties are actually used).
    On success fills the parameter table and emits the fit report.
    """
    wsx = kargs.pop("wsx", True)
    wsy = kargs.pop("wsy", True)
    # Build the lmfit model; "+ 0*<var>" forces the independent variable to
    # appear in the expression so lmfit always recognizes it.
    try:
        self._model = ExpressionModel(self._exp_model + " + 0*%s"%self._indVar, independent_vars=[self._indVar])
    except ValueError:
        self._msgHandler.raiseError("Expressão de ajuste escrita de forma errada. Rever função de ajuste.")
        return None
    except SyntaxError:
        self._msgHandler.raiseError("Expressão de ajuste escrita de forma errada. Rever função de ajuste.")
        return None
    # Parameter names extracted from the model expression.
    self._coef = [i for i in self._model.param_names]
    # Data columns (x, y and their uncertainties).
    x, y, sy, sx = self.data
    indices = np.arange(len(self._data.index))
    # Restrict the fit to [xmin, xmax] when a range was set (xmin != xmax).
    if self.xmin != self.xmax:
        indices = np.where((self.xmin <= self._data["x"]) & (self.xmax >= self._data["x"]))[0]
    x, y, sy, sx = x.iloc[indices], y.iloc[indices], sy.iloc[indices], sx.iloc[indices]
    data = None
    if self._has_sy and self._has_sx:  # both uncertainties available
        if wsx == True and wsy == True:
            # Ignore both: unweighted least squares.
            self.__fit_lm_wy(x, y)
            if (self._result is None) == False:
                self.__set_param_values_lm_special(x)
                self.__set_report_lm_special(x)
            else:
                return None
        elif wsx:
            # Ignore sx only: y-weighted least squares.
            self.__fit_lm(x, y, sy)
            if (self._result is None) == False:
                self.__set_param_values_lm(x)
                self.__set_report_lm(x)
            else:
                return None
        elif wsy:
            # Ignore sy only: ODR with x uncertainties.
            self.__fit_ODR_special(x, y, sx)
            if (self._result is None) == False:
                self.__set_param_values_ODR(x)
                self.__set_report_ODR(x)
            else:
                return None
        else:
            # Use both uncertainties: full ODR.
            data = RealData(x, y, sx = sx, sy = sy)
            self.__fit_ODR(data)
            if (self._result is None) == False:
                self.__set_param_values_ODR(x)
                self.__set_report_ODR(x)
            else:
                return None
    elif self._has_sy:  # only y uncertainties available
        if wsy:
            self.__fit_lm_wy(x, y)
            if (self._result is None) == False:
                self.__set_param_values_lm_special(x)
                self.__set_report_lm_special(x)
            else:
                return None
        else:
            self.__fit_lm(x, y, sy)
            if (self._result is None) == False:
                self.__set_param_values_lm(x)
                self.__set_report_lm(x)
            else:
                return None
    elif self._has_sx:  # only x uncertainties available
        if wsx:
            self.__fit_lm_wy(x, y)
            if (self._result is None) == False:
                self.__set_param_values_lm_special(x)
                self.__set_report_lm_special(x)
            else:
                return None
        else:
            self.__fit_ODR_special(x, y, sx)
            if (self._result is None) == False:
                self.__set_param_values_ODR(x)
                self.__set_report_ODR(x)
            else:
                return None
    else:  # no uncertainties at all
        self.__fit_lm_wy(x, y)
        if (self._result is None) == False:
            self.__set_param_values_lm_special(x)
            self.__set_report_lm_special(x)
        else:
            return None
    # Publish the fitted parameters (value, uncertainty) and the report.
    params = self.get_params()
    keys = list(params.keys())
    for i in range(len(keys)):
        self.fillParamsTable.emit(keys[i], params[keys[i]][0], params[keys[i]][1])
    self.writeInfos.emit(self._report_fit)
def __fit_ODR(self, data):
'''Fit com ODR.'''
pi = [1.]*len(self._coef)
fixed = [1]*len(self._coef)
aux = {c : i for i, c in enumerate(self._coef)}
if self._p0 | |
<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ConnectionArgs', 'Connection']
@pulumi.input_type
class ConnectionArgs:
    # NOTE: tfgen-generated input-args class; @pulumi.input_type introspects
    # the __init__ signature and the pulumi.getter/setter properties, so the
    # code is kept byte-identical to the generator's output.
    def __init__(__self__, *,
                 instance_id: pulumi.Input[str],
                 connection_prefix: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Connection resource.
        :param pulumi.Input[str] instance_id: The Id of instance that can run database.
        :param pulumi.Input[str] connection_prefix: Prefix of an Internet connection string. It must be checked for uniqueness. It may consist of lowercase letters, numbers, and underlines, and must start with a letter and have no more than 30 characters. Default to <instance_id> + '-tf'.
        :param pulumi.Input[str] port: Internet connection port. Valid value: [3200-3999]. Default to 3306.
        """
        pulumi.set(__self__, "instance_id", instance_id)
        # Optional inputs are only registered when supplied so the provider
        # can apply its own defaults.
        if connection_prefix is not None:
            pulumi.set(__self__, "connection_prefix", connection_prefix)
        if port is not None:
            pulumi.set(__self__, "port", port)

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> pulumi.Input[str]:
        """
        The Id of instance that can run database.
        """
        return pulumi.get(self, "instance_id")

    @instance_id.setter
    def instance_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_id", value)

    @property
    @pulumi.getter(name="connectionPrefix")
    def connection_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Prefix of an Internet connection string. It must be checked for uniqueness. It may consist of lowercase letters, numbers, and underlines, and must start with a letter and have no more than 30 characters. Default to <instance_id> + '-tf'.
        """
        return pulumi.get(self, "connection_prefix")

    @connection_prefix.setter
    def connection_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_prefix", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[str]]:
        """
        Internet connection port. Valid value: [3200-3999]. Default to 3306.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port", value)
@pulumi.input_type
class _ConnectionState:
    # NOTE: tfgen-generated state class; unlike ConnectionArgs, every field
    # is optional because it describes an existing resource being looked up.
    # Kept byte-identical to the generator's output.
    def __init__(__self__, *,
                 connection_prefix: Optional[pulumi.Input[str]] = None,
                 connection_string: Optional[pulumi.Input[str]] = None,
                 instance_id: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Connection resources.
        :param pulumi.Input[str] connection_prefix: Prefix of an Internet connection string. It must be checked for uniqueness. It may consist of lowercase letters, numbers, and underlines, and must start with a letter and have no more than 30 characters. Default to <instance_id> + '-tf'.
        :param pulumi.Input[str] connection_string: Connection instance string.
        :param pulumi.Input[str] instance_id: The Id of instance that can run database.
        :param pulumi.Input[str] ip_address: The ip address of connection string.
        :param pulumi.Input[str] port: Internet connection port. Valid value: [3200-3999]. Default to 3306.
        """
        if connection_prefix is not None:
            pulumi.set(__self__, "connection_prefix", connection_prefix)
        if connection_string is not None:
            pulumi.set(__self__, "connection_string", connection_string)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if port is not None:
            pulumi.set(__self__, "port", port)

    @property
    @pulumi.getter(name="connectionPrefix")
    def connection_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Prefix of an Internet connection string. It must be checked for uniqueness. It may consist of lowercase letters, numbers, and underlines, and must start with a letter and have no more than 30 characters. Default to <instance_id> + '-tf'.
        """
        return pulumi.get(self, "connection_prefix")

    @connection_prefix.setter
    def connection_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_prefix", value)

    @property
    @pulumi.getter(name="connectionString")
    def connection_string(self) -> Optional[pulumi.Input[str]]:
        """
        Connection instance string.
        """
        return pulumi.get(self, "connection_string")

    @connection_string.setter
    def connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "connection_string", value)

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Id of instance that can run database.
        """
        return pulumi.get(self, "instance_id")

    @instance_id.setter
    def instance_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_id", value)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        The ip address of connection string.
        """
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[str]]:
        """
        Internet connection port. Valid value: [3200-3999]. Default to 3306.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port", value)
class Connection(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             connection_prefix: Optional[pulumi.Input[str]] = None,
             instance_id: Optional[pulumi.Input[str]] = None,
             port: Optional[pulumi.Input[str]] = None,
             __props__=None):
    """
    Provides a connection resource to allocate an Internet connection string for instance.
    > **NOTE:** Available in 1.48.0+
    > **NOTE:** Each instance will allocate a intranet connection string automatically and its prefix is instance ID.
    To avoid unnecessary conflict, please specified a internet connection prefix before applying the resource.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_alicloud as alicloud
    config = pulumi.Config()
    creation = config.get("creation")
    if creation is None:
        creation = "Gpdb"
    name = config.get("name")
    if name is None:
        name = "gpdbConnectionBasic"
    default_zones = alicloud.get_zones(available_resource_creation=creation)
    default_network = alicloud.vpc.Network("defaultNetwork", cidr_block="172.16.0.0/16")
    default_switch = alicloud.vpc.Switch("defaultSwitch",
        vpc_id=default_network.id,
        cidr_block="172.16.0.0/24",
        zone_id=default_zones.zones[0].id)
    default_instance = alicloud.gpdb.Instance("defaultInstance",
        vswitch_id=default_switch.id,
        engine="gpdb",
        engine_version="4.3",
        instance_class="gpdb.group.segsdx2",
        instance_group_count="2",
        description=name)
    default_connection = alicloud.gpdb.Connection("defaultConnection",
        instance_id=default_instance.id,
        connection_prefix="testAbc")
    ```
    ## Import
    AnalyticDB for PostgreSQL's connection can be imported using the id, e.g.
    ```sh
    $ pulumi import alicloud:gpdb/connection:Connection example abc12345678
    ```
    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] connection_prefix: Prefix of an Internet connection string. It must be checked for uniqueness. It may consist of lowercase letters, numbers, and underlines, and must start with a letter and have no more than 30 characters. Default to <instance_id> + '-tf'.
    :param pulumi.Input[str] instance_id: The Id of instance that can run database.
    :param pulumi.Input[str] port: Internet connection port. Valid value: [3200-3999]. Default to 3306.
    """
    # Typing-only overload (keyword-argument form); never executed at
    # runtime — the real work happens in _internal_init.
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: ConnectionArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Provides a connection resource to allocate an Internet connection string for instance.
    > **NOTE:** Available in 1.48.0+
    > **NOTE:** Each instance will allocate a intranet connection string automatically and its prefix is instance ID.
    To avoid unnecessary conflict, please specified a internet connection prefix before applying the resource.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_alicloud as alicloud
    config = pulumi.Config()
    creation = config.get("creation")
    if creation is None:
        creation = "Gpdb"
    name = config.get("name")
    if name is None:
        name = "gpdbConnectionBasic"
    default_zones = alicloud.get_zones(available_resource_creation=creation)
    default_network = alicloud.vpc.Network("defaultNetwork", cidr_block="172.16.0.0/16")
    default_switch = alicloud.vpc.Switch("defaultSwitch",
        vpc_id=default_network.id,
        cidr_block="172.16.0.0/24",
        zone_id=default_zones.zones[0].id)
    default_instance = alicloud.gpdb.Instance("defaultInstance",
        vswitch_id=default_switch.id,
        engine="gpdb",
        engine_version="4.3",
        instance_class="gpdb.group.segsdx2",
        instance_group_count="2",
        description=name)
    default_connection = alicloud.gpdb.Connection("defaultConnection",
        instance_id=default_instance.id,
        connection_prefix="testAbc")
    ```
    ## Import
    AnalyticDB for PostgreSQL's connection can be imported using the id, e.g.
    ```sh
    $ pulumi import alicloud:gpdb/connection:Connection example abc12345678
    ```
    :param str resource_name: The name of the resource.
    :param ConnectionArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Typing-only overload (ConnectionArgs form); never executed at
    # runtime — the real work happens in _internal_init.
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch to ``_internal_init`` for either overload form."""
    # Decide which overload the caller used: a ConnectionArgs object or
    # plain keyword arguments.
    resource_args, opts = _utilities.get_resource_args_opts(ConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   connection_prefix: Optional[pulumi.Input[str]] = None,
                   instance_id: Optional[pulumi.Input[str]] = None,
                   port: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    """Shared implementation behind both ``__init__`` overloads.

    Normalizes the resource options, builds the input property bag and
    registers the resource with the Pulumi engine via the base class.
    """
    # Options must be normalized before any use of opts.version/opts.id.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource (no existing id): build the property bag.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = ConnectionArgs.__new__(ConnectionArgs)
        __props__.__dict__["connection_prefix"] = connection_prefix
        # instance_id is required unless we are rehydrating from a URN.
        if instance_id is None and not opts.urn:
            raise TypeError("Missing required property 'instance_id'")
        __props__.__dict__["instance_id"] = instance_id
        __props__.__dict__["port"] = port
        # Output-only properties start as None; the engine fills them in.
        __props__.__dict__["connection_string"] = None
        __props__.__dict__["ip_address"] = None
    super(Connection, __self__).__init__(
        'alicloud:gpdb/connection:Connection',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
connection_prefix: Optional[pulumi.Input[str]] = None,
connection_string: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None) -> 'Connection':
"""
Get an existing Connection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] connection_prefix: Prefix of an Internet connection string. It must be checked for uniqueness. It may consist of lowercase letters, numbers, and underlines, and must start with a letter and have no more than 30 characters. Default to <instance_id> + '-tf'.
:param pulumi.Input[str] connection_string: Connection instance string.
:param pulumi.Input[str] instance_id: The Id of instance that can run database.
:param pulumi.Input[str] ip_address: The ip address of connection string.
| |
= {
'server': {
'name': 'no-personality-files',
'imageRef': '6b0edabb-8cde-4684-a3f4-978960a51378',
'flavorRef': '2',
'networks': 'auto',
'personality': [{
'path': '/path/to/file',
'contents': 'ZWNobyAiaGVsbG8gd29ybGQi'
}]
}
}
req = fakes.HTTPRequestV21.blank('/servers', version='2.57')
req.body = jsonutils.dump_as_bytes(body)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
ex = self.assertRaises(
exception.ValidationError, controller.create, req, body=body)
self.assertIn('personality', six.text_type(ex))
@mock.patch('nova.compute.utils.check_num_instances_quota',
            new=lambda *args, **kwargs: 1)
class ServersControllerCreateTestV260(test.NoDBTestCase):
    """Negative tests for creating a server with a multiattach volume."""

    def setUp(self):
        # Stub out flavor / request-spec / cinder-volume lookups so only the
        # API-layer multiattach version checks are exercised.
        super(ServersControllerCreateTestV260, self).setUp()
        self.useFixture(nova_fixtures.NoopQuotaDriverFixture())
        self.controller = servers.ServersController()
        get_flavor_mock = mock.patch(
            'nova.compute.flavors.get_flavor_by_flavor_id',
            return_value=fake_flavor.fake_flavor_obj(
                context.get_admin_context(), flavorid='1'))
        get_flavor_mock.start()
        self.addCleanup(get_flavor_mock.stop)
        reqspec_create_mock = mock.patch(
            'nova.objects.RequestSpec.create')
        reqspec_create_mock.start()
        self.addCleanup(reqspec_create_mock.stop)
        # The fake volume reports multiattach=True to trigger the checks.
        volume_get_mock = mock.patch(
            'nova.volume.cinder.API.get',
            return_value={'id': uuids.fake_volume_id, 'multiattach': True})
        volume_get_mock.start()
        self.addCleanup(volume_get_mock.stop)

    def _post_server(self, version=None):
        # Boot-from-volume request using the (multiattach) fake volume.
        body = {
            'server': {
                'name': 'multiattach',
                'flavorRef': '1',
                'networks': 'none',
                'block_device_mapping_v2': [{
                    'uuid': uuids.fake_volume_id,
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 0,
                    'delete_on_termination': True}]
            }
        }
        req = fakes.HTTPRequestV21.blank(
            '/servers', version=version or '2.60')
        req.body = jsonutils.dump_as_bytes(body)
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        return self.controller.create(req, body=body)

    def test_create_server_with_multiattach_fails_old_microversion(self):
        """Tests the case that the user tries to boot from volume with a
        multiattach volume but before using microversion 2.60.
        """
        self.useFixture(nova_fixtures.AllServicesCurrent())
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self._post_server, '2.59')
        self.assertIn('Multiattach volumes are only supported starting with '
                      'compute API version 2.60', six.text_type(ex))

    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=compute_api.MIN_COMPUTE_MULTIATTACH - 1)
    def test_create_server_with_multiattach_fails_not_available(
            self, mock_get_min_version_all_cells):
        """Tests the case that the user tries to boot from volume with a
        multiattach volume but before the deployment is fully upgraded.
        """
        ex = self.assertRaises(webob.exc.HTTPConflict, self._post_server)
        self.assertIn('Multiattach volume support is not yet available',
                      six.text_type(ex))
class ServersControllerCreateTestV263(ServersControllerCreateTest):
    """Tests for the ``trusted_image_certificates`` create parameter (2.63)."""

    def _create_instance_req(self, certs=None):
        # Build a 2.63 create request carrying *certs*; glance signature
        # verification flags are enabled so certificate validation applies.
        self.body['server']['trusted_image_certificates'] = certs
        self.flags(verify_glance_signatures=True, group='glance')
        self.flags(enable_certificate_validation=True, group='glance')
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.63')

    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=compute_api.MIN_COMPUTE_TRUSTED_CERTS)
    def test_create_instance_with_trusted_certs(self, get_min_ver):
        """Test create with valid trusted_image_certificates argument"""
        self._create_instance_req(
            ['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
             '674736e3-f25c-405c-8362-bbf991e0ce0a'])
        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body).obj

    def test_create_instance_without_trusted_certs(self):
        """Test create without trusted image certificates"""
        self._create_instance_req()
        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body).obj

    def test_create_instance_with_empty_trusted_cert_id(self):
        """Make sure we can't create with an empty certificate ID"""
        self._create_instance_req([''])
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn('is too short', six.text_type(ex))

    def test_create_instance_with_empty_trusted_certs(self):
        """Make sure we can't create with an empty array of IDs"""
        # Set directly (not via the helper) so an empty list is preserved.
        self.body['server']['trusted_image_certificates'] = []
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.63')
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn('is too short', six.text_type(ex))

    def test_create_instance_with_too_many_trusted_certs(self):
        """Make sure we can't create with an array of >50 unique IDs"""
        self._create_instance_req(['cert{}'.format(i) for i in range(51)])
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn('is too long', six.text_type(ex))

    def test_create_instance_with_nonunique_trusted_certs(self):
        """Make sure we can't create with a non-unique array of IDs"""
        self._create_instance_req(['cert', 'cert'])
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn('has non-unique elements', six.text_type(ex))

    def test_create_instance_with_invalid_trusted_cert_id(self):
        """Make sure we can't create with non-string certificate IDs"""
        self._create_instance_req([1, 2])
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn('is not of type', six.text_type(ex))

    def test_create_instance_with_invalid_trusted_certs(self):
        """Make sure we can't create with certificates in a non-array"""
        self._create_instance_req("not-an-array")
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn('is not of type', six.text_type(ex))

    def test_create_server_with_trusted_certs_pre_2_63_fails(self):
        """Make sure we can't use trusted_certs before 2.63"""
        self._create_instance_req(['trusted-cert-id'])
        # Downgrade the request after building it so the body still carries
        # the (now unknown) property.
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.62')
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn('Additional properties are not allowed',
                      six.text_type(ex))

    def test_create_server_with_trusted_certs_policy_failed(self):
        # Only the trusted_certs sub-policy is restricted; the rest is open.
        rule_name = "os_compute_api:servers:create:trusted_certs"
        rules = {"os_compute_api:servers:create": "@",
                 "os_compute_api:servers:create:forced_host": "@",
                 "os_compute_api:servers:create:attach_volume": "@",
                 "os_compute_api:servers:create:attach_network": "@",
                 rule_name: "project:fake"}
        self._create_instance_req(['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8'])
        self.policy.set_rules(rules)
        exc = self.assertRaises(exception.PolicyNotAuthorized,
                                self.controller.create, self.req,
                                body=self.body)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    @mock.patch.object(compute_api.API, 'create')
    def test_create_server_with_cert_validation_error(
            self, mock_create):
        # A validation failure from the compute layer maps to HTTP 400.
        mock_create.side_effect = exception.CertificateValidationFailed(
            cert_uuid="cert id", reason="test cert validation error")
        self._create_instance_req(['trusted-cert-id'])
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self.controller.create, self.req,
                               body=self.body)
        self.assertIn('test cert validation error',
                      six.text_type(ex))

    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=compute_api.MIN_COMPUTE_TRUSTED_CERTS - 1)
    def test_create_server_with_cert_validation_not_available(
            self, mock_get_min_version_all_cells):
        # Old computes in the deployment: the request is rejected with 409.
        self._create_instance_req(['trusted-cert-id'])
        ex = self.assertRaises(webob.exc.HTTPConflict,
                               self.controller.create, self.req,
                               body=self.body)
        self.assertIn('Image signature certificate validation support '
                      'is not yet available',
                      six.text_type(ex))
class ServersControllerCreateTestV267(ServersControllerCreateTest):
    """Tests for the block-device ``volume_type`` create parameter (2.67)."""

    def setUp(self):
        super(ServersControllerCreateTestV267, self).setUp()
        # Image-backed boot-from-volume mapping with an explicit volume type.
        self.block_device_mapping_v2 = [
            {'uuid': '70a599e0-31e7-49b7-b260-868f441e862b',
             'source_type': 'image',
             'destination_type': 'volume',
             'boot_index': 0,
             'volume_size': '1',
             'volume_type': 'fake-lvm-1'
            }]

    def _test_create_extra(self, *args, **kwargs):
        # Force microversion 2.67 on every request issued by the base helper.
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.67')
        return super(ServersControllerCreateTestV267, self)._test_create_extra(
            *args, **kwargs)

    @mock.patch('nova.objects.Service.get_minimum_version',
                return_value=compute_api.MIN_COMPUTE_VOLUME_TYPE)
    def test_create_server_with_trusted_volume_type_pre_2_67_fails(self,
                                                                   get_min_ver):
        """Make sure we can't use volume_type before 2.67"""
        self.body['server'].update(
            {'block_device_mapping_v2': self.block_device_mapping_v2})
        # NOTE(review): this dumps the mapping list, not self.body — looks
        # unintentional, but the schema rejection below is driven by the
        # body= kwarg, so the test still asserts what it means to; confirm.
        self.req.body = jsonutils.dump_as_bytes(self.block_device_mapping_v2)
        self.req.api_version_request = \
            api_version_request.APIVersionRequest('2.66')
        ex = self.assertRaises(
            exception.ValidationError, self.controller.create, self.req,
            body=self.body)
        self.assertIn("'volume_type' was unexpected", six.text_type(ex))

    @mock.patch.object(compute_api.API, 'create',
                       side_effect=exception.VolumeTypeNotFound(
                           id_or_name='fake-lvm-1'))
    def test_create_instance_with_volume_type_not_found(self, mock_create):
        """Trying to boot from volume with a volume type that does not exist
        will result in a 400 error.
        """
        params = {'block_device_mapping_v2': self.block_device_mapping_v2}
        ex = self.assertRaises(webob.exc.HTTPBadRequest,
                               self._test_create_extra, params)
        self.assertIn('Volume type fake-lvm-1 could not be found',
                      six.text_type(ex))

    @mock.patch('nova.objects.service.get_minimum_version_all_cells',
                return_value=compute_api.MIN_COMPUTE_VOLUME_TYPE - 1)
    def test_check_volume_type_new_inst_old_compute(self, get_min_version):
        """Trying to boot from volume with a volume_type but not all computes
        are upgraded will result in a 409 error.
        """
        params = {'block_device_mapping_v2': self.block_device_mapping_v2}
        ex = self.assertRaises(webob.exc.HTTPConflict,
                               self._test_create_extra, params)
        self.assertIn('Volume type support is not yet available',
                      six.text_type(ex))

    def test_create_instance_with_volume_type_empty_string(self):
        """Test passing volume_type='' which is accepted but not used."""
        self.block_device_mapping_v2[0]['volume_type'] = ''
        params = {'block_device_mapping_v2': self.block_device_mapping_v2}
        self._test_create_extra(params)

    def test_create_instance_with_none_volume_type(self):
        """Test passing volume_type=None which is accepted but not used."""
        self.block_device_mapping_v2[0]['volume_type'] = None
        params = {'block_device_mapping_v2': self.block_device_mapping_v2}
        self._test_create_extra(params)

    def test_create_instance_without_volume_type(self):
        """Test passing without volume_type which is accepted but not used."""
        self.block_device_mapping_v2[0].pop('volume_type')
        params = {'block_device_mapping_v2': self.block_device_mapping_v2}
        self._test_create_extra(params)

    def test_create_instance_with_volume_type_too_long(self):
        """Tests the maxLength schema validation on volume_type."""
        self.block_device_mapping_v2[0]['volume_type'] = 'X' * 256
        params = {'block_device_mapping_v2': self.block_device_mapping_v2}
        ex = self.assertRaises(exception.ValidationError,
                               self._test_create_extra, params)
        self.assertIn('is too long', six.text_type(ex))
class ServersControllerCreateTestWithMock(test.TestCase):
    """Server-create tests that mock the compute API's create() directly."""

    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    flavor_ref = 'http://localhost/123/flavors/3'

    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTestWithMock, self).setUp()
        self.flags(enable_instance_password=True, group='api')
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}
        self.controller = servers.ServersController()
        # Minimal valid create-server request body.
        server = {
            'name': 'server_test',
            'imageRef': self.image_uuid,
            'flavorRef': self.flavor_ref,
            'metadata': {'hello': 'world', 'open': 'stack'},
        }
        self.body = {'server': server}
        self.req = fakes.HTTPRequest.blank('/fake/servers')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"

    def _test_create_extra(self, params, no_image=False):
        # Merge *params* into the base request body and POST it.
        self.body['server']['flavorRef'] = 2
        if no_image:
            self.body['server'].pop('imageRef', None)
        self.body['server'].update(params)
        self.req.body = jsonutils.dump_as_bytes(self.body)
        self.req.headers["content-type"] = "application/json"
        self.controller.create(self.req, body=self.body).obj['server']

    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
            create_mock):
        net_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        fixed_ip = '10.0.2.3'
        create_mock.side_effect = exception.FixedIpAlreadyInUse(
            address=fixed_ip,
            instance_uuid=net_id)
        # An in-use fixed IP must surface as a 400.
        self.assertRaises(
            webob.exc.HTTPBadRequest, self._test_create_extra,
            {'networks': [{'uuid': net_id, 'fixed_ip': fixed_ip}]})
        self.assertEqual(1, len(create_mock.call_args_list))

    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_neutronv2_invalid_fixed_ip(self,
            create_mock):
        self.flags(use_neutron=True)
        # A syntactically invalid address must fail schema validation
        # before the compute API is ever called.
        bad_net = {'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                   'fixed_ip': '999.0.2.3'}
        self.assertRaises(exception.ValidationError,
                          self._test_create_extra,
                          {'networks': [bad_net]})
        self.assertFalse(create_mock.called)

    @mock.patch.object(compute_api.API, 'create',
                       side_effect=exception.InvalidVolume(reason='error'))
    def test_create_instance_with_invalid_volume_error(self, create_mock):
        # InvalidVolume from the compute layer is translated to a 400.
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, {})
class ServersViewBuilderTest(test.TestCase):
def setUp(self):
    """Build one fully-populated fake instance (three VIFs, two security
    groups), a ViewBuilder, and a request context shared by the tests
    below."""
    super(ServersViewBuilderTest, self).setUp()
    self.flags(use_ipv6=True)
    # Neutron security groups are tested in test_neutron_security_groups.py
    self.flags(use_neutron=False)
    fakes.stub_out_nw_api(self)
    self.flags(group='glance', api_servers=['http://localhost:9292'])
    nw_cache_info = self._generate_nw_cache_info()
    # An ACTIVE, powered-on instance with a populated network cache and
    # two (identically-described) security groups.
    db_inst = fakes.stub_instance(
        id=1,
        image_ref="5",
        uuid=FAKE_UUID,
        display_name="test_server",
        include_fake_metadata=False,
        availability_zone='nova',
        nw_cache=nw_cache_info,
        launched_at=None,
        terminated_at=None,
        security_groups=[
            {'name': 'fake-0-0', 'id': 1, 'description': 'foo',
             'user_id': 'bar', 'project_id': 'baz', 'deleted': False,
             'deleted_at': None, 'updated_at': None, 'created_at': None},
            {'name': 'fake-0-1', 'id': 1, 'description': 'foo',
             'user_id': 'bar', 'project_id': 'baz', 'deleted': False,
             'deleted_at': None, 'updated_at': None, 'created_at': None}],
        task_state=None,
        vm_state=vm_states.ACTIVE,
        power_state=1)
    privates = ['172.19.0.1']
    publics = ['192.168.0.3']
    # NOTE(review): this literal has 10 hex groups, which is not a valid
    # IPv6 address — presumably deliberate fixture data; confirm.
    public6s = ['fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:ddff:fecc:bbaa']

    def nw_info(*args, **kwargs):
        # Fake nw-info: one "public" net with v4+v6, one v4-only "private".
        return [(None, {'label': 'public',
                        'ips': [dict(ip=ip) for ip in publics],
                        'ip6s': [dict(ip=ip) for ip in public6s]}),
                (None, {'label': 'private',
                        'ips': [dict(ip=ip) for ip in privates]})]

    fakes.stub_out_nw_api_get_instance_nw_info(self, nw_info)
    # Stub the DB layers the view builder reaches through for BDMs and
    # cell mappings.
    self.stub_out('nova.db.api.'
                  'block_device_mapping_get_all_by_instance_uuids',
                  fake_bdms_get_all_by_instance_uuids)
    self.stub_out('nova.objects.InstanceMappingList.'
                  '_get_by_instance_uuids_from_db',
                  fake_get_inst_mappings_by_instance_uuids_from_db)
    self.uuid = db_inst['uuid']
    self.view_builder = views.servers.ViewBuilder()
    self.request = fakes.HTTPRequestV21.blank("/fake")
    self.request.context = context.RequestContext('fake', 'fake')
    self.instance = fake_instance.fake_instance_obj(
        self.request.context,
        expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
        **db_inst)
    self.self_link = "http://localhost/v2/fake/servers/%s" % self.uuid
    self.bookmark_link = "http://localhost/fake/servers/%s" % self.uuid
def _generate_nw_cache_info(self):
    """Return a three-VIF network info_cache fixture: two VIFs on network
    1 ('test1', one of them dual-stack) and one VIF on network 2."""
    v4_addrs = ('192.168.1.100', '192.168.2.100', '192.168.3.100')
    v6_addrs = ('2001:db8:0:1::1',)

    def _fixed(addr):
        return {'address': addr, 'type': 'fixed'}

    vif_a = {'address': 'aa:aa:aa:aa:aa:aa',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'test1',
                         'subnets': [{'cidr': '192.168.1.0/24',
                                      'ips': [_fixed(v4_addrs[0])]},
                                     {'cidr': 'b33f::/64',
                                      'ips': [_fixed(v6_addrs[0])]}]}}
    vif_b = {'address': 'bb:bb:bb:bb:bb:bb',
             'id': 2,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'test1',
                         'subnets': [{'cidr': '192.168.2.0/24',
                                      'ips': [_fixed(v4_addrs[1])]}]}}
    vif_c = {'address': 'cc:cc:cc:cc:cc:cc',
             'id': 3,
             'network': {'bridge': 'br0',
                         'id': 2,
                         'label': 'test2',
                         'subnets': [{'cidr': '192.168.3.0/24',
                                      'ips': [_fixed(v4_addrs[2])]}]}}
    return [vif_a, vif_b, vif_c]
def test_get_flavor_valid_instance_type(self):
    """_get_flavor() renders the instance's flavor id plus bookmark link."""
    bookmark = "http://localhost/fake/flavors/1"
    wanted = {"id": "1",
              "links": [{"rel": "bookmark",
                         "href": bookmark}]}
    got = self.view_builder._get_flavor(self.request, self.instance,
                                        False)
    self.assertEqual(got, wanted)
@mock.patch('nova.context.scatter_gather_cells')
def test_get_volumes_attached_with_faily_cells(self, mock_sg):
    # NOTE: "faily" (sic) is kept — renaming would change the test id.
    bdms = fake_bdms_get_all_by_instance_uuids()
    # Simulate a `nova list` where one cell answers and the other errors.
    mock_sg.return_value = {
        uuids.cell1: bdms[0],
        uuids.cell2: exception.BDMNotFound(id='fake')
    }
    ctxt = context.RequestContext('fake', 'fake')
    found = self.view_builder._get_instance_bdms_in_multiple_cells(
        ctxt, [self.instance.uuid])
    # Only the healthy cell's BDMs come back.
    self.assertEqual(found, bdms[0])
    mock_sg.assert_called_once()
def test_build_server(self):
expected_server = {
"server": {
"id": self.uuid,
"name": "test_server",
"links": [
{
"rel": "self",
"href": self.self_link,
},
{
"rel": "bookmark",
"href": self.bookmark_link,
},
],
}
}
| |
"""
usage: train.py [options]
options:
--config=<path> path of the config file
--checkpoint-dir=<dir> directory where to save checkpoint
--checkpoint=<path> path of the checkpoint from which the model is restored
--weight-loss=<list> list of weights in loss function
--loss=<string> names of loss function
--stride-mode=<int> 0, 1 or 2. 0: stride is 2; 1: stride is 1 & 2, changes in each layer; 2: stride is 1
--target=<string> cIRM, IRM, PSM, spec, the training target (output) of the model
--help show this help message
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import librosa
import librosa.display
import matplotlib.pyplot as plt
from docopt import docopt
import logging
import json
import os
import sys
import pdb
import Model
import dataset_5R
import preprocess
import util
import inference
import evaluation
class WPMLoss(nn.Module):
    """Weighted phase-magnitude loss on complex spectrograms.

    loss = mean(dif_mag**2 + weight * dif_theta**2) where dif_mag is the
    magnitude error and dif_theta = 2*mag*sin((theta_hat - theta)/2) is a
    chord-length phase error scaled by the target magnitude (so phase
    errors in silent bins are ignored).

    Args:
        weight (float): relative weight of the phase term.
    """

    def __init__(self, weight):
        super(WPMLoss, self).__init__()
        self.weight = weight

    def forward(self, y_real, y_imag, y_real_hat, y_imag_hat):
        """Compute the loss from real/imag parts of target and prediction.

        Raises:
            ValueError: if the loss is NaN (inputs are dumped to .npy
                files for post-mortem debugging first).
        """
        # BUG FIX: the original allocated an unused `pi` tensor with
        # .cuda() on every call — dead code that also forced a GPU.
        mag = torch.sqrt(y_real**2 + y_imag**2)
        mag_hat = torch.sqrt(y_real_hat**2 + y_imag_hat**2)
        theta = torch.atan2(y_imag, y_real)
        theta_hat = torch.atan2(y_imag_hat, y_real_hat)
        # 0 <= |dif_theta| <= 2*mag; vanishes when phases agree.
        dif_theta = 2 * mag * torch.sin((theta_hat - theta) / 2)
        dif_mag = mag_hat - mag
        loss = torch.mean(dif_mag**2 + self.weight * dif_theta**2)
        if torch.isnan(loss).any():
            # Keep the offending tensors around for debugging.
            np.save('y_real.npy', y_real.data.cpu().numpy())
            np.save('y_imag.npy', y_imag.data.cpu().numpy())
            np.save('y_real_hat.npy', y_real_hat.data.cpu().numpy())
            np.save('y_imag_hat.npy', y_imag_hat.data.cpu().numpy())
            raise ValueError("NAN encountered in loss")
        return loss
def plot_loss(loss_train, loss_test, loss_mag_train, loss_mag_test, loss_phase_train, loss_phase_test, loss_angle_train, loss_angle_test, path):
    """Save four side-by-side train/test loss curves under *path*.

    Produces loss.png, loss_magnitude.png, loss_phase.png and
    loss_angle.png (angle curves are plotted in units of pi).

    Args:
        loss_*: per-epoch loss histories (sequences of floats).
        path: directory in which the .png files are written.
    """
    def _plot_pair(train_data, test_data, train_title, test_title,
                   filename, ylabel="loss"):
        # One figure, two axes: train curve on the left, test on the right.
        fig = plt.figure(figsize=(12, 6))
        ax_train, ax_test = fig.subplots(1, 2)
        for ax, data, title in ((ax_train, train_data, train_title),
                                (ax_test, test_data, test_title)):
            ax.plot(data)
            ax.set_title(title)
            ax.set_xlabel("epoches")
            ax.set_ylabel(ylabel)
        fig.savefig(os.path.join(path, filename))
        plt.close(fig)

    _plot_pair(loss_train, loss_test,
               "loss_train", "loss_test", "loss.png")
    _plot_pair(loss_mag_train, loss_mag_test,
               "loss_train_magnitude", "loss_test_magnitude",
               "loss_magnitude.png")
    _plot_pair(loss_phase_train, loss_phase_test,
               "loss_train_phase", "loss_test_phase", "loss_phase.png")
    # BUG FIX: the train-angle axis was labelled "loss" although the data
    # is divided by pi (the test axis already said "loss / pi").
    # np.asarray also lets plain lists be divided, not just ndarrays.
    _plot_pair(np.asarray(loss_angle_train) / np.pi,
               np.asarray(loss_angle_test) / np.pi,
               "loss_train_angle", "loss_test_angle", "loss_angle.png",
               ylabel="loss / pi")
def save_checkpoint(checkpoint_save_path, model, optimizer = None, filename = None):
    """Serialize model/optimizer state plus the global training counters
    and loss histories to a .pth file.

    Args:
        checkpoint_save_path: directory to write into.
        model: the nn.Module whose state_dict is saved.
        optimizer: optional optimizer; its state_dict is saved when given.
        filename: explicit file name; defaults to
            checkpoint_step{global_step:09d}.pth.
    """
    global loss_train, loss_test
    global loss_mag_train, loss_mag_test, loss_phase_train, loss_phase_test, loss_angle_train, loss_angle_test
    global global_step, global_epoch
    if filename is None:
        checkpoint_path = os.path.join(checkpoint_save_path, "checkpoint_step{:09d}.pth".format(global_step))
    else:
        checkpoint_path = os.path.join(checkpoint_save_path, filename)
    # BUG FIX: optimizer defaults to None but was dereferenced
    # unconditionally, crashing whenever the default was used.
    optimizer_state = optimizer.state_dict() if optimizer is not None else None
    torch.save({
        "state_dict": model.state_dict(),
        "optimizer": optimizer_state,
        "global_step": global_step,
        "global_epoch": global_epoch,
        "loss_train": loss_train,
        "loss_test": loss_test,
        "loss_mag_train": loss_mag_train,
        "loss_mag_test": loss_mag_test,
        "loss_phase_train": loss_phase_train,
        "loss_phase_test": loss_phase_test,
        "loss_angle_train": loss_angle_train,
        "loss_angle_test": loss_angle_test
    }, checkpoint_path)
    print("Saved checkpoint:", checkpoint_path)
def load_checkpoint(path, model, optimizer):
    """Restore model weights, optimizer state and the global training
    counters / loss histories from a checkpoint file.

    Returns True on success, False if the model weights failed to load.
    """
    global loss_train, loss_test
    global loss_mag_train, loss_mag_test, loss_phase_train, loss_phase_test, loss_angle_train, loss_angle_test
    global global_step, global_epoch
    logger.info("Load checkpoint from: {}".format(path))
    checkpoint = torch.load(path)
    try:
        model.load_state_dict(checkpoint["state_dict"])
    except RuntimeError:
        # Architecture mismatch etc. — report failure, leave globals alone.
        logger.info("Failed to load checkpoint from: " + path)
        return False
    optimizer.load_state_dict(checkpoint["optimizer"])
    # Restore counters and histories into this module's globals.
    for key in ("global_step", "global_epoch",
                "loss_train", "loss_test",
                "loss_mag_train", "loss_mag_test",
                "loss_phase_train", "loss_phase_test",
                "loss_angle_train", "loss_angle_test"):
        globals()[key] = checkpoint[key]
    logger.info("checkpoint loaded successfully from: " + path)
    return True
def save_result(out_path, y_hat=None, y_target=None, spec_hat=None, spec_target=None, mask_hat = None, mask = None, phase=None, config=None):
    """Write evaluation artifacts (wav files, spectrogram and mask plots)
    for the current epoch/step into *out_path*.

    Args:
        out_path: output directory (created if missing).
        y_hat / y_target: optional predicted / target waveforms.
        spec_hat / spec_target: optional complex spectrograms to plot.
        mask_hat / mask: optional predicted / target masks (real or complex).
        phase: label ("train"/"test", presumably) embedded in file names.
        config: experiment config; only data.sample_rate is read here.
    """
    global global_step, global_epoch

    def _show(ax, data, title):
        # specshow returns the axes it drew on; attach colorbar + title.
        ax = librosa.display.specshow(data, ax=ax)
        plt.colorbar(ax.get_children()[0], ax=ax)
        ax.set_title(title)
        return ax

    os.makedirs(out_path, exist_ok=True)
    if y_hat is not None:
        path = os.path.join(out_path, "epoch{:04d}_step{:07d}_{}_predicted.wav".format(
            global_epoch, global_step, phase))
        # NOTE(review): librosa.output.write_wav was removed in librosa 0.8;
        # this requires librosa < 0.8 — confirm the pinned version.
        librosa.output.write_wav(path, y_hat, sr=config["data"]["sample_rate"])
    if y_target is not None:
        path = os.path.join(out_path, "epoch{:04d}_step{:07d}_{}_target.wav".format(
            global_epoch, global_step, phase))
        librosa.output.write_wav(path, y_target, sr=config["data"]["sample_rate"])
    if spec_hat is not None and spec_target is not None:
        # 3x2 grid: log-magnitude, real part, imaginary part (target | predicted).
        fig = plt.figure(figsize=(12, 18))
        axes = fig.subplots(3, 2)
        axes[0, 0] = _show(axes[0, 0], np.log10(np.abs(spec_target) + 1e-8), "magnitude_target")
        axes[0, 1] = _show(axes[0, 1], np.log10(np.abs(spec_hat) + 1e-8), "magnitude_predicted")
        axes[1, 0] = _show(axes[1, 0], np.real(spec_target), "spec_real_target")
        axes[1, 1] = _show(axes[1, 1], np.real(spec_hat), "spec_real_predicted")
        axes[2, 0] = _show(axes[2, 0], np.imag(spec_target), "spec_imag_target")
        axes[2, 1] = _show(axes[2, 1], np.imag(spec_hat), "spec_imag_predicted")
        fig.savefig(os.path.join(out_path, "epoch{:04d}_step{:07d}_{}_spectrogram.png".format(
            global_epoch, global_step, phase)))
        plt.close(fig)
    if mask_hat is not None and mask is not None:
        # BUG FIX: `np.complex` was removed in NumPy 1.24 and raised
        # AttributeError; np.issubdtype covers complex64/complex128/etc.
        if np.issubdtype(mask.dtype, np.complexfloating):
            fig = plt.figure(figsize=(12, 18))
            axes = fig.subplots(3, 2)
            # Round-trip the target mask through compression so the plot is
            # comparable to what the network was trained against.
            temp_r = util.target_compression(np.real(mask), config, device='cpu')
            temp_i = util.target_compression(np.imag(mask), config, device='cpu')
            mask_r = util.target_decompression(temp_r, config, device='cpu')
            mask_i = util.target_decompression(temp_i, config, device='cpu')
            mask_compressed = mask_r.cpu().numpy() + 1j * mask_i.cpu().numpy()
            axes[0, 0] = _show(axes[0, 0], np.log10(np.abs(mask_compressed) + 1e-8), "mask_target")
            axes[0, 1] = _show(axes[0, 1], np.log10(np.abs(mask_hat) + 1e-8), "mask_predicted")
            axes[1, 0] = _show(axes[1, 0], np.real(mask_compressed), "mask_real_target")
            axes[1, 1] = _show(axes[1, 1], np.real(mask_hat), "mask_real_predicted")
            axes[2, 0] = _show(axes[2, 0], np.imag(mask_compressed), "mask_imag_target")
            axes[2, 1] = _show(axes[2, 1], np.imag(mask_hat), "mask_imag_predicted")
        else:
            fig = plt.figure(figsize=(12, 12))
            axes = fig.subplots(2, 2)
            mask = util.target_compression(mask, config, device='cpu')
            mask = util.target_decompression(mask, config, device='cpu')
            mask = mask.data.cpu().numpy()
            axes[0, 0] = _show(axes[0, 0], np.log10(np.abs(mask) + 1e-8), "mask_target_log")
            axes[0, 1] = _show(axes[0, 1], np.log10(np.abs(mask_hat) + 1e-8), "mask_predicted_log")
            axes[1, 0] = _show(axes[1, 0], mask, "mask_target_linear")
            axes[1, 1] = _show(axes[1, 1], mask_hat, "mask_predicted_linear")
        fig.savefig(os.path.join(out_path, "epoch{:04d}_step{:07d}_{}_mask.png".format(
            global_epoch, global_step, phase)))
        plt.close(fig)
    #save_spectrogram_plot(path, y_hat, y_target, config["dataset"]["sample_rate"])
def load_config(config_filepath):
    """Parse the JSON config file at *config_filepath* and return it."""
    with open(config_filepath, 'r') as config_file:
        return json.load(config_file)
def setHandler(filename = "/vol/vssp/msos/jz/complex_nn/checkpoint/train.log"):
    """Create and return an append-mode FileHandler with the project's
    timestamped log format.

    Args:
        filename: path of the log file (default: the cluster checkpoint log).

    Returns:
        logging.FileHandler: handler ready to be added to a logger.
    """
    handler = logging.FileHandler(filename=filename, mode='a')
    # Renamed from `format`/`datefmt` locals: `format` shadowed the builtin.
    log_format = '%(asctime)s-[%(levelname)s]: %(message)s'
    date_format = '%m/%d/%Y %I:%M:%S %p'
    handler.setFormatter(logging.Formatter(fmt=log_format, datefmt=date_format))
    return handler
def do_eval(model, wav_clean, wav_reverb, config):
    """Evaluate *model* on one (clean, reverberant) waveform pair.

    Builds the configured training target (cIRM / IRM / PSM / spec) and the
    configured network input (spec / mag / mag_log), runs the model on GPU
    via data_parallel, applies the predicted mask (or takes the predicted
    spectrogram directly), and reconstructs time-domain audio.

    Returns:
        (wav_target, wav_reconstructed, spec_clean, spec_hat, mask, mask_hat)

    NOTE(review): requires CUDA (.cuda() below) — there is no CPU fallback.
    """
    model.eval()
    sample_rate = config["data"]["sample_rate"]  # NOTE(review): unused local
    batch_size = 1
    #audio_file = audio_filelist[np.random.randint(0, len(audio_filelist))]
    #indice = np.random.randint(0, len(sample_filelist))
    # Assumes wav_to_spectrogram returns torch tensors shaped
    # (batch, 2=real/imag, time, freq) — TODO confirm against util.
    spec_clean = util.wav_to_spectrogram(wav_clean, config['data'])
    spec_reverb = util.wav_to_spectrogram(wav_reverb, config['data'])  # torch tensors
    length = spec_clean.size(2)
    if config["training"]["target"] == "cIRM":
        # Complex ideal ratio mask: clean / reverb in the complex domain.
        # NOTE(review): the 1e-8 epsilon is added to the REAL part only,
        # so a reverb bin with real==-1e-8 can still divide by ~zero — confirm.
        spec_clean = spec_clean.data.numpy()
        spec_reverb = spec_reverb.data.numpy()
        mask = (spec_clean[:, 0, :, :] + 1j * spec_clean[:, 1, :, :]) / (spec_reverb[:, 0, :, :] + 1e-8 + 1j * spec_reverb[:, 1, :, :])
        mask_real = np.real(mask).reshape(batch_size, 1, length, -1)
        mask_imag = np.imag(mask).reshape(batch_size, 1, length, -1)
        mask = np.concatenate((mask_real, mask_imag), axis=1)
        mask = torch.FloatTensor(mask)
        spec_clean = torch.FloatTensor(spec_clean)
        spec_reverb = torch.FloatTensor(spec_reverb)
    elif config["training"]["target"] == "IRM":
        # Ideal ratio mask: clean/reverb magnitude ratio.
        mag_clean = torch.sqrt(spec_clean[:, 0, :, :]**2 + spec_clean[:, 1, :, :]**2)
        mag_reverb = torch.sqrt(spec_reverb[:, 0, :, :]**2 + spec_reverb[:, 1, :, :]**2)
        mask = torch.reshape(mag_clean / (mag_reverb + 1e-8), (batch_size, 1, length, -1))
        #mask = util.target_compression(mask, self.config)
    elif config["training"]["target"] == "PSM":
        # Phase-sensitive mask: real part of the complex ratio.
        spec_clean = spec_clean.data.numpy()
        spec_reverb = spec_reverb.data.numpy()
        mask = (spec_clean[:, 0, :, :] + 1j * spec_clean[:, 1, :, :]) / (spec_reverb[:, 0, :, :] + 1e-8 + 1j * spec_reverb[:, 1, :, :])
        mask = np.real(mask).reshape(batch_size, 1, length, -1)
        mask = torch.FloatTensor(mask)
        spec_clean = torch.FloatTensor(spec_clean)
        spec_reverb = torch.FloatTensor(spec_reverb)
        #mask = util.target_compression(mask, self.config)
    # Select the network input representation.
    if config["training"]["input"] == "spec":
        x = spec_reverb
    elif config["training"]["input"] == "mag":
        x = torch.sqrt(spec_reverb[:, 0, :, :]**2 + spec_reverb[:, 1, :, :]**2)
        x = torch.reshape(x, (batch_size, 1, length, -1))
    elif config["training"]["input"] == "mag_log":
        x = torch.sqrt(spec_reverb[:, 0, :, :]**2 + spec_reverb[:, 1, :, :]**2)
        x = torch.reshape(torch.log10(x + 1e-8), (batch_size, 1, length, -1))
    # NOTE(review): `input` shadows the builtin of the same name.
    input = torch.FloatTensor(x).cuda()
    input = util.normalization_0m_1v(input, axis=[1, 3], device='cuda')
    #if config["normalization"]["input"] == "feature-wise":
    #    features = util.normalization_0m_1v(features, axis = 1)
    model.eval()  # NOTE(review): redundant — already called at the top.
    if input.size(2) % 2 == 0:
        # Drop one frame so the time dimension is odd — presumably to match
        # the network's stride/padding; confirm against the model definition.
        input = input[:, :, 0:-1, :]
        spec_clean = spec_clean[:, :, 0:-1, :]
        spec_reverb = spec_reverb[:, :, 0:-1, :]
    if config["training"]["target"] == "cIRM":
        mask_temp = torch.nn.parallel.data_parallel(model, input)  # shape: (n_frame * n_fftbins)
        mask_real = util.target_decompression(mask_temp.data[:, 0, :, :].cpu(), config, device='cuda')
        mask_imag = util.target_decompression(mask_temp.data[:, 1, :, :].cpu(), config, device='cuda')
        mask_hat = mask_real.squeeze().cpu().numpy() + 1j * mask_imag.squeeze().cpu().numpy()
    elif config["training"]["target"] == "IRM" or config["training"]["target"] == "PSM":
        mask_hat = torch.nn.parallel.data_parallel(model, input)
        mask_hat = util.target_decompression(mask_hat.data.cpu(), config, device='cuda')
        mask_hat = mask_hat.squeeze().cpu().numpy()
    elif config["training"]["target"] == "spec":
        # Direct spectrogram prediction — no mask to decompress.
        spec_temp = torch.nn.parallel.data_parallel(model, input)
        spec_hat = spec_temp[:, 0, :, :].data.squeeze().cpu().numpy() + 1j * spec_temp[:, 1, :, :].data.squeeze().cpu().numpy()
    #spec_target = spec_target[:, half_frame_window: -half_frame_window] / config["statistic"]["clean"]["energy"]
    #spec_hat = spec_reverb[half_frame_window:-half_frame_window, :].T / config["statistic"]["reverb"]["energy"] * mask_hat
    #spec_target = spec_target[:, half_frame_window: -half_frame_window]
    # Collapse (batch, 2, T, F) tensors into complex (T, F) numpy arrays.
    spec_clean = spec_clean.data[:, 0, :, :].numpy() + 1j * spec_clean.data[:, 1, :, :].numpy()
    spec_reverb = spec_reverb.data[:, 0, :, :].numpy() + 1j * spec_reverb.data[:, 1, :, :].numpy()
    spec_clean = spec_clean.squeeze()
    spec_reverb = spec_reverb.squeeze()
    if config["training"]["target"] == "cIRM":
        mask = mask.data[:, 0, :, :].squeeze().numpy() + 1j * mask.data[:, 1, :, :].squeeze().numpy()
        spec_hat = spec_reverb * mask_hat
    elif config["training"]["target"] == "IRM" or config["training"]["target"] == "PSM":
        mask = mask.data.squeeze().numpy()
        spec_hat = spec_reverb * mask_hat
    elif config["training"]["target"] == "spec":
        # Reuse the mask slots so the caller gets (target, prediction) pairs.
        mask_hat = spec_hat
        mask = spec_clean
    # Transpose to (freq, time) before reconstruction.
    spec_reverb = spec_reverb.T
    spec_clean = spec_clean.T
    spec_hat = spec_hat.T
    wav_reconstructed = util.audio_reconstruct(spec_hat, config)
    wav_target = util.audio_reconstruct(spec_clean, config)
    return wav_target, wav_reconstructed, spec_clean, spec_hat, mask, mask_hat
def train(model, optimizer, criterion, | |
None if not set
"""
return self["Distance from Top of Thermal Chimney to Inlet 15"]
# Auto-generated accessors for the Zone 15 thermal-chimney inlet fields.
# Each property delegates to the enclosing object's dict-style field
# storage (self[...]); any validation happens in that base class.
@distance_from_top_of_thermal_chimney_to_inlet_15.setter
def distance_from_top_of_thermal_chimney_to_inlet_15(self, value=None):
    """Corresponds to IDD field `Distance from Top of Thermal Chimney to
    Inlet 15`"""
    self["Distance from Top of Thermal Chimney to Inlet 15"] = value

@property
def relative_ratios_of_air_flow_rates_passing_through_zone_15(self):
    """field `Relative Ratios of Air Flow Rates Passing through Zone 15`

    | value <= 1.0

    Args:
        value (float): value for IDD Field `Relative Ratios of Air Flow Rates Passing through Zone 15`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `relative_ratios_of_air_flow_rates_passing_through_zone_15` or None if not set
    """
    return self[
        "Relative Ratios of Air Flow Rates Passing through Zone 15"]

@relative_ratios_of_air_flow_rates_passing_through_zone_15.setter
def relative_ratios_of_air_flow_rates_passing_through_zone_15(
        self,
        value=None):
    """Corresponds to IDD field `Relative Ratios of Air Flow Rates Passing
    through Zone 15`"""
    self[
        "Relative Ratios of Air Flow Rates Passing through Zone 15"] = value

@property
def cross_sectional_areas_of_air_channel_inlet_15(self):
    """field `Cross Sectional Areas of Air Channel Inlet 15`

    | Units: m2

    Args:
        value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 15`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `cross_sectional_areas_of_air_channel_inlet_15` or None if not set
    """
    return self["Cross Sectional Areas of Air Channel Inlet 15"]

@cross_sectional_areas_of_air_channel_inlet_15.setter
def cross_sectional_areas_of_air_channel_inlet_15(self, value=None):
    """Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
    15`"""
    self["Cross Sectional Areas of Air Channel Inlet 15"] = value
# Auto-generated accessors for the Zone 16 thermal-chimney inlet fields;
# they delegate to the enclosing object's dict-style field storage.
@property
def zone_16_name(self):
    """field `Zone 16 Name`

    Args:
        value (str): value for IDD Field `Zone 16 Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `zone_16_name` or None if not set
    """
    return self["Zone 16 Name"]

@zone_16_name.setter
def zone_16_name(self, value=None):
    """Corresponds to IDD field `Zone 16 Name`"""
    self["Zone 16 Name"] = value

@property
def distance_from_top_of_thermal_chimney_to_inlet_16(self):
    """field `Distance from Top of Thermal Chimney to Inlet 16`

    | Units: m

    Args:
        value (float): value for IDD Field `Distance from Top of Thermal Chimney to Inlet 16`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `distance_from_top_of_thermal_chimney_to_inlet_16` or None if not set
    """
    return self["Distance from Top of Thermal Chimney to Inlet 16"]

@distance_from_top_of_thermal_chimney_to_inlet_16.setter
def distance_from_top_of_thermal_chimney_to_inlet_16(self, value=None):
    """Corresponds to IDD field `Distance from Top of Thermal Chimney to
    Inlet 16`"""
    self["Distance from Top of Thermal Chimney to Inlet 16"] = value

@property
def relative_ratios_of_air_flow_rates_passing_through_zone_16(self):
    """field `Relative Ratios of Air Flow Rates Passing through Zone 16`

    | value <= 1.0

    Args:
        value (float): value for IDD Field `Relative Ratios of Air Flow Rates Passing through Zone 16`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `relative_ratios_of_air_flow_rates_passing_through_zone_16` or None if not set
    """
    return self[
        "Relative Ratios of Air Flow Rates Passing through Zone 16"]

@relative_ratios_of_air_flow_rates_passing_through_zone_16.setter
def relative_ratios_of_air_flow_rates_passing_through_zone_16(
        self,
        value=None):
    """Corresponds to IDD field `Relative Ratios of Air Flow Rates Passing
    through Zone 16`"""
    self[
        "Relative Ratios of Air Flow Rates Passing through Zone 16"] = value

@property
def cross_sectional_areas_of_air_channel_inlet_16(self):
    """field `Cross Sectional Areas of Air Channel Inlet 16`

    | Units: m2

    Args:
        value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 16`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `cross_sectional_areas_of_air_channel_inlet_16` or None if not set
    """
    return self["Cross Sectional Areas of Air Channel Inlet 16"]

@cross_sectional_areas_of_air_channel_inlet_16.setter
def cross_sectional_areas_of_air_channel_inlet_16(self, value=None):
    """Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
    16`"""
    self["Cross Sectional Areas of Air Channel Inlet 16"] = value
# Auto-generated accessors for the Zone 17 thermal-chimney inlet fields;
# they delegate to the enclosing object's dict-style field storage.
@property
def zone_17_name(self):
    """field `Zone 17 Name`

    Args:
        value (str): value for IDD Field `Zone 17 Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `zone_17_name` or None if not set
    """
    return self["Zone 17 Name"]

@zone_17_name.setter
def zone_17_name(self, value=None):
    """Corresponds to IDD field `Zone 17 Name`"""
    self["Zone 17 Name"] = value

@property
def distance_from_top_of_thermal_chimney_to_inlet_17(self):
    """field `Distance from Top of Thermal Chimney to Inlet 17`

    | Units: m

    Args:
        value (float): value for IDD Field `Distance from Top of Thermal Chimney to Inlet 17`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `distance_from_top_of_thermal_chimney_to_inlet_17` or None if not set
    """
    return self["Distance from Top of Thermal Chimney to Inlet 17"]

@distance_from_top_of_thermal_chimney_to_inlet_17.setter
def distance_from_top_of_thermal_chimney_to_inlet_17(self, value=None):
    """Corresponds to IDD field `Distance from Top of Thermal Chimney to
    Inlet 17`"""
    self["Distance from Top of Thermal Chimney to Inlet 17"] = value

@property
def relative_ratios_of_air_flow_rates_passing_through_zone_17(self):
    """field `Relative Ratios of Air Flow Rates Passing through Zone 17`

    | value <= 1.0

    Args:
        value (float): value for IDD Field `Relative Ratios of Air Flow Rates Passing through Zone 17`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `relative_ratios_of_air_flow_rates_passing_through_zone_17` or None if not set
    """
    return self[
        "Relative Ratios of Air Flow Rates Passing through Zone 17"]

@relative_ratios_of_air_flow_rates_passing_through_zone_17.setter
def relative_ratios_of_air_flow_rates_passing_through_zone_17(
        self,
        value=None):
    """Corresponds to IDD field `Relative Ratios of Air Flow Rates Passing
    through Zone 17`"""
    self[
        "Relative Ratios of Air Flow Rates Passing through Zone 17"] = value

@property
def cross_sectional_areas_of_air_channel_inlet_17(self):
    """field `Cross Sectional Areas of Air Channel Inlet 17`

    | Units: m2

    Args:
        value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 17`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `cross_sectional_areas_of_air_channel_inlet_17` or None if not set
    """
    return self["Cross Sectional Areas of Air Channel Inlet 17"]

@cross_sectional_areas_of_air_channel_inlet_17.setter
def cross_sectional_areas_of_air_channel_inlet_17(self, value=None):
    """Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
    17`"""
    self["Cross Sectional Areas of Air Channel Inlet 17"] = value
# Auto-generated accessors for the Zone 18 thermal-chimney inlet fields;
# they delegate to the enclosing object's dict-style field storage.
@property
def zone_18_name(self):
    """field `Zone 18 Name`

    Args:
        value (str): value for IDD Field `Zone 18 Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `zone_18_name` or None if not set
    """
    return self["Zone 18 Name"]

@zone_18_name.setter
def zone_18_name(self, value=None):
    """Corresponds to IDD field `Zone 18 Name`"""
    self["Zone 18 Name"] = value

@property
def distance_from_top_of_thermal_chimney_to_inlet_18(self):
    """field `Distance from Top of Thermal Chimney to Inlet 18`

    | Units: m

    Args:
        value (float): value for IDD Field `Distance from Top of Thermal Chimney to Inlet 18`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `distance_from_top_of_thermal_chimney_to_inlet_18` or None if not set
    """
    return self["Distance from Top of Thermal Chimney to Inlet 18"]

@distance_from_top_of_thermal_chimney_to_inlet_18.setter
def distance_from_top_of_thermal_chimney_to_inlet_18(self, value=None):
    """Corresponds to IDD field `Distance from Top of Thermal Chimney to
    Inlet 18`"""
    self["Distance from Top of Thermal Chimney to Inlet 18"] = value

@property
def relative_ratios_of_air_flow_rates_passing_through_zone_18(self):
    """field `Relative Ratios of Air Flow Rates Passing through Zone 18`

    | value <= 1.0

    Args:
        value (float): value for IDD Field `Relative Ratios of Air Flow Rates Passing through Zone 18`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `relative_ratios_of_air_flow_rates_passing_through_zone_18` or None if not set
    """
    return self[
        "Relative Ratios of Air Flow Rates Passing through Zone 18"]

@relative_ratios_of_air_flow_rates_passing_through_zone_18.setter
def relative_ratios_of_air_flow_rates_passing_through_zone_18(
        self,
        value=None):
    """Corresponds to IDD field `Relative Ratios of Air Flow Rates Passing
    through Zone 18`"""
    self[
        "Relative Ratios of Air Flow Rates Passing through Zone 18"] = value

@property
def cross_sectional_areas_of_air_channel_inlet_18(self):
    """field `Cross Sectional Areas of Air Channel Inlet 18`

    | Units: m2

    Args:
        value (float): value for IDD Field `Cross Sectional Areas of Air Channel Inlet 18`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `cross_sectional_areas_of_air_channel_inlet_18` or None if not set
    """
    return self["Cross Sectional Areas of Air Channel Inlet 18"]

@cross_sectional_areas_of_air_channel_inlet_18.setter
def cross_sectional_areas_of_air_channel_inlet_18(self, value=None):
    """Corresponds to IDD field `Cross Sectional Areas of Air Channel Inlet
    18`"""
    self["Cross Sectional Areas of Air Channel Inlet 18"] = value
# Auto-generated accessor for the Zone 19 name field; delegates to the
# enclosing object's dict-style field storage.
@property
def zone_19_name(self):
    """field `Zone 19 Name`

    Args:
        value (str): value for IDD Field `Zone 19 Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `zone_19_name` or None if not set
    """
    return self["Zone 19 Name"]
@zone_19_name.setter
def zone_19_name(self, value=None):
| |
self.rpnLsb[channel]==self.rpnMsb[channel]==0:
self.semitoneRange[channel]=value
def patch_change(self, channel, patch): pass  # program (instrument) change: ignored by this consumer
def channel_pressure(self, channel, pressure): pass  # channel aftertouch: ignored
def pitch_bend(self, channel, value):
    """Convert a pitch-bend MSB into a semitone offset for *channel*.

    Pitch bend is sometimes used for slurs, so we'd better interpret it
    (only the MSB for now; the full 14-bit range is over 8192).
    """
    # 64 is the neutral MSB; scale the deviation by the channel's bend range.
    deviation = value - 64
    self.semitonesAdd[channel] = deviation * self.semitoneRange[channel] / 64.0
# System-exclusive / system-common messages carry no note data for the
# beeper; these stubs satisfy the parser's out-stream interface.
def sysex_event(self, data): pass
def midi_time_code(self, msg_type, values): pass
def song_position_pointer(self, value): pass
def song_select(self, songNumber): pass
def tuning_request(self): pass
def header(self, format=0, nTracks=1, division=96):
    """Record global file parameters from the MThd chunk.

    :param format: SMF format; format 1 stores simultaneous tracks, which
        must therefore be interleaved when the file ends.
    :param nTracks: declared number of tracks (not needed here).
    :param division: ticks per quarter note, kept for tempo arithmetic.
    """
    self.division = division
    # Remember whether tracks play simultaneously and need merging at eof().
    self.need_to_interleave_tracks = (format == 1)
    # Start with a single, empty track buffer.
    self.tracks = [[]]
def eof(self):
    """Flush buffered format-1 tracks by merging them time-wise.

    Each track is a list of ``[notes, duration]`` heads; the merge
    repeatedly takes the shortest pending duration across all tracks,
    emits the union of the currently-sounding notes for that span, then
    advances every track by the consumed time.
    """
    if self.need_to_interleave_tracks:
        while True: # delete empty tracks
            try: self.tracks.remove([])
            except ValueError: break
        while self.tracks:
            # Shortest remaining duration among the heads of all tracks.
            minLen = min([t[0][1] for t in self.tracks])
            # Union (as dict keys) of the notes sounding on every track.
            d = {}
            for t in self.tracks: d.update([(n,1) for n in t[0][0]])
            dedup_midi_note_chord(d.keys(),minLen)
            for t in self.tracks:
                t[0][1] -= minLen
                # Drop head entries whose time has been fully consumed.
                if t[0][1]==0: del t[0]
            while True: # delete empty tracks
                try: self.tracks.remove([])
                except ValueError: break
def meta_event(self, meta_type, data): pass  # unrecognised meta events are ignored
def start_of_track(self, n_track=0):
    """Begin a new track: reset the clock and advance the track counter."""
    self.reset_time()
    self._current_track += 1
    # Format-1 files buffer each track separately for interleaving at eof().
    if self.need_to_interleave_tracks: self.tracks.append([])
# Text/metadata events require no action for beeper output; the empty
# handlers merely satisfy the parser's out-stream interface.
def end_of_track(self): pass
def sequence_number(self, value): pass
def text(self, text): pass
def copyright(self, text): pass
def sequence_name(self, text): pass
def instrument_name(self, text): pass
def lyric(self, text): pass
def marker(self, text): pass
def cuepoint(self, text): pass
def program_name(self,progname): pass
def device_name(self,devicename): pass
def midi_ch_prefix(self, channel): pass
def midi_port(self, value): pass
def tempo(self, value):
    """Record the tempo as microseconds per division (tick).

    ``value`` is the tempo meta-event payload (microseconds per quarter
    note); dividing by ``self.division`` (ticks per quarter note) yields
    the per-tick duration.
    """
    # TODO if need_to_interleave_tracks, and tempo is not already put in on all tracks, and there's a tempo command that's not at the start and/or not on 1st track, we may need to do something
    self.microsecsPerDivision = value/self.division
# Timing/key-signature metadata is not needed for note extraction; ignore.
def smtp_offset(self, hour, minute, second, frame, framePart): pass
def time_signature(self, nn, dd, cc, bb): pass
def key_signature(self, sf, mi): pass
def sequencer_specific(self, data): pass
class RawInstreamFile:
    """Random-access byte buffer over a MIDI file's raw contents.

    Accepts a filename, an already-open file object, or nothing (the data
    can be supplied later via setData).  A cursor tracks the read position.
    """

    def __init__(self, infile=''):
        if infile:
            if isinstance(infile, StringType):
                # A filename: slurp the whole file, then close it.
                handle = open(infile, 'rb')
                self.data = handle.read()
                handle.close()
            else:
                # Assume a file-like object opened by the caller.
                self.data = infile.read()
        else:
            self.data = ''
        self.cursor = 0

    def setData(self, data=''):
        """Replace the buffer contents (the cursor is left untouched)."""
        self.data = data

    def setCursor(self, position=0):
        """Jump to an absolute position."""
        self.cursor = position

    def getCursor(self):
        """Return the current absolute position."""
        return self.cursor

    def moveCursor(self, relative_position=0):
        """Advance (or rewind) the cursor by a relative amount."""
        self.cursor += relative_position

    def nextSlice(self, length, move_cursor=1):
        """Return the next *length* bytes; advance unless move_cursor is false."""
        start = self.cursor
        chunk = self.data[start:start + length]
        if move_cursor:
            self.moveCursor(length)
        return chunk

    def readBew(self, n_bytes=1, move_cursor=1):
        """Read *n_bytes* as a big-endian word via the module-level helper."""
        return readBew(self.nextSlice(n_bytes, move_cursor))

    def readVarLen(self):
        """Read a MIDI variable-length quantity and advance past it."""
        MAX_VARLEN = 4  # a variable-length quantity never exceeds four bytes
        value = readVar(self.nextSlice(MAX_VARLEN, 0))
        self.moveCursor(varLen(value))
        return value
class EventDispatcher:
    """Routes decoded MIDI events from the parser to an out-stream object.

    The out-stream is any object implementing the MidiOutStream-style
    interface (header, note_on/note_off, the meta handlers, ...).
    """

    def __init__(self, outstream):
        # Target object receiving all dispatched events.
        self.outstream = outstream
        # Note-on with velocity 0 is conventionally a note-off; convert it.
        self.convert_zero_velocity = 1
        # When set, controller changes route through continuous_controllers()
        # instead of going to the stream directly.
        self.dispatch_continuos_controllers = 1
        self.dispatch_meta_events = 1

    def header(self, format, nTracks, division):
        """Forward the MThd header values."""
        self.outstream.header(format, nTracks, division)

    def start_of_track(self, current_track):
        """Tell the stream which track is starting."""
        self.outstream.set_current_track(current_track)
        self.outstream.start_of_track(current_track)

    def sysex_event(self, data):
        """Forward a system-exclusive payload."""
        self.outstream.sysex_event(data)

    def eof(self):
        """Signal end of file."""
        self.outstream.eof()

    def update_time(self, new_time=0, relative=1):
        """Forward a (relative or absolute) time update."""
        self.outstream.update_time(new_time, relative)

    def reset_time(self):
        """Reset the stream's running time (start of a new track)."""
        self.outstream.reset_time()

    def channel_messages(self, hi_nible, channel, data):
        """Decode and dispatch a channel voice message.

        :param hi_nible: high nibble of the status byte (message type)
        :param channel: low nibble of the status byte (0-15)
        :param data: raw data bytes for the message
        :raises ValueError: if the high nibble is not a known message type
        """
        stream = self.outstream
        data = toBytes(data)
        if (NOTE_ON & 0xF0) == hi_nible:
            note, velocity = data
            if velocity == 0 and self.convert_zero_velocity:
                # A velocity-0 note-on is a note-off in disguise; 0x40 is
                # the conventional default release velocity.
                stream.note_off(channel, note, 0x40)
            else:
                stream.note_on(channel, note, velocity)
        elif (NOTE_OFF & 0xF0) == hi_nible:
            note, velocity = data
            stream.note_off(channel, note, velocity)
        elif (AFTERTOUCH & 0xF0) == hi_nible:
            note, velocity = data
            stream.aftertouch(channel, note, velocity)
        elif (CONTINUOUS_CONTROLLER & 0xF0) == hi_nible:
            controller, value = data
            if self.dispatch_continuos_controllers:
                self.continuous_controllers(channel, controller, value)
            else:
                stream.continuous_controller(channel, controller, value)
        elif (PATCH_CHANGE & 0xF0) == hi_nible:
            program = data[0]
            stream.patch_change(channel, program)
        elif (CHANNEL_PRESSURE & 0xF0) == hi_nible:
            pressure = data[0]
            stream.channel_pressure(channel, pressure)
        elif (PITCH_BEND & 0xF0) == hi_nible:
            hibyte, lobyte = data
            # 14-bit value: MSB then LSB, seven bits each.
            value = (hibyte << 7) + lobyte
            stream.pitch_bend(channel, value)
        else:
            # Py2/py3-compatible raise form (was "raise ValueError, '...'").
            raise ValueError('Illegal channel message!')

    def continuous_controllers(self, channel, controller, value):
        """Dispatch a continuous-controller change."""
        stream = self.outstream
        stream.continuous_controller(channel, controller, value)

    def system_commons(self, common_type, common_data):
        """Decode and dispatch a system common message."""
        stream = self.outstream
        if common_type == MTC:
            data = readBew(common_data)
            # The MTC quarter-frame data byte is 0nnn dddd: the message type
            # is in bits 4-6 and the value in the low nibble.  The previous
            # mask "(data & 0x07) >> 4" always produced 0.
            msg_type = (data & 0x70) >> 4
            values = (data & 0x0F)
            stream.midi_time_code(msg_type, values)
        elif common_type == SONG_POSITION_POINTER:
            hibyte, lobyte = toBytes(common_data)
            value = (hibyte << 7) + lobyte
            stream.song_position_pointer(value)
        elif common_type == SONG_SELECT:
            data = readBew(common_data)
            stream.song_select(data)
        elif common_type == TUNING_REQUEST:
            # tuning_request() takes no payload; the previous call passed an
            # unexpected time=None keyword, raising TypeError.
            stream.tuning_request()

    def meta_events(self, meta_type, data):
        """Decode and dispatch a meta event."""
        stream = self.outstream
        if meta_type == SEQUENCE_NUMBER:
            number = readBew(data)
            stream.sequence_number(number)
        elif meta_type == TEXT:
            stream.text(data)
        elif meta_type == COPYRIGHT:
            stream.copyright(data)
        elif meta_type == SEQUENCE_NAME:
            stream.sequence_name(data)
        elif meta_type == INSTRUMENT_NAME:
            stream.instrument_name(data)
        elif meta_type == LYRIC:
            stream.lyric(data)
        elif meta_type == MARKER:
            stream.marker(data)
        elif meta_type == CUEPOINT:
            stream.cuepoint(data)
        elif meta_type == PROGRAM_NAME:
            stream.program_name(data)
        elif meta_type == DEVICE_NAME:
            stream.device_name(data)
        elif meta_type == MIDI_CH_PREFIX:
            channel = readBew(data)
            stream.midi_ch_prefix(channel)
        elif meta_type == MIDI_PORT:
            port = readBew(data)
            stream.midi_port(port)
        elif meta_type == END_OF_TRACK:
            stream.end_of_track()
        elif meta_type == TEMPO:
            b1, b2, b3 = toBytes(data)
            # Tempo is a 24-bit big-endian value (microsecs per quarter note).
            stream.tempo((b1 << 16) + (b2 << 8) + b3)
        elif meta_type == SMTP_OFFSET:
            hour, minute, second, frame, framePart = toBytes(data)
            stream.smtp_offset(
                hour, minute, second, frame, framePart)
        elif meta_type == TIME_SIGNATURE:
            nn, dd, cc, bb = toBytes(data)
            stream.time_signature(nn, dd, cc, bb)
        elif meta_type == KEY_SIGNATURE:
            sf, mi = toBytes(data)
            stream.key_signature(sf, mi)
        elif meta_type == SPECIFIC:
            meta_data = toBytes(data)
            stream.sequencer_specific(meta_data)
        else:
            # Unknown meta type: pass it through raw.
            meta_data = toBytes(data)
            stream.meta_event(meta_type, meta_data)
class MidiFileParser:
    """Walks a raw MIDI byte stream and feeds events to an EventDispatcher."""

    def __init__(self, raw_in, outstream):
        self.raw_in = raw_in
        self.dispatch = EventDispatcher(outstream)
        self._running_status = None  # last explicit status byte seen

    def parseMThdChunk(self):
        """Parse the MThd header chunk and announce it to the dispatcher.

        :raises TypeError: if the stream does not start with an MThd chunk.
        """
        raw_in = self.raw_in
        header_chunk_type = raw_in.nextSlice(4)
        header_chunk_size = raw_in.readBew(4)
        if header_chunk_type != 'MThd':
            # Py2/py3-compatible raise form (was "raise TypeError, ...").
            raise TypeError("It is not a valid midi file!")
        self.format = raw_in.readBew(2)
        self.nTracks = raw_in.readBew(2)
        self.division = raw_in.readBew(2)
        # Skip any extra header bytes beyond the standard six.
        if header_chunk_size > 6:
            raw_in.moveCursor(header_chunk_size - 6)
        self.dispatch.header(self.format, self.nTracks, self.division)

    def parseMTrkChunk(self):
        """Parse one MTrk chunk, dispatching every event it contains."""
        self.dispatch.reset_time()
        dispatch = self.dispatch
        raw_in = self.raw_in
        dispatch.start_of_track(self._current_track)
        raw_in.moveCursor(4)  # skip the 'MTrk' tag
        tracklength = raw_in.readBew(4)
        track_endposition = raw_in.getCursor() + tracklength
        while raw_in.getCursor() < track_endposition:
            time = raw_in.readVarLen()
            dispatch.update_time(time)
            peak_ahead = raw_in.readBew(move_cursor=0)
            if (peak_ahead & 0x80):
                # New status byte; remember it for running status.
                status = self._running_status = raw_in.readBew()
            else:
                # Data byte first: reuse the last (running) status.
                status = self._running_status
            hi_nible, lo_nible = status & 0xF0, status & 0x0F
            if status == META_EVENT:
                meta_type = raw_in.readBew()
                meta_length = raw_in.readVarLen()
                meta_data = raw_in.nextSlice(meta_length)
                dispatch.meta_events(meta_type, meta_data)
            elif status == SYSTEM_EXCLUSIVE:
                sysex_length = raw_in.readVarLen()
                sysex_data = raw_in.nextSlice(sysex_length - 1)
                # Consume the trailing End-Of-Exclusive byte if present.
                if raw_in.readBew(move_cursor=0) == END_OFF_EXCLUSIVE:
                    eo_sysex = raw_in.readBew()
                dispatch.sysex_event(sysex_data)
            elif hi_nible == 0xF0:
                # System common message.  Both the size table and the
                # dispatcher compare against the full status byte (MTC,
                # SONG_POSITION_POINTER, ...), so look up and forward
                # "status" - the old code keyed the table on hi_nible
                # (always reading 0 bytes) and forwarded lo_nible (which
                # never matched the dispatcher's constants).
                data_sizes = {
                    MTC: 1,
                    SONG_POSITION_POINTER: 2,
                    SONG_SELECT: 1,
                }
                data_size = data_sizes.get(status, 0)
                common_data = raw_in.nextSlice(data_size)
                # The dispatcher method is system_commons (plural); the old
                # call to dispatch.system_common raised AttributeError.
                dispatch.system_commons(status, common_data)
            else:
                # Channel voice message: data size keyed by the high nibble.
                data_sizes = {
                    PATCH_CHANGE: 1,
                    CHANNEL_PRESSURE: 1,
                    NOTE_OFF: 2,
                    NOTE_ON: 2,
                    AFTERTOUCH: 2,
                    CONTINUOUS_CONTROLLER: 2,
                    PITCH_BEND: 2,
                }
                data_size = data_sizes.get(hi_nible, 0)
                channel_data = raw_in.nextSlice(data_size)
                event_type, channel = hi_nible, lo_nible
                dispatch.channel_messages(event_type, channel, channel_data)

    def parseMTrkChunks(self):
        """Parse every declared track in order, then signal end-of-file."""
        for t in range(self.nTracks):
            self._current_track = t
            self.parseMTrkChunk()
        self.dispatch.eof()
class MidiInFile:
    """Convenience front-end: parse a MIDI file into an out-stream."""

    def __init__(self, outStream, infile=''):
        self.raw_in = RawInstreamFile(infile)
        self.parser = MidiFileParser(self.raw_in, outStream)

    def read(self):
        """Parse the header chunk, then all track chunks."""
        parser = self.parser
        parser.parseMThdChunk()
        parser.parseMTrkChunks()

    def setData(self, data=''):
        """Inject raw bytes directly instead of reading from a file."""
        self.raw_in.setData(data)
# Very old Pythons lack the any()/all() builtins; provide pure-Python
# fallbacks.  Catch NameError specifically instead of a bare except so
# unrelated errors are not silently swallowed.
try: any
except NameError: # Python 2.3 (RISC OS?)
    def any(x):
        """Return True if any element of x is truthy."""
        for i in x:
            if i: return True
        return False
    def all(x):
        """Return True if every element of x is truthy."""
        for i in x:
            if not i: return False
        return True
# Banner goes to stderr so the generated BASIC on stdout stays clean for
# redirection into a file.
name = "BeebMid"
sys.stderr.write(name+" (c) 2007-2010, 2015-2020 <NAME>. - (c) d1ddle 2021 (https://d1ddle.com) License: Apache 2.0\n")
# No MIDI filenames supplied: show the syntax summary (the --player and
# --info modes below produce their own output and exit instead).
if not player_only and len(sys.argv)<2:
    if not info_only:
        if not art:
            sys.stderr.write("Syntax: py -2 midi-beeper.py [options] MIDI-filename ...\nOptions: --bbc | --data | --player | --info | --art\n") # (all BBC-Micro related)
elif not info_only and len(sys.argv)<2:
    sys.stderr.write("Syntax: py -2 midi-beeper.py [options] MIDI-filename ...\nOptions: --bbc | --data | --player | --info | --art\n") # (all BBC-Micro related)
elif player_only:
    # --player: emit the first 16 lines (the player program) and stop.
    bbc_micro = "\n".join(bbc_micro).split("\n")
    bbc_micro_player = []
    for i in range(0,16):
        bbc_micro_player.append(bbc_micro[i])
    print "\nAU.0,1"
    print "\n".join(bbc_micro_player)
    sys.exit(1)
elif info_only:
    # --info: print the BBC Micro / BeebEm usage walk-through and stop.
    print "\n\n ----WELCOME TO BEEBMID----"
    print " --(c) <NAME> (c) d1ddle (https://d1ddle.com) 2007-2021--"
    print "\n---- Copy, paste and save the player (--player) to your BBC DFS as 'PLAYER' ----"
    print " ---- Copy, paste (--data) and 'RUN' to save DATA file ----"
    print '\nThen run the following on your BBC/BeebEm:\n\n\nCH."PLAYER"\n*Input Data File Name*\n\nRUN\n'
    print "\nYour music should play.\nThis means you can stream data from Disk and also repace music without regenerating the whole program.\nEventually I aim to expand this as a BBC midi editor."
    sys.exit(1)
for midiFile in sys.argv[1:]:
init() ; dedup_chord,dedup_microsec = [],0
dedup_microsec_error = 0
sys.stderr.write("Parsing MIDI file "+midiFile+"\n")
MidiInFile(MidiToBeep(), open(midiFile,"rb")).read()
dedup_midi_note_chord([],None) # ensure flushed
if bbc_micro:
if len(bbc_micro)>1 and len(bbc_micro[-1])<233: bbc_micro[-1] += ",255,0"
else: bbc_micro.append("D.255,0")
if data_only:
bbc_micro = "\n".join(bbc_micro).split("\n")
bbc_micro_data = []
for i in range(16,len(bbc_micro)):
bbc_micro_data.append(bbc_micro[i])
if player_only:
bbc_micro = "\n".join(bbc_micro).split("\n")
print "\nPlayer length: 17\n"
bbc_micro_player = []
for i in range(0,15):
bbc_micro_player.append(bbc_micro[i])
bbc_micro = "\n".join(bbc_micro).split("\n")
if len(bbc_micro) > 3277: bbc_micro.insert(0,"AU."+str(32768-len(bbc_micro))+",1") # (although if this is the case, program is extremely likely to exhaust the memory even in Bas128)
else: bbc_micro.insert(0,"AU."+str(32770-10*len(bbc_micro)))
if player_only:
print "5 MODE 0"
print "10 CLS"
print "20 CLOSE #0"
print '30 PRINTTAB(0,1) "BEEBMID SOFTWARE - PLAYER 0.2.0"'
print '40 PRINT "(c) D1DDLE 2021 - (HTTPS://D1DDLE.COM)"'
print '50 PRINT " ____ _ __ __ | |
#!/usr/bin/env python3
""" Command Line Arguments """
import argparse
import logging
import os
import platform
import re
import sys
import textwrap
from importlib import import_module
from lib.logger import crash_log, log_setup
from lib.utils import safe_shutdown
from lib.model.masks import get_available_masks, get_default_mask
from plugins.plugin_loader import PluginLoader
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ScriptExecutor():
    """ Loads the relevant script modules and executes the script.

    This class is initialised in each of the argparsers for the relevant
    command, then execute_script is called within their set_default
    function. """
    def __init__(self, command, subparsers=None):
        self.command = command.lower()
        self.subparsers = subparsers

    def import_script(self):
        """ Only import a script's modules when running that script.

        Returns the script class named after the command from either the
        tools or the scripts package. """
        self.test_for_tf_version()
        self.test_for_gui()
        cmd = os.path.basename(sys.argv[0])
        src = "tools" if cmd == "tools.py" else "scripts"
        mod = ".".join((src, self.command.lower()))
        module = import_module(mod)
        script = getattr(module, self.command.title())
        return script

    @staticmethod
    def _parse_version(version_string):
        """ Return the leading (major, minor) components of a version string
        as a tuple of ints.

        Non-numeric suffixes (e.g. "0rc1") are truncated. Tuples compare
        correctly where floats do not (as floats, 1.9 > 1.12). """
        components = []
        for chunk in version_string.split(".")[:2]:
            digits = ""
            for char in chunk:
                if not char.isdigit():
                    break
                digits += char
            components.append(int(digits or "0"))
        return tuple(components)

    @staticmethod
    def test_for_tf_version():
        """ Check that the minimum required Tensorflow version is installed.

        Exits the process if Tensorflow is missing or too old. """
        min_ver = (1, 12)
        try:
            import tensorflow as tf
        except ImportError:
            logger.error("Tensorflow is a requirement but is not installed on your system.")
            sys.exit(1)
        tf_ver = ScriptExecutor._parse_version(tf.__version__)
        # Tuple comparison fixes the old float comparison, which accepted
        # e.g. 1.9 because 1.9 > 1.12 as floats.
        if tf_ver < min_ver:
            logger.error("The minimum supported Tensorflow is version %s but you have version "
                         "%s installed. Please upgrade Tensorflow.",
                         ".".join(str(part) for part in min_ver),
                         ".".join(str(part) for part in tf_ver))
            sys.exit(1)
        logger.debug("Installed Tensorflow Version: %s",
                     ".".join(str(part) for part in tf_ver))

    def test_for_gui(self):
        """ If running the gui, check the prerequisites """
        if self.command != "gui":
            return
        self.test_tkinter()
        self.check_display()

    @staticmethod
    def test_tkinter():
        """ If the user is running the GUI, test whether the
        tkinter app is available on their machine. If not
        exit gracefully.

        This avoids having to import every tk function
        within the GUI in a wrapper and potentially spamming
        traceback errors to console """
        try:
            # pylint: disable=unused-variable
            import tkinter  # noqa pylint: disable=unused-import
        except ImportError:
            logger.warning(
                "It looks like TkInter isn't installed for your OS, so "
                "the GUI has been disabled. To enable the GUI please "
                "install the TkInter application. You can try:")
            logger.info("Anaconda: conda install tk")
            logger.info("Windows/macOS: Install ActiveTcl Community Edition from "
                        "http://www.activestate.com")
            logger.info("Ubuntu/Mint/Debian: sudo apt install python3-tk")
            logger.info("Arch: sudo pacman -S tk")
            logger.info("CentOS/Redhat: sudo yum install tkinter")
            logger.info("Fedora: sudo dnf install python3-tkinter")
            sys.exit(1)

    @staticmethod
    def check_display():
        """ Check whether there is a display to output the GUI. If running on
        Windows then assume not running in headless mode """
        if not os.environ.get("DISPLAY", None) and os.name != "nt":
            logger.warning("No display detected. GUI mode has been disabled.")
            if platform.system() == "Darwin":
                logger.info("macOS users need to install XQuartz. "
                            "See https://support.apple.com/en-gb/HT201341")
            sys.exit(1)

    def execute_script(self, arguments):
        """ Run the script for the called command.

        Sets up logging, runs the script's process() and guarantees a safe
        shutdown, writing a crash report on unexpected exceptions. """
        log_setup(arguments.loglevel, arguments.logfile, self.command)
        logger.debug("Executing: %s. PID: %s", self.command, os.getpid())
        try:
            script = self.import_script()
            process = script(arguments)
            process.process()
        except KeyboardInterrupt:  # pylint: disable=try-except-raise
            raise
        except SystemExit:
            pass
        except Exception:  # pylint: disable=broad-except
            crash_file = crash_log()
            logger.exception("Got Exception on main handler:")
            logger.critical("An unexpected crash has occurred. Crash report written to %s. "
                            "Please verify you are running the latest version of faceswap "
                            "before reporting", crash_file)
        finally:
            safe_shutdown()
class Radio(argparse.Action):  # pylint: disable=too-few-public-methods
    """ Adds support for the GUI Radio buttons.

    A thin wrapper telling the gui to render radio buttons rather than combo
    boxes; as an argparse action it simply stores the selected value. """
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # A radio group selects exactly one value, so nargs makes no sense.
        if nargs is None:
            super().__init__(option_strings, dest, **kwargs)
        else:
            raise ValueError("nargs not allowed")

    def __call__(self, parser, namespace, values, option_string=None):
        # Same behaviour as the default "store" action.
        setattr(namespace, self.dest, values)
class Slider(argparse.Action):  # pylint: disable=too-few-public-methods
    """ Adds support for the GUI slider.

    An additional option 'min_max' must be provided containing a tuple of the
    minimum and maximum accepted values.  'rounding' sets the decimal places
    for floats or the step interval for ints. """
    def __init__(self, option_strings, dest, nargs=None, min_max=None, rounding=None, **kwargs):
        # A slider holds a single value, so nargs makes no sense.
        if nargs is None:
            super().__init__(option_strings, dest, **kwargs)
        else:
            raise ValueError("nargs not allowed")
        self.min_max = min_max
        self.rounding = rounding

    def _get_kwargs(self):
        # Standard argparse attributes plus the two slider-only extras the
        # gui introspects.
        standard = ["option_strings", "dest", "nargs", "const", "default",
                    "type", "choices", "help", "metavar"]
        extras = ["min_max",   # tuple containing min and max values of scale
                  "rounding"]  # decimal places (floats) or step interval (ints)
        return [(attr, getattr(self, attr)) for attr in standard + extras]

    def __call__(self, parser, namespace, values, option_string=None):
        # Same behaviour as the default "store" action.
        setattr(namespace, self.dest, values)
class FullPaths(argparse.Action):  # pylint: disable=too-few-public-methods
    """ Expand user- and relative-paths to absolute paths. """
    def __call__(self, parser, namespace, values, option_string=None):
        def _expand(path):
            return os.path.abspath(os.path.expanduser(path))
        if isinstance(values, (list, tuple)):
            resolved = [_expand(val) for val in values]
        else:
            resolved = _expand(values)
        setattr(namespace, self.dest, resolved)
class DirFullPaths(FullPaths):
    """ Class that gui uses to determine if you need to open a directory.

    Marker subclass only: path expansion behaviour is inherited unchanged
    from FullPaths. """
    # pylint: disable=too-few-public-methods,unnecessary-pass
    pass
class FileFullPaths(FullPaths):
    """
    Class that gui uses to determine if you need to open a file.

    see lib/gui/utils.py FileHandler for current GUI filetypes
    """
    # pylint: disable=too-few-public-methods
    def __init__(self, option_strings, dest, nargs=None, filetypes=None, **kwargs):
        super().__init__(option_strings, dest, nargs, **kwargs)
        # File-type filter the gui uses for its open dialogue.
        self.filetypes = filetypes

    def _get_kwargs(self):
        # Standard argparse attributes plus the gui-only "filetypes".
        attributes = ["option_strings", "dest", "nargs", "const", "default",
                      "type", "choices", "help", "metavar", "filetypes"]
        return [(attr, getattr(self, attr)) for attr in attributes]
class FilesFullPaths(FileFullPaths):  # pylint: disable=too-few-public-methods
    """ Class that the gui uses to determine that the input can take multiple files as an input.
    Inherits functionality from FileFullPaths
    Has the effect of giving the user 2 Open Dialogue buttons in the gui """
    # Marker subclass only: behaviour is inherited unchanged.
    pass
class DirOrFileFullPaths(FileFullPaths):  # pylint: disable=too-few-public-methods
    """ Class that the gui uses to determine that the input can take a folder or a filename.
    Inherits functionality from FileFullPaths
    Has the effect of giving the user 2 Open Dialogue buttons in the gui """
    # Marker subclass only: behaviour is inherited unchanged.
    pass
class SaveFileFullPaths(FileFullPaths):
    """
    Class that gui uses to determine if you need to save a file.

    see lib/gui/utils.py FileHandler for current GUI filetypes
    """
    # pylint: disable=too-few-public-methods,unnecessary-pass
    # Marker subclass only: behaviour is inherited unchanged.
    pass
class ContextFullPaths(FileFullPaths):
    """
    Class that gui uses to determine if you need to open a file or a
    directory based on which action you are choosing.

    To use ContextFullPaths the action_option item should indicate which
    cli option dictates the context of the filesystem dialogue.

    Bespoke actions are then set in lib/gui/utils.py FileHandler
    """
    # pylint: disable=too-few-public-methods, too-many-arguments
    def __init__(self, option_strings, dest, nargs=None, filetypes=None,
                 action_option=None, **kwargs):
        # A context path is a single value, so nargs makes no sense.
        if nargs is not None:
            raise ValueError("nargs not allowed")
        # The parent is initialised without filetypes; both gui attributes
        # are assigned explicitly afterwards.
        super(ContextFullPaths, self).__init__(option_strings, dest,
                                               filetypes=None, **kwargs)
        self.action_option = action_option
        self.filetypes = filetypes

    def _get_kwargs(self):
        # Standard argparse attributes plus the two gui-only extras.
        attributes = ["option_strings", "dest", "nargs", "const", "default",
                      "type", "choices", "help", "metavar",
                      "filetypes", "action_option"]
        return [(attr, getattr(self, attr)) for attr in attributes]
class FullHelpArgumentParser(argparse.ArgumentParser):
    """ Identical to the built-in argument parser, but on error it prints the
    full help message instead of just the usage information. """
    def error(self, message):
        """ Print full help to stderr, then exit with status 2. """
        self.print_help(sys.stderr)
        self.exit(2, "%(prog)s: error: %(message)s\n" % {"prog": self.prog,
                                                         "message": message})
class SmartFormatter(argparse.HelpFormatter):
    """ Smart formatter allowing raw formatting and lists in help text.

    To use: prefix the help item with "R|" to override the default
    formatting.  List items can be marked with "L|" at the start of a
    newline.
    adapted from: https://stackoverflow.com/questions/3853722 """
    def __init__(self,
                 prog,
                 indent_increment=2,
                 max_help_position=24,
                 width=None):
        super().__init__(prog, indent_increment, max_help_position, width)
        # Collapses runs of spaces/CR/FF/VT but deliberately keeps newlines.
        self._whitespace_matcher_limited = re.compile(r'[ \r\f\v]+', re.ASCII)

    def _split_lines(self, text, width):
        if not text.startswith("R|"):
            # Fall back to the default help formatting.
            return argparse.HelpFormatter._split_lines(self, text, width)
        # Strip the "R|" marker and normalise intra-line whitespace.
        text = self._whitespace_matcher_limited.sub(' ', text).strip()[2:]
        output = []
        for line in text.splitlines():
            indent = ""
            if line.startswith("L|"):
                indent = "    "
                line = " - {}".format(line[2:])
            output.extend(textwrap.wrap(line, width, subsequent_indent=indent))
        return output
class FaceSwapArgs():
""" Faceswap argument parser functions that are universal
to all commands. Should be the parent function of all
subsequent argparsers """
def __init__(self, subparser, command,
             description="default", subparsers=None):
    # Collect the three argument groups; subclasses override the getters.
    self.global_arguments = self.get_global_arguments()
    self.argument_list = self.get_argument_list()
    self.optional_arguments = self.get_optional_arguments()
    # Without a subparser there is nothing to attach the arguments to;
    # only the argument lists above are built.
    if not subparser:
        return
    self.parser = self.create_parser(subparser, command, description)
    self.add_arguments()
    # Hook the relevant script up as this sub-command's default action.
    script = ScriptExecutor(command, subparsers)
    self.parser.set_defaults(func=script.execute_script)
@staticmethod
def get_argument_list():
    """ Put the arguments in a list so that they are accessible from both
    argparse and gui.  Override for command specific arguments. """
    # Base class defines no command-specific arguments.
    return []
@staticmethod
def get_optional_arguments():
    """ Put the arguments in a list so that they are accessible from both
    argparse and gui.  This is used for when there are sub-children
    (e.g. convert and extract).  Override this for custom arguments. """
    # Base class defines no optional arguments.
    return []
@staticmethod
def get_global_arguments():
""" Arguments that are used in ALL parts of Faceswap
DO NOT override this """
global_args = list()
global_args.append({"opts": ("-L", "--loglevel"),
"type": str.upper,
"dest": "loglevel",
"default": "INFO",
"choices": ("INFO", "VERBOSE", "DEBUG", "TRACE"),
"help": "Log level. Stick with INFO or VERBOSE unless you need to | |
from ..v2019_07_01.aio.operations_async import AzureFirewallFqdnTagsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import AzureFirewallFqdnTagsOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import AzureFirewallFqdnTagsOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import AzureFirewallFqdnTagsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import AzureFirewallFqdnTagsOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import AzureFirewallFqdnTagsOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import AzureFirewallFqdnTagsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def azure_firewalls(self):
    """Instance depends on the API version:

    * 2018-04-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2018_04_01.aio.operations_async.AzureFirewallsOperations>`
    * 2018-06-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2018_06_01.aio.operations_async.AzureFirewallsOperations>`
    * 2018-07-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2018_07_01.aio.operations_async.AzureFirewallsOperations>`
    * 2018-08-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2018_08_01.aio.operations_async.AzureFirewallsOperations>`
    * 2018-10-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2018_10_01.aio.operations_async.AzureFirewallsOperations>`
    * 2018-11-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2018_11_01.aio.operations_async.AzureFirewallsOperations>`
    * 2018-12-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2018_12_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-02-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_02_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-04-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_04_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-06-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-07-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-08-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-09-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-11-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.AzureFirewallsOperations>`
    * 2019-12-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.AzureFirewallsOperations>`
    * 2020-03-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.AzureFirewallsOperations>`
    * 2020-04-01: :class:`AzureFirewallsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.AzureFirewallsOperations>`
    """
    # Resolve which API version this client profile selects for the
    # operation group, then lazily import the matching generated class.
    api_version = self._get_api_version('azure_firewalls')
    if api_version == '2018-04-01':
        from ..v2018_04_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2018-06-01':
        from ..v2018_06_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2018-07-01':
        from ..v2018_07_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2018-08-01':
        from ..v2018_08_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2018-10-01':
        from ..v2018_10_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2018-11-01':
        from ..v2018_11_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2018-12-01':
        from ..v2018_12_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-02-01':
        from ..v2019_02_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-04-01':
        from ..v2019_04_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-06-01':
        from ..v2019_06_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-07-01':
        from ..v2019_07_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-08-01':
        from ..v2019_08_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-09-01':
        from ..v2019_09_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-11-01':
        from ..v2019_11_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2019-12-01':
        from ..v2019_12_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2020-03-01':
        from ..v2020_03_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    elif api_version == '2020-04-01':
        from ..v2020_04_01.aio.operations_async import AzureFirewallsOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    # A fresh operations instance wired to this client's pipeline, config
    # and the version-specific (de)serializers.
    return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def bastion_hosts(self):
    """Instance depends on the API version:

    * 2019-04-01: :class:`BastionHostsOperations<azure.mgmt.network.v2019_04_01.aio.operations_async.BastionHostsOperations>`
    * 2019-06-01: :class:`BastionHostsOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.BastionHostsOperations>`
    * 2019-07-01: :class:`BastionHostsOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.BastionHostsOperations>`
    * 2019-08-01: :class:`BastionHostsOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.BastionHostsOperations>`
    * 2019-09-01: :class:`BastionHostsOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.BastionHostsOperations>`
    * 2019-11-01: :class:`BastionHostsOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.BastionHostsOperations>`
    * 2019-12-01: :class:`BastionHostsOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.BastionHostsOperations>`
    * 2020-03-01: :class:`BastionHostsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.BastionHostsOperations>`
    * 2020-04-01: :class:`BastionHostsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.BastionHostsOperations>`
    """
    # Resolve which API version this client profile selects for the
    # operation group, then lazily import the matching generated class.
    api_version = self._get_api_version('bastion_hosts')
    if api_version == '2019-04-01':
        from ..v2019_04_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2019-06-01':
        from ..v2019_06_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2019-07-01':
        from ..v2019_07_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2019-08-01':
        from ..v2019_08_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2019-09-01':
        from ..v2019_09_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2019-11-01':
        from ..v2019_11_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2019-12-01':
        from ..v2019_12_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2020-03-01':
        from ..v2020_03_01.aio.operations_async import BastionHostsOperations as OperationClass
    elif api_version == '2020-04-01':
        from ..v2020_04_01.aio.operations_async import BastionHostsOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    # A fresh operations instance wired to this client's pipeline, config
    # and the version-specific (de)serializers.
    return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def bgp_service_communities(self):
"""Instance depends on the API version:
* 2016-12-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2016_12_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2017-03-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2017_03_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2017-06-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2017_06_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2017-08-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2017_08_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2017-09-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2017_09_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2017-10-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2017_10_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2017-11-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2017_11_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-01-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_01_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-02-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_02_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-04-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_04_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-06-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_06_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-07-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_07_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-08-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_08_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-10-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_10_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-11-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_11_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2018-12-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2018_12_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-02-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_02_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-04-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_04_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-06-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-07-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-08-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-09-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-11-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2019-12-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2020-03-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.BgpServiceCommunitiesOperations>`
* 2020-04-01: :class:`BgpServiceCommunitiesOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.BgpServiceCommunitiesOperations>`
"""
api_version = self._get_api_version('bgp_service_communities')
if api_version == '2016-12-01':
from ..v2016_12_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2017-03-01':
from ..v2017_03_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2017-06-01':
from ..v2017_06_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2017-08-01':
from ..v2017_08_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2017-09-01':
from ..v2017_09_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2017-10-01':
from ..v2017_10_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2017-11-01':
from ..v2017_11_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-01-01':
from ..v2018_01_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-07-01':
from ..v2018_07_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-08-01':
from ..v2018_08_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-11-01':
from ..v2018_11_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2018-12-01':
from ..v2018_12_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-02-01':
from ..v2019_02_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import BgpServiceCommunitiesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def connection_monitors(self):
"""Instance depends on the API version:
* 2017-10-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2017_10_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2017-11-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2017_11_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-01-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_01_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-02-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_02_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-04-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_04_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-06-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_06_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-07-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_07_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-08-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_08_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-10-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_10_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-11-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_11_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2018-12-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2018_12_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-02-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_02_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-04-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_04_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-06-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-07-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-08-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-09-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-11-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2019-12-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2020-03-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.ConnectionMonitorsOperations>`
* 2020-04-01: :class:`ConnectionMonitorsOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.ConnectionMonitorsOperations>`
"""
api_version = self._get_api_version('connection_monitors')
if api_version == '2017-10-01':
from ..v2017_10_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2017-11-01':
from ..v2017_11_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-01-01':
from ..v2018_01_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-04-01':
from ..v2018_04_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-06-01':
from ..v2018_06_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-07-01':
from ..v2018_07_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-08-01':
from ..v2018_08_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-10-01':
from ..v2018_10_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-11-01':
from ..v2018_11_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2018-12-01':
from ..v2018_12_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-02-01':
from ..v2019_02_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2020-03-01':
from ..v2020_03_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
elif api_version == '2020-04-01':
from ..v2020_04_01.aio.operations_async import ConnectionMonitorsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def ddos_custom_policies(self):
"""Instance depends on the API version:
* 2018-11-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2018_11_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2018-12-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2018_12_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-02-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_02_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-04-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_04_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-06-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_06_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-07-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_07_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-08-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_08_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-09-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_09_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-11-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_11_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2019-12-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2019_12_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2020-03-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2020_03_01.aio.operations_async.DdosCustomPoliciesOperations>`
* 2020-04-01: :class:`DdosCustomPoliciesOperations<azure.mgmt.network.v2020_04_01.aio.operations_async.DdosCustomPoliciesOperations>`
"""
api_version = self._get_api_version('ddos_custom_policies')
if api_version == '2018-11-01':
from ..v2018_11_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2018-12-01':
from ..v2018_12_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-02-01':
from ..v2019_02_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-04-01':
from ..v2019_04_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-06-01':
from ..v2019_06_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-11-01':
from ..v2019_11_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2019-12-01':
from ..v2019_12_01.aio.operations_async import DdosCustomPoliciesOperations as OperationClass
elif api_version == '2020-03-01':
| |
# -*- coding: utf-8 -*-
"""Graph traversal methods."""
from typing import Any, Dict, Iterable, Tuple, List, Union
from networkx import DiGraph
__all__ = [
'compute_all_paths_multitarget',
'compute_all_paths_multitarget_dict',
]
def _compute_all_paths_multitarget(graph, source, targets, history, lmax):
"""Compute all paths and store history for each visited node containing number of paths already found.
:param graph: graph
:param source: source node
:param targets: target nodes
:param history:
:param lmax:
:return:
"""
if lmax == 0:
return
paths = [[[0, 0] for _ in targets] for _ in range(lmax)]
for neighbor in graph.neighbors(source):
if neighbor in targets or (not targets and graph.nodes[neighbor]['isTarget']):
target_id = targets.index(neighbor)
if graph[source][neighbor]['polarity'] == 1:
paths[0][target_id][0] += 1
else:
paths[0][target_id][1] += 1
continue
if neighbor not in history or neighbor in history and len(history[neighbor]) < lmax - 1:
_compute_all_paths_multitarget(graph, neighbor, targets, history, lmax - 1)
# After a node has been visited,
# history[neighbor] is a dictionary with
# all paths found for lenghts < lmax.
# Different keys in dict groups paths
# by their size (in number of nodes).
if neighbor in history:
for length in range(len(history[neighbor][:lmax - 1])):
if graph[source][neighbor]['polarity'] == 1:
for target_id in range(len(targets)):
paths[length + 1][target_id][0] += history[neighbor][length][target_id][0]
paths[length + 1][target_id][1] += history[neighbor][length][target_id][1]
else:
for target_id in range(len(targets)):
paths[length + 1][target_id][0] += history[neighbor][length][target_id][1]
paths[length + 1][target_id][1] += history[neighbor][length][target_id][0]
history[source] = paths
def _compute_all_paths_multitarget_dict(graph, source, targets, history, lmax):
"""Compute all paths and store history for each visited node containing number of paths already found.
:param graph: graph
:param source: source node
:param targets: target nodes
:param lmax: lmax
:param history:
"""
if lmax == 0:
return
paths = [{} for _ in range(lmax)]
for neighbor in graph.neighbors(source):
if graph.nodes[neighbor]['isTarget']:
if neighbor in targets or not targets:
paths[0][neighbor] = [0, 0]
if graph[source][neighbor]['polarity'] == 1:
paths[0][neighbor][0] += 1
else:
paths[0][neighbor][1] += 1
continue
if neighbor not in history or neighbor in history and len(history[neighbor]) < lmax - 1:
_compute_all_paths_multitarget_dict(graph, neighbor, targets, history, lmax - 1)
# After a node has been visited,
# history[neighbor] is a dictionary with
# all paths found for lenghts < lmax.
# Different keys in dict groups paths
# by their size (in number of nodes).
if neighbor in history:
for length in range(len(history[neighbor][:lmax - 1])):
relation_polarity = graph[source][neighbor]['polarity']
for target in history[neighbor][length].keys():
if target not in paths[length + 1]:
paths[length + 1][target] = [0, 0]
if relation_polarity == 1:
paths[length + 1][target][0] += history[neighbor][length][target][0]
paths[length + 1][target][1] += history[neighbor][length][target][1]
else:
paths[length + 1][target][0] += history[neighbor][length][target][1]
paths[length + 1][target][1] += history[neighbor][length][target][0]
history[source] = paths
def get_paths_to_target(graph, source, target, visited_nodes, history, lmax):
    """Count all source-to-target paths that avoid ``visited_nodes``.

    Lengths are indexed so that the entry for ``lmax`` covers paths of
    ``lmax + 1`` edges. Returns a tuple with

    (1) an ``[activating, inhibiting]`` pair totalling the paths found, and
    (2) a dict mapping each intermediate node to how many of those signed
        paths run through it.

    Preconditions (assumed, not checked): ``len(history[source]) > lmax``
    and ``target in history[source][lmax]``.
    """
    # Base case: a single edge straight to the target.
    if lmax == 0:
        sign = graph[source][target]['polarity']
        return ([1, 0] if sign == 1 else [0, 1]), {}

    # Candidate first hops: intermediate nodes recorded in the memo that are
    # actual out-neighbors of ``source``.
    candidates = [n for n in history[source][lmax][target][1] if graph.has_edge(source, n)]
    totals = [0, 0]
    through = {}
    for nbr in candidates:
        # The neighbor's memo must be deep enough and must reach the target.
        if len(history[nbr]) < lmax - 1:
            continue
        if target not in history[nbr][lmax - 1]:
            continue
        if nbr in visited_nodes:
            continue
        blocked = visited_nodes + [source]
        memo = history[nbr][lmax - 1][target]
        # If any memoized sub-path touches a blocked node, recount the
        # neighbor's paths excluding it; otherwise reuse the memo as-is.
        if any(n in blocked for n in memo[1]):
            sub_paths, sub_nodes = get_paths_to_target(
                graph, nbr, target, blocked, history, lmax - 1)
        else:
            sub_paths, sub_nodes = memo[0], memo[1]
        # An inhibiting first edge swaps the activating/inhibiting roles.
        act_i, inh_i = (0, 1) if graph[source][nbr]['polarity'] == 1 else (1, 0)
        for node, cnt in sub_nodes.items():
            slot = through.setdefault(node, [0, 0])
            slot[0] += cnt[act_i]
            slot[1] += cnt[inh_i]
        if sum(sub_paths):
            slot = through.setdefault(nbr, [0, 0])
            slot[0] += sub_paths[act_i]
            slot[1] += sub_paths[inh_i]
        totals[0] += sub_paths[act_i]
        totals[1] += sub_paths[inh_i]
    return totals, through
def get_paths_through(graph, source, target, crossnode, visited_nodes, history, lmax):
    """Count source-to-target paths that pass through ``crossnode``.

    Returns a tuple with

    (1) an ``[activating, inhibiting]`` pair totalling the qualifying paths, and
    (2) a dict mapping each intermediate node to how many of those signed
        paths run through it.

    When ``source`` is ``crossnode`` itself, every remaining path qualifies,
    so the count is delegated to :func:`get_paths_to_target`; otherwise only
    neighbors whose memoized sub-paths can still reach ``crossnode`` are
    explored.
    """
    # source == crossnode: the constraint is already satisfied and source
    # itself will not appear again, so fall back to plain path counting.
    if source == crossnode:
        return get_paths_to_target(
            graph, source, target, visited_nodes + [source], history, lmax - 1
        )

    # Candidate first hops: memoized intermediate nodes that are actual
    # out-neighbors of ``source``.
    candidates = [n for n in history[source][lmax][target][1] if graph.has_edge(source, n)]
    totals = [0, 0]
    through = {}
    for nbr in candidates:
        # The neighbor's memo must be deep enough and must reach the target.
        if len(history[nbr]) < lmax - 1:
            continue
        if target not in history[nbr][lmax - 1]:
            continue
        if nbr in visited_nodes:
            continue
        if nbr == crossnode:
            # Reached the mandatory node: from here any remaining path counts.
            sub_paths, sub_nodes = get_paths_to_target(
                graph, nbr, target, visited_nodes + [source], history, lmax - 1)
        elif crossnode not in history[nbr][lmax - 1][target][1]:
            # crossnode cannot appear downstream of this neighbor: prune.
            continue
        else:
            sub_paths, sub_nodes = get_paths_through(
                graph, nbr, target, crossnode, visited_nodes + [source], history, lmax - 1)
        # An inhibiting first edge swaps the activating/inhibiting roles.
        act_i, inh_i = (0, 1) if graph[source][nbr]['polarity'] == 1 else (1, 0)
        for node, cnt in sub_nodes.items():
            slot = through.setdefault(node, [0, 0])
            slot[0] += cnt[act_i]
            slot[1] += cnt[inh_i]
        if sum(sub_paths):
            slot = through.setdefault(nbr, [0, 0])
            slot[0] += sub_paths[act_i]
            slot[1] += sub_paths[inh_i]
        totals[0] += sub_paths[act_i]
        totals[1] += sub_paths[inh_i]
    return totals, through
def _compute_all_simple_paths_multitarget_dict(graph, source, targets, lmax, history, cycle_history={}):
"""Compute all simple paths and store history for each visited node containing number of paths already found.
:param graph: graph
:param source: source node
:param targets: target nodes
:param lmax: maximum number of edges to find a path
:param history: maximum number of edges to find a path
:param cycle_history:
:return:
"""
if lmax == 0:
return
_history_source = [{} for _ in range(lmax)]
neighbor_nodes = [neighbor for neighbor in graph.neighbors(source)]
for neighbor in neighbor_nodes:
if graph.nodes[neighbor]['isTarget']:
if neighbor in targets or not targets:
_history_source[0][neighbor] = [[0, 0], {}]
if graph[source][neighbor]['polarity'] == 1:
_history_source[0][neighbor][0][0] = 1
else:
_history_source[0][neighbor][0][1] = 1
continue
if neighbor not in history or (neighbor in history and len(history[neighbor]) < lmax - 1):
_compute_all_simple_paths_multitarget_dict(
graph=graph,
source=neighbor,
targets=targets,
lmax=lmax - 1,
history=history,
cycle_history=cycle_history,
)
if neighbor in history:
for length in range(len(history[neighbor][:lmax - 1])):
relation_polarity = graph[source][neighbor]['polarity']
for target in history[neighbor][length].keys():
if target not in _history_source[length + 1]:
_history_source[length + 1][target] = [[0, 0], {}]
activation_index = 0 if relation_polarity == 1 else 1
inhibition_index = 1 if relation_polarity == 1 else 0
# Get paths to target
paths_to_target = [history[neighbor][length][target][0][0], history[neighbor][length][target][0][1]]
# If there're no cycles, intermediate_nodes will not be modified
# and there's no need for a costly deepcopy operation
intermediate_nodes = history[neighbor][length][target][1]
# Get paths starting from neighbor that have source as intermediate node
paths_in_cycle = intermediate_nodes.get(source, [0, 0])
if paths_in_cycle[0] or paths_in_cycle[1]:
# Cycle detected.
# Find all paths / nodes involved in cycle so they can be removed
# from neighbor's list before being added to source's.
nodes_in_cycles = {}
number_of_paths_in_cycles = []
if (
source in cycle_history[0]
and neighbor in cycle_history[0][source]
and target in cycle_history[0][source][neighbor]
and length in cycle_history[0][source][neighbor][target]
):
number_of_paths_in_cycles, nodes_in_cycles = cycle_history[0][source][neighbor][target][
length]
else:
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Jan 2018
hacer calibración extrínseca con pymc3 (extrinsic calibration with PyMC3)
@author: sebalander
"""
# %%
# import glob
import os
import corner
import time
import seaborn as sns
import scipy as sc
import scipy.stats as sts
import matplotlib.pyplot as plt
from copy import deepcopy as dc
import numpy as np
from importlib import reload
from glob import glob
# %env THEANO_FLAGS='device=cuda, floatX=float32'
import theano
import theano.tensor as T
import pymc3 as pm
import cv2
import scipy.optimize as opt
import sys
sys.path.append("/home/sebalander/Code/sebaPhD")
from calibration import calibrator as cl
from dev import bayesLib as bl
import pickle
from calibration.calibrator import datafull, real, realdete, realbalk, realches
from calibration.calibrator import synt, syntextr, syntches, syntintr
import numdifftools as ndft
from time import sleep
print('libraries imported')
# compute_test_value is 'off' by default, meaning this feature is inactive
theano.config.compute_test_value = 'off' # Use 'warn' to activate this feature
# %%
def radiusStepsNdim(n):
    """Return mode, mean and standard deviation of the step radius obtained
    when sampling an ``n``-dimensional Gaussian with unit sigma.

    :param n: number of dimensions
    :return: numpy array ``[mode, mean, std]`` of the radius
    """
    # Normalisation of the radial density:
    # integral_0^inf x^(n-1) exp(-x^2/2) dx = 2^(n/2 - 1) Gamma(n/2)
    # https://www.wolframalpha.com/input/?i=integrate+x%5E(n-1)+exp(-x%5E2%2F2)+from+0+to+infinity
    norm = 2**(n / 2 - 1) * sc.special.gamma(n / 2)
    # First moment:
    # integral_0^inf x^n exp(-x^2/2) dx = 2^((n - 1)/2) Gamma((n + 1)/2)
    # https://www.wolframalpha.com/input/?i=integrate+x%5En+exp(-x%5E2%2F2)+from+0+to+infinity
    meanR = 2**((n - 1) / 2) * sc.special.gamma((n + 1) / 2)
    # Second moment:
    # integral_0^inf x^(n+1) exp(-x^2/2) dx = 2^(n/2) Gamma(n/2 + 1)
    # https://www.wolframalpha.com/input/?i=integrate+x%5E(n%2B1)+exp(-x%5E2%2F2)+from+0+to+infinity
    meanR2 = 2**(n / 2) * sc.special.gamma(n / 2 + 1)
    modeR = np.sqrt(n - 1)
    # normalise the moment integrals
    meanR /= norm
    meanR2 /= norm
    stdR = np.sqrt(meanR2 - meanR**2)
    return np.array([modeR, meanR, stdR])
def extractCaseData(exCase):
    """Gather image/object points and ground-truth pose for one synthetic
    extrinsic-calibration case.

    :param exCase: case index triple ``(pose_index, noise_index, use_all_points)``;
        when ``exCase[2]`` is falsy only the 10-point subset is used
    :return: tuple ``(imagePoints, objpoints, rVecsT, tVecsT)`` restricted to
        the points in front of the camera

    NOTE(review): reads the globals ``fullData`` and ``stdPix``.
    """
    objpoints = fullData.Synt.Extr.objPt
    # Build the noisy observations on a fresh array. BUG FIX: the original
    # used ``imagePoints += ...`` on the array indexed out of ``fullData``,
    # mutating the stored dataset in place, so repeated calls for the same
    # case stacked additional layers of noise onto the shared array.
    imagePoints = (fullData.Synt.Extr.imgPt[exCase[0], exCase[1]]
                   + stdPix * fullData.Synt.Extr.imgNse[exCase[0], exCase[1]])
    if not exCase[2]:
        objpoints = objpoints[fullData.Synt.Extr.index10]
        imagePoints = imagePoints[fullData.Synt.Extr.index10]
    # load true rotation / translation values
    rVecsT = fullData.Synt.Extr.rVecs[exCase[0]]
    tVecsT = fullData.Synt.Extr.tVecs[exCase[0], exCase[1]]
    # keep only the points inside the camera FOV (positive z in camera frame)
    xCam = (cv2.Rodrigues(rVecsT)[0][2, :2].dot(objpoints.T)).T + tVecsT[2]
    inFOV = xCam > 0
    return imagePoints[inFOV], objpoints[inFOV], rVecsT, tVecsT
# %%
# 2*pi and sqrt(2*pi): normalisation constants reused by prob1vs0 below.
pi2 = 2 * np.pi
pi2sq = np.sqrt(pi2)
def prob1vs0(t, x, adaptPrior=True):
    """Log of the Bayesian evidence ratio between a linear model (H1) and a
    constant model (H0) for the series ``x(t)``; values < 0 favour the
    constant model.

    :param t: abscissae (1-D array)
    :param x: data values (1-D array, same length as ``t``)
    :param adaptPrior: when True, derive the parameter priors from the spread
        of the residuals; otherwise use a fixed value of 1 for both log-priors
    :return: log evidence ratio; returns 1 when either residual variance is
        ~0, which signals a degenerate fit (treated as "did not converge")
    """
    m0, c0 = np.polyfit(t, x, 0, cov=True)
    m1, c1 = np.polyfit(t, x, 1, cov=True)
    dif0 = x - m0[0]
    var0 = np.mean((dif0)**2)
    if np.isclose(var0, 0):  # zero variance means the sampling degenerated
        return 1  # report as not converged
    dif1 = x - m1[0] * t - m1[1]
    var1 = np.mean((dif1)**2)
    if np.allclose(var1, 0):
        return 1
    # parameter priors
    if adaptPrior:
        pConst0 = 1 / (np.max(dif0) - np.min(dif0))  # prior of the constant
        deltaDif1 = np.max(dif1) - np.min(dif1)
        pConst1 = 1 / deltaDif1
        penDelta = deltaDif1 / (t[-1] - t[0])
        pPendi1 = 1 / penDelta / 2  # prior of the slope
        pWgH0 = np.log(pConst0)
        pWgH1 = np.log(pConst1 * pPendi1)
    else:
        pWgH0 = 1.0
        pWgH1 = 1.0
    # best-fit log-likelihoods under each hypothesis
    pDagWH0 = sc.stats.multivariate_normal.logpdf(dif0, cov=var0)
    pDagWH1 = sc.stats.multivariate_normal.logpdf(dif1, cov=var1)
    # log Occam factors (posterior widths of the fitted parameters);
    # constants inlined: sqrt(2*pi) and 2*pi
    deltaW0 = np.log(np.sqrt(2 * np.pi) * np.sqrt(c0)[0, 0])
    deltaW1 = np.log(2 * np.pi * np.sqrt(np.linalg.det(c1)))
    prob1_0 = np.sum(pDagWH1 - pDagWH0)
    # BUG FIX: all four terms are logarithms, so they must be added; the
    # original multiplied the two H1 terms (``pWgH1 * deltaW1``).
    prob1_0 += pWgH1 + deltaW1 - pWgH0 - deltaW0
    return prob1_0
def funcExp(x, a, b, c):
    """Decaying exponential ``a * exp(-x / |b|) + c``; the absolute value
    keeps the decay constant positive regardless of the sign of ``b``."""
    decay = np.exp(- x / np.abs(b))
    return a * decay + c
# %%
def logPerror(xAll, case):
    """Quadratic image-reprojection error terms for the pose packed in ``xAll``.

    :param xAll: 6-vector; rotation (first three) and translation (last three)
    :param case: calibration case providing the points, intrinsics and
        covariance terms consumed by ``cl.errorCuadraticoImagen``
    :return: whatever ``cl.errorCuadraticoImagen`` produces (per-point terms)
    """
    rvec, tvec = xAll[:3], xAll[3:]
    return cl.errorCuadraticoImagen(
        case.imagePoints, case.objpoints,
        rvec, tvec, case.cameraMatrix, case.distCoeffs, case.model,
        case.Ci, case.Cf, case.Ck, case.Crt, case.Cfk)
def objective(xAll, case):
    """Scalar optimization objective: total quadratic error for the pose
    ``xAll`` (sum of the per-point terms from :func:`logPerror`)."""
    total = np.sum(logPerror(xAll, case))
    return total
# Numerical Hessian of ``objective`` (numdifftools); evaluated at the optimum
# in ``optimizar`` to obtain a covariance estimate.
hessNum = ndft.Hessian(objective)
def optimizar(xAll, case):
    '''
    Optimize the pose and store the result on the case object: the minimizer
    goes to ``case.xAllOpt`` and a covariance estimate — the inverse of the
    numerical Hessian at the optimum — to ``case.covOpt``.

    :param xAll: initial 6-vector guess (rotation + translation)
    :param case: calibration case; mutated in place
    '''
    # BUG FIX: ``args`` must be a tuple — scipy invokes ``objective(x, *args)``,
    # so the original ``args=case`` would try to unpack the case object.
    ret = opt.minimize(objective, xAll, args=(case,))
    case.xAllOpt = ret.x
    case.covOpt = np.linalg.inv(hessNum(case.xAllOpt, case))
class casoCalibExtr:
    """Container for one synthetic extrinsic-calibration MCMC run: sampler
    settings, intrinsic-calibration results and per-case data/covariances.

    NOTE(review): also reads the globals ``fullData`` and ``stdPix`` through
    :func:`extractCaseData`.
    """

    def __init__(self, fullData, intrCalibResults, case, stdPix, allDelta,
                 nCores, nTune, nTuneInter, tuneBool, tune_thr, tallyBool,
                 convChecksBool, rndSeedBool, scaNdim, scaAl, nDraws, nChains,
                 indexSave, pathFiles):
        # --- sampler / bookkeeping configuration ---
        self.case = case
        self.nCores = nCores
        self.nTune = nTune
        self.nTuneInter = nTuneInter
        self.tuneBool = tuneBool
        self.tune_thr = tune_thr
        self.tallyBool = tallyBool
        self.convChecksBool = convChecksBool
        self.rndSeedBool = rndSeedBool
        self.scaNdim = scaNdim
        self.scaAl = scaAl
        self.nDraws = nDraws
        self.nChains = nChains
        self.indexSave = indexSave
        self.pathFiles = pathFiles
        self.allDelta = allDelta
        # --- intrinsic model ---
        self.camera = fullData.Synt.Intr.camera
        self.model = fullData.Synt.Intr.model
        self.imgSize = fullData.Synt.Intr.s
        Ns = [2, 3]
        Xint = intrCalibResults['mean'][:3]
        self.cameraMatrix, self.distCoeffs = bl.flat2int(Xint, Ns, self.model)
        # --- case data ---
        # BUG FIX(review): the original called ``extractCaseData(exCase)``,
        # reaching past the ``case`` parameter to a module-level global; use
        # the constructor argument instead.
        caseData = extractCaseData(case)
        self.imagePoints = caseData[0]
        self.xi, self.yi = self.imagePoints.T
        self.objpoints = caseData[1]
        self.rVecsT = caseData[2]
        self.tVecsT = caseData[3]
        self.xAllT = np.concatenate([self.rVecsT, self.tVecsT])
        self.nPt = self.objpoints.shape[0]
        self.nFree = 6
        self.observedNormed = np.zeros((self.nPt * 2))
        # --- covariances propagated from the intrinsic calibration ---
        self.Crt = False  # no RT error
        self.Cf = np.zeros((4, 4))
        self.Cf[2:, 2:] = intrCalibResults['cov'][:2, :2]
        self.Ck = intrCalibResults['cov'][2, 2]
        self.Cfk = np.zeros(4)
        self.Cfk[2:] = intrCalibResults['cov'][:2, 2]
        self.Ci = np.array([stdPix**2 * np.eye(2)] * self.nPt)
        # exponential weights for the moving window (tau = 1/5 of the window)
        self.weiEsp = np.exp(- np.arange(nDraws) * 5 / nDraws)[::-1]
        self.weiEsp /= np.sum(self.weiEsp)
# %%
'''
funcion arbitraria para theano
https://docs.pymc.io/notebooks/getting_started.html
https://github.com/pymc-devs/pymc3/blob/master/pymc3/examples/disaster_model_theano_op.py
'''
from theano.compile.ops import as_op
@as_op(itypes=[T.dvector], otypes=[T.dvector])
def project2diagonalisedError(xAll):
    '''
    Map a flat pose vector to the diagonalised (normalised) reprojection
    errors, wrapped as a theano op for use inside the pymc3 model.

    There is no choice but to read `case` from the main module as a global
    variable (theano's as_op cannot take it as an argument here).
    '''
    c = case  # NOTE(review): module-level global set by the calling script
    # invert the projection: image points -> map coordinates + covariance
    xm, ym, Cm = cl.inverse(c.xi, c.yi, xAll[:3], xAll[3:], c.cameraMatrix,
                            c.distCoeffs, c.model, c.Ci, c.Cf, c.Ck, c.covOpt, c.Cfk)
    # whiten the residuals with the pointwise covariances
    xNorm, yNorm = cl.points2linearised(xm - c.objpoints[:, 0],
                                        ym - c.objpoints[:, 1], Cm).T
    return np.concatenate([xNorm, yNorm])
#class project2diagonalisedError(theano.Op):
# # Properties attribute
# __props__ = ("xi", "yi", "cameraMatrix", "distCoeffs", "model", "Ci", "Cf", "Ck", "covOpt", "Cfk", "objpoints")
#
# #itypes and otypes attributes are
# #compulsory if make_node method is not defined.
# #They're the type of input and output respectively
# itypes = [T.dvector]
# otypes = [T.dvector]
#
# def __init__(self, case):
# self.xi, self.yi, self.cameraMatrix, self.distCoeffs, self.model, self.Ci, self.Cf, self.Ck, self.covOpt, self.Cfk, self.objpoints = [case.xi, case.yi, case.cameraMatrix, case.distCoeffs, case.model, case.Ci, case.Cf, case.Ck, case.covOpt, case.Cfk, case.objpoints]
#
#
# # Python implementation:
# def perform(self, node, inputs_storage, output_storage):
# xAll = inputs_storage[0][0]
# xm, ym, Cm = cl.inverse(self.xi, self.yi, xAll[:3], xAll[3:], self.cameraMatrix,
# self.distCoeffs, self.model, self.Ci, self.Cf, self.Ck, self.covOpt, self.Cfk)
#
# xNorm, yNorm = cl.points2linearised(xm - self.objpoints[:, 0],
# ym - self.objpoints[:, 1], Cm).T
#
# output_storage[0] = np.float64(np.concatenate([xNorm, yNorm]))
def getTrace(alMean, Sal, case):
    """Sample the extrinsic-pose posterior with DEMetropolis.

    :param alMean: centre used to seed the chains (typically the optimum pose)
    :param Sal: per-parameter scale for chain seeding and the proposal
    :param case: casoCalibExtr instance holding the data and every sampler
        setting (nChains, nDraws, tuning flags, ...)
    :return: pymc3 trace (tuning samples are kept)
    """
    # uniform prior bounds centred on the ground-truth pose
    allLow = case.xAllT - case.allDelta
    allUpp = case.xAllT + case.allDelta
    # seed each chain from a gaussian around alMean with scale Sal
    alSeed = np.random.randn(case.nChains, case.nFree) * Sal
    alSeed += alMean
    # BUG FIX: every sampler setting below was read from module globals
    # (nChains, tuneBool, nDraws, ...); they are now taken from `case`,
    # which stores exactly these attributes.
    start = [{'xAl': alSeed[i]} for i in range(case.nChains)]
    projectionModel = pm.Model()
    with projectionModel:
        # prior for the unknown pose parameters
        xAl = pm.Uniform('xAl', lower=allLow, upper=allUpp, shape=allLow.shape,
                         transform=None)
        xAl.tag.test_value = case.xAllT
        # normalised reprojection errors via the theano-wrapped projection
        xyMNor = project2diagonalisedError(xAl)
        Y_obs = pm.Normal('Y_obs', mu=xyMNor, sd=1, observed=case.observedNormed)
        step = pm.DEMetropolis(vars=[xAl], S=Sal, tune=case.tuneBool,
                               tune_interval=case.nTuneInter,
                               tune_throughout=case.tune_thr,
                               tally=case.tallyBool, scaling=case.scaAl)
        step.tune = case.tuneBool
        step.lamb = case.scaAl
        step.scaling = case.scaAl
        trace = pm.sample(draws=case.nDraws, step=step, njobs=case.nChains,
                          start=start,
                          tune=case.nTune, chains=case.nChains, progressbar=True,
                          discard_tuned_samples=False, cores=case.nCores,
                          compute_convergence_checks=case.convChecksBool,
                          parallelize=True)
    return trace
#lor = np.array([-100]*6)
#upr = - lor
#xAl = pm.Uniform.dist(lor, upr, shape=lor.shape)
#
#f = theano.function([xAl], project2diagonalisedError(case)(xAl))
#xyMNor = f(xAl)
#getTrace(case.xAllOpt, np.sqrt(np.diag(case.covOpt)), case)
'''
ponele que anda, no pude hacer que la funcion acepte a "case" como argumento
o algo que no sea leerlo desde las variables globales del main.
incluso fallo definir como un objeto mas complicado que pudiera inicializarse
guardando los parametros que necesito
'''
# %%
def getStationaryTrace(exCase):
imagePoints, objpoints, rVecsT, tVecsT = extractCaseData(exCase)
xi, yi = imagePoints.T
nPt = objpoints.shape[0] # cantidad de puntos
Ci = np.array([stdPix**2 * np.eye(2)] * nPt)
nFree = 6 # nro de parametros libres
# pongo en forma flat los valores iniciales
xAllT = np.concatenate([rVecsT, tVecsT])
# pongo en forma flat los valores iniciales
xAllT = np.concatenate([rVecsT, tVecsT])
print('data loaded and formated')
ret = opt.minimize(objective, xAllT)
xAllOpt = ret.x
covNum = np.linalg.inv(hessNum(xAllOpt)) # la achico por las dudas?
print('initial optimisation and covariance estimated')
# for proposal distr
alMean = xAllOpt
Sal = np.sqrt(np.diag(covNum))
print('defined parameters')
means = list()
stdes = list()
tracesList = list()
probList = list()
for intento in range(50):
print("\n\n")
print("============================")
print('intento nro', intento, ' caso ', exCase)
trace = getTrace(alMean, Sal)
sleep(5) # espero un ratito ...
traceArray = trace['xAl'].reshape((nChains, -1, nFree))
traceArray = traceArray.transpose((2, 1, 0))
tracesList.append(traceArray)
traceMean = np.mean(traceArray, axis=2)
traceStd = np.std(traceArray, axis=2)
means.append(traceMean)
stdes.append(traceStd)
probMean = np.zeros(6)
probStd = np.zeros(6)
for i in | |
sage: X(q)
(-2, 3)
"""
if isinstance(coords, ManifoldPoint):
point = coords # for readability
# This should actually never happen by the coercion framework...
if point.parent() is self:
return point
if point in self:
resu = self.element_class(self, name=point._name,
latex_name=point._latex_name)
for chart, coords in point._coordinates.items():
resu._coordinates[chart] = coords
return resu
else:
raise ValueError("the {}".format(point) +
" is not in {}".format(self))
return self.element_class(self, coords=coords, chart=chart,
name=name, latex_name=latex_name,
check_coords=check_coords)
def _an_element_(self):
    r"""
    Construct some point in the subset.

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: A = M.subset('A')
        sage: p = A._an_element_(); p
        Point on the 2-dimensional topological manifold M
        sage: p in A
        True
    """
    #!# should be improved...
    # default element: a point with no coordinates assigned yet
    return self.element_class(self)
#### End of methods required for any Parent in the category of sets
def __contains__(self, point):
    r"""
    Check whether ``point`` is contained in ``self``.

    TESTS::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: A = M.subset('A')
        sage: p = A((-2,3), chart=X); p
        Point on the 2-dimensional topological manifold M
        sage: A.__contains__(p)
        True
        sage: p in A  # indirect doctest
        True
        sage: A.__contains__(A.an_element())
        True
        sage: q = M((0,0), chart=X); q
        Point on the 2-dimensional topological manifold M
        sage: A.__contains__(q)
        False
    """
    # cheap parent-based tests first: identity, then subset relation
    parent = point.parent()
    if parent is self or parent.is_subset(self):
        return True
    #!# should be improved once coordinate definition have been introduced
    # in ManifoldSubset
    return False
def lift(self, p):
    r"""
    Return the lift of ``p`` to the ambient manifold of ``self``.

    INPUT:

    - ``p`` -- point of the subset

    OUTPUT:

    - the same point, considered as a point of the ambient manifold

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: A = M.open_subset('A', coord_def={X: x>0})
        sage: p = A((1, -2)); p
        Point on the 2-dimensional topological manifold M
        sage: p.parent()
        Open subset A of the 2-dimensional topological manifold M
        sage: q = A.lift(p); q
        Point on the 2-dimensional topological manifold M
        sage: q.parent()
        2-dimensional topological manifold M
        sage: q.coord()
        (1, -2)
        sage: (p == q) and (q == p)
        True
    """
    # coerce the point into the ambient manifold's parent
    return self._manifold(p)
def retract(self, p):
    r"""
    Return the retract of ``p`` to ``self``.

    INPUT:

    - ``p`` -- point of the ambient manifold

    OUTPUT:

    - the same point, considered as a point of the subset

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: X.<x,y> = M.chart()
        sage: A = M.open_subset('A', coord_def={X: x>0})
        sage: p = M((1, -2)); p
        Point on the 2-dimensional topological manifold M
        sage: p.parent()
        2-dimensional topological manifold M
        sage: q = A.retract(p); q
        Point on the 2-dimensional topological manifold M
        sage: q.parent()
        Open subset A of the 2-dimensional topological manifold M
        sage: q.coord()
        (1, -2)
        sage: (q == p) and (p == q)
        True

    Of course, if the point does not belong to ``A``, the ``retract``
    method fails::

        sage: p = M((-1, 3))  # x < 0, so that p is not in A
        sage: q = A.retract(p)
        Traceback (most recent call last):
        ...
        ValueError: the Point on the 2-dimensional topological manifold M
        is not in Open subset A of the 2-dimensional topological manifold M
    """
    # the element constructor raises ValueError when p is not in self
    return self(p)
#### Accessors
def manifold(self):
    r"""
    Return the ambient manifold of ``self``.

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: A = M.subset('A')
        sage: A.manifold()
        2-dimensional topological manifold M
        sage: A.manifold() is M
        True
        sage: B = A.subset('B')
        sage: B.manifold() is M
        True

    An alias is ``ambient``::

        sage: A.ambient() is A.manifold()
        True
    """
    return self._manifold

# alias kept for backward compatibility with the documented API
ambient = manifold
def is_open(self):
    r"""
    Return if ``self`` is an open set.

    This method always returns ``False``, since open subsets must be
    constructed as instances of the subclass
    :class:`~sage.manifolds.manifold.TopologicalManifold`
    (which redefines ``is_open``)

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: A = M.subset('A')
        sage: A.is_open()
        False
    """
    # generic subsets are never known to be open; see subclass override
    return False
def is_closed(self):
    r"""
    Return if ``self`` is a closed set.

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: M.is_closed()
        True
        sage: also_M = M.subset('also_M')
        sage: M.declare_subset(also_M)
        sage: also_M.is_closed()
        True
        sage: A = M.subset('A')
        sage: A.is_closed()
        False
        sage: A.declare_empty()
        sage: A.is_closed()
        True
        sage: N = M.open_subset('N')
        sage: N.is_closed()
        False
        sage: complement_N = M.subset('complement_N')
        sage: M.declare_union(N, complement_N, disjoint=True)
        sage: complement_N.is_closed()
        True
    """
    # the whole manifold (or anything containing it) is closed, and so
    # is the empty set
    if self.manifold().is_subset(self) or self.is_empty():
        return True
    # otherwise look for an open subset disjoint from self whose union
    # with self is known and open: then self is its complement, hence
    # closed in that union
    for other_name, intersection in self._intersections.items():
        if not intersection.is_empty():
            continue
        other = self.manifold().subset_family()[other_name]
        if not other.is_open():
            continue
        try:
            union = self._unions[other_name]
        except KeyError:
            continue
        if union.is_open():
            # self is complement of open other in open union
            return True
    return False
def open_covers(self, trivial=True, supersets=False):
    r"""
    Generate the open covers of the current subset.

    If the current subset, `A` say, is a subset of the manifold `M`, an
    *open cover* of `A` is a :class:`ManifoldSubsetFiniteFamily` `F`
    of open subsets `U \in F` of `M` such that

    .. MATH::

        A \subset \bigcup_{U \in F} U.

    If `A` is open, we ask that the above inclusion is actually an
    identity:

    .. MATH::

        A = \bigcup_{U \in F} U.

    .. NOTE::

        To get the open covers as a family, sorted lexicographically by the
        names of the subsets forming the open covers, use the method
        :meth:`open_cover_family` instead.

    INPUT:

    - ``trivial`` -- (default: ``True``) if ``self`` is open, include the trivial
      open cover of ``self`` by itself
    - ``supersets`` -- (default: ``False``) if ``True``, include open covers of
      all the supersets; it can also be an iterable of supersets to include

    EXAMPLES::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: M.open_covers()
        <generator ...>
        sage: list(M.open_covers())
        [Set {M} of open subsets of the 2-dimensional topological manifold M]
        sage: U = M.open_subset('U')
        sage: list(U.open_covers())
        [Set {U} of open subsets of the 2-dimensional topological manifold M]
        sage: A = U.open_subset('A')
        sage: B = U.open_subset('B')
        sage: U.declare_union(A,B)
        sage: list(U.open_covers())
        [Set {U} of open subsets of the 2-dimensional topological manifold M,
         Set {A, B} of open subsets of the 2-dimensional topological manifold M]
        sage: list(U.open_covers(trivial=False))
        [Set {A, B} of open subsets of the 2-dimensional topological manifold M]
        sage: V = M.open_subset('V')
        sage: M.declare_union(U,V)
        sage: list(M.open_covers())
        [Set {M} of open subsets of the 2-dimensional topological manifold M,
         Set {U, V} of open subsets of the 2-dimensional topological manifold M,
         Set {A, B, V} of open subsets of the 2-dimensional topological manifold M]
    """
    # normalise `supersets`: False -> just self, True -> all supersets
    if supersets is False:
        supersets = [self]
    elif supersets is True:
        supersets = self._supersets
    for superset in supersets:
        for cover in superset._open_covers:
            # a cover containing one of the requested supersets is trivial
            if not trivial and any(member in supersets for member in cover):
                continue
            yield ManifoldSubsetFiniteFamily(cover)
def open_cover_family(self, trivial=True, supersets=False):
r"""
Return the family of open covers of the current subset.
If the current subset, `A` say, is a subset of the manifold `M`, an
*open cover* of `A` is a :class:`ManifoldSubsetFiniteFamily` `F`
of open subsets `U \in F` of `M` such that
.. MATH::
A \subset \bigcup_{U \in F} U.
If `A` is open, we ask that the above inclusion is actually an
identity:
.. MATH::
A = \bigcup_{U \in F} U.
The family is sorted lexicographically by the names of the subsets
forming the open covers.
.. NOTE::
If you only need to iterate over the open covers in arbitrary
order, you can use the generator method :meth:`open_covers`
instead.
INPUT:
- ``trivial`` -- (default: ``True``) if ``self`` is open, include the trivial
open cover of ``self`` by itself
- ``supersets`` -- (default: ``False``) if ``True``, include open covers of
all the supersets; it can also be an iterable of supersets to include
EXAMPLES::
sage: M = Manifold(2, 'M', structure='topological')
sage: M.open_cover_family()
Set {{M}} of objects of the 2-dimensional topological manifold M
sage: U = M.open_subset('U')
sage: U.open_cover_family()
Set {{U}} of objects of the 2-dimensional topological manifold M
sage: A = U.open_subset('A')
sage: B = U.open_subset('B')
sage: U.declare_union(A,B)
sage: U.open_cover_family()
Set {{A, B}, {U}} of objects of the 2-dimensional topological manifold M
sage: U.open_cover_family(trivial=False)
Set {{A, B}} of objects of the 2-dimensional topological manifold M
sage: V = M.open_subset('V')
sage: M.declare_union(U,V)
sage: M.open_cover_family()
Set {{A, B, V}, {M}, {U, V}} of objects of | |
from detectron2.utils.logger import setup_logger
setup_logger()
import cv2, os, re
import numpy as np
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from densepose.config import add_densepose_config
from densepose.vis.extractor import DensePoseResultExtractor
import torch
import argparse
from scipy.spatial import ConvexHull
import pandas as pd
# files of config
# output directories for the densepose/openpose pipeline artefacts
densepose_keypoints_dir = os.path.join('output', 'segments')
openpose_keypoints_dir = os.path.join('output', 'data')
norm_segm_dir = os.path.join('output', 'pix')
# coarse segmentation:
# 0 = Background
# 1 = Torso,
# 2 = Right Hand, 3 = Left Hand, 4 = Left Foot, 5 = Right Foot,
# 6 = Upper Leg Right, 7 = Upper Leg Left, 8 = Lower Leg Right, 9 = Lower Leg Left,
# 10 = Upper Arm Left, 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right,
# 14 = Head
# index in this list == coarse segment id above
COARSE_ID = [
    'Background',
    'Torso',
    'RHand', 'LHand', 'LFoot', 'RFoot',
    'RThigh', 'LThigh', 'RCalf', 'LCalf',
    'LUpperArm', 'RUpperArm', 'LLowerArm', 'RLowerArm',
    'Head'
]
# BGRA -> alpha channel: 0 = transparent, 255 = non-transparent
# NOTE(review): hands and feet have no colour entry — their pixel lists are
# intentionally left empty downstream and never drawn; confirm intentional.
COARSE_TO_COLOR = {
    'Background': [255, 255, 255, 255],
    'Torso': [191, 78, 22, 255],
    'RThigh': [167, 181, 44, 255],
    'LThigh': [141, 187, 91, 255],
    'RCalf': [114, 191, 147, 255],
    'LCalf': [96, 188, 192, 255],
    'LUpperArm': [87, 207, 112, 255],
    'RUpperArm': [55, 218, 162, 255],
    'LLowerArm': [25, 226, 216, 255],
    'RLowerArm': [37, 231, 253, 255],
    'Head': [14, 251, 249, 255]
}
# fine segmentation:
# 0 = Background
# 1, 2 = Torso,
# 3 = Right Hand, 4 = Left Hand, 5 = Left Foot, 6 = Right Foot,
# 7, 9 = Upper Leg Right, 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right, 12, 14 = Lower Leg Left,
# 15, 17 = Upper Arm Left, 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left, 20, 22 = Lower Arm Right,
# 23, 24 = Head
# Every coarse id is <= its fine id, so an in-place ascending remap of a
# label image using this dict never re-maps an already-converted pixel.
FINE_TO_COARSE_SEGMENTATION = {
    1: 1,
    2: 1,
    3: 2,
    4: 3,
    5: 4,
    6: 5,
    7: 6,
    8: 7,
    9: 6,
    10: 7,
    11: 8,
    12: 9,
    13: 8,
    14: 9,
    15: 10,
    16: 11,
    17: 10,
    18: 11,
    19: 12,
    20: 13,
    21: 12,
    22: 13,
    23: 14,
    24: 14
}
# Body 25 Keypoints
# index in this list == OpenPose BODY_25 keypoint id
JOINT_ID = [
    'Nose', 'Neck',
    'RShoulder', 'RElbow', 'RWrist', 'LShoulder', 'LElbow', 'LWrist',
    'MidHip',
    'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle',
    'REye', 'LEye', 'REar', 'LEar',
    'LBigToe', 'LSmallToe', 'LHeel', 'RBigToe', 'RSmallToe', 'RHeel',
    'Background'
]
def _is_valid(keypoints):
    """Return True iff all 7 main (body-box) keypoints have a positive score.

    ``keypoints`` is the BODY_25 list of ``[x, y, score]`` triples, in
    ``JOINT_ID`` order.
    """
    # main_keypoints = BODY BOX
    main_keypoints = {'Nose', 'Neck', 'RShoulder', 'LShoulder', 'RHip', 'LHip', 'MidHip'}
    named = dict(zip(JOINT_ID, keypoints))
    # keep only the main keypoints detected with score > 0
    detected = [name for name, kp in named.items() if name in main_keypoints and kp[2] > 0]
    print('Number of valid keypoints (must be equal to 7):', len(detected))
    return len(detected) == len(main_keypoints)
def _extract_i_from_iuvarr(iuv_arr):
    # channel 0 of a (3, H, W) IUV array: the part-index (I) map
    return iuv_arr[0, :, :]
def _extract_u_from_iuvarr(iuv_arr):
    # channel 1: the U texture coordinate map
    return iuv_arr[1, :, :]
def _extract_v_from_iuvarr(iuv_arr):
    # channel 2: the V texture coordinate map
    return iuv_arr[2, :, :]
def _extract_segm(result_densepose, is_coarse=True):
    """Convert a DensePose result into ``(mask, segm)`` numpy arrays.

    ``segm`` holds per-pixel part ids (coarse ids when ``is_coarse``),
    ``mask`` is 1 wherever a body part was detected.
    """
    labels = result_densepose.labels[None].type(torch.float32)
    iuv_array = torch.cat((labels, result_densepose.uv * 255.0)).type(torch.uint8).cpu().numpy()
    segm = _extract_i_from_iuvarr(iuv_array)
    if is_coarse:
        # in-place remap is safe: coarse ids never exceed their fine ids and
        # the dict iterates in ascending fine order
        for fine_idx, coarse_idx in FINE_TO_COARSE_SEGMENTATION.items():
            segm[segm == fine_idx] = coarse_idx
    mask = (segm > 0).astype(np.uint8)
    return mask, segm
def _segm_xy(segm, segm_id, box_xywh):
# bbox
box_x, box_y, box_w, box_h = np.array(box_xywh).astype(int)
y, x = np.where(segm == segm_id)
# translate from the bbox coordinate to the original image coordinate
return list(zip(x+box_x, y+box_y))
def _get_dict_of_segm_and_keypoints(segm, keypoints, box_xywh):
    """Build name->pixels and name->keypoint dictionaries for one person.

    Returns ``(segm_xy_dict, keypoints_dict)`` where ``segm_xy_dict`` maps
    each ``COARSE_ID`` name to its image-frame pixel list and
    ``keypoints_dict`` maps each ``JOINT_ID`` name to its int keypoint.
    """
    # background (0), hands (2, 3) and feet (4, 5) are intentionally empty
    skipped_ids = (0, 2, 3, 4, 5)
    segm_xy_list = [
        [] if part_id in skipped_ids
        else _segm_xy(segm=segm, segm_id=part_id, box_xywh=box_xywh)
        for part_id in range(len(COARSE_ID))
    ]
    # segments dictionary
    segm_xy_dict = dict(zip(COARSE_ID, segm_xy_list))
    # keypoints dictionary
    keypoints_dict = dict(zip(JOINT_ID, np.array(keypoints).astype(int)))
    return segm_xy_dict, keypoints_dict
def _segm_xy_centroid(segm_xy):
size = len(segm_xy)
x = [x for x, y in segm_xy if not np.isnan(x)]
y = [y for x, y in segm_xy if not np.isnan(y)]
centroid = (sum(x) / size, sum(y) / size)
return centroid
def _get_dict_of_midpoints(segm_xy_dict, keypoints_dict):
    """Midpoint ``(x, y)`` of every drawable body part.

    The head midpoint is the centroid of its segmentation pixels; every
    other part is the midpoint of its two bounding joints.
    """
    cx, cy = _segm_xy_centroid(segm_xy_dict['Head'])
    midpoints_dict = {'Head': np.array([cx, cy])}
    # part -> (proximal joint, distal joint)
    endpoints = {
        'Torso': ('Neck', 'MidHip'),
        'RUpperArm': ('RShoulder', 'RElbow'),
        'RLowerArm': ('RElbow', 'RWrist'),
        'LUpperArm': ('LShoulder', 'LElbow'),
        'LLowerArm': ('LElbow', 'LWrist'),
        'RThigh': ('RHip', 'RKnee'),
        'RCalf': ('RKnee', 'RAnkle'),
        'LThigh': ('LHip', 'LKnee'),
        'LCalf': ('LKnee', 'LAnkle'),
    }
    for part, (joint_a, joint_b) in endpoints.items():
        midpoints_dict[part] = (keypoints_dict[joint_a] + keypoints_dict[joint_b]) / 2
    return midpoints_dict
def _calc_angle(point1, center, point2):
try:
a = np.array(point1)[0:2] - np.array(center)[0:2]
b = np.array(point2)[0:2] - np.array(center)[0:2]
cos_theta = np.dot(a, b)
sin_theta = np.cross(a, b)
rad = np.arctan2(sin_theta, cos_theta)
deg = np.rad2deg(rad)
if np.isnan(rad):
return 0, 0
return rad, deg
except:
return 0, 0
def _get_dict_of_rotated_angles(keypoints_dict, midpoints_dict):
    """Signed rotation (radians) of each body part from its rest direction.

    For every part the angle is measured at its pivot joint, between the
    distal point and a reference point offset from the pivot along the
    part's rest direction (up for head/torso, sideways for arms, down for
    legs).
    """
    # (part, distal point key or None for the head midpoint, pivot joint,
    #  rest-direction offset added to the pivot)
    specs = [
        ('Head', None, 'Neck', (0, -100, 0)),
        ('Torso', 'Neck', 'MidHip', (0, -100, 0)),
        ('RUpperArm', 'RElbow', 'RShoulder', (-100, 0, 0)),
        ('RLowerArm', 'RWrist', 'RElbow', (-100, 0, 0)),
        ('LUpperArm', 'LElbow', 'LShoulder', (100, 0, 0)),
        ('LLowerArm', 'LWrist', 'LElbow', (100, 0, 0)),
        ('RThigh', 'RKnee', 'RHip', (0, 100, 0)),
        ('RCalf', 'RAnkle', 'RKnee', (0, 100, 0)),
        ('LThigh', 'LKnee', 'LHip', (0, 100, 0)),
        ('LCalf', 'LAnkle', 'LKnee', (0, 100, 0)),
    ]
    rotated_angles_dict = {}
    for part, distal, pivot, offset in specs:
        point1 = midpoints_dict['Head'] if distal is None else keypoints_dict[distal]
        reference_point = np.array(keypoints_dict[pivot]) + np.array(offset)
        rad, deg = _calc_angle(point1=point1, center=keypoints_dict[pivot],
                               point2=reference_point)
        rotated_angles_dict[part] = rad
    return rotated_angles_dict
def _draw_segm_and_keypoints(image, segm_xy_dict, keypoints_dict):
    """Draw segment pixels and keypoints onto ``image`` and show it.

    Blocks until a key is pressed, then closes the window. The drawing
    order (head, torso, arms, legs) matches the original so overlapping
    pixels keep the same final colour.
    """
    def paint_part(part):
        # one tiny filled circle per segmentation pixel
        for x, y in segm_xy_dict[part]:
            cv2.circle(image, (x, y), 1, COARSE_TO_COLOR[part], -1)

    def paint_joint(joint):
        # magenta marker for the keypoint
        cv2.circle(image, (keypoints_dict[joint][0], keypoints_dict[joint][1]),
                   5, (255, 0, 255), -1)

    # head
    paint_part('Head')
    paint_joint('Nose')
    # torso
    paint_part('Torso')
    paint_joint('Neck')
    # upper limbs
    for part in ('RUpperArm', 'RLowerArm', 'LUpperArm', 'LLowerArm'):
        paint_part(part)
    for joint in ('RShoulder', 'RElbow', 'RWrist', 'LShoulder', 'LElbow', 'LWrist'):
        paint_joint(joint)
    # lower limbs
    for part in ('RThigh', 'RCalf', 'LThigh', 'LCalf'):
        paint_part(part)
    for joint in ('MidHip', 'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle'):
        paint_joint(joint)
    cv2.imshow('original', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def _euclidian(point1, point2):
return np.sqrt((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2)
def _remove_outlier(segm_xy):
    """Drop points farther than 2x the mean distance from the centroid."""
    # outlier factor
    factor = 2
    # centroid of all points
    xy_mean = np.mean(segm_xy, axis=0)
    # distance of each point to the centroid (computed once, reused below)
    distances = [_euclidian(xy, xy_mean) for xy in segm_xy]
    limit = np.mean(distances) * factor
    return [xy for xy, dist in zip(segm_xy, distances) if dist <= limit]
def | |
job_run["StartedOn"]
end = job_run["CompletedOn"] if "CompletedOn" in job_run else None
execution_details = ComputeExecutionDetails(start, end, dict(job_run))
session_state = get_glue_job_run_state_type(job_run)
if session_state == ComputeSessionStateType.FAILED:
failure_type = get_glue_job_run_failure_type(job_run)
return ComputeFailedSessionState(failure_type, session_desc, [execution_details])
elif session_state == ComputeSessionStateType.COMPLETED:
# STATE CHANGE: Glue Prologue -> CTAS query
compute_response = self._ctas_compute(
active_compute_record.materialized_inputs,
active_compute_record.slot,
active_compute_record.materialized_output,
active_compute_record.execution_context_id,
session_desc,
)
if compute_response.response_type == ComputeResponseType.SUCCESS:
return ComputeSessionState(compute_response.session_desc, ComputeSessionStateType.PROCESSING, [execution_details])
else:
failed_state: ComputeFailedResponse = cast(ComputeFailedResponse, compute_response)
if failed_state.failed_response_type == ComputeFailedResponseType.TRANSIENT:
# ignore the completion and force PROCESSING,
# in the next cycle CTAS state transition will be attempted again
return ComputeSessionState(session_desc, ComputeSessionStateType.PROCESSING, [execution_details])
else:
failure_time = datetime.utcnow()
return ComputeFailedSessionState(
ComputeFailedSessionStateType.UNKNOWN,
session_desc,
[execution_details]
+ [
ComputeExecutionDetails(
str(failure_time),
str(failure_time),
{"ErrorMessage": f"Athena CTAS Query execution could not be started! Response: {compute_response!r}"},
)
],
)
# 1st stage (Glue Prologue) is still running, return job run session state
return ComputeSessionState(session_desc, session_state, [execution_details])
else:
# STATE: CTAS compute
return self._get_ctas_session_state(session_desc, active_compute_record)
def terminate_session(self, active_compute_record: "RoutingTable.ComputeRecord") -> None:
    """Finalize a compute record: persist outputs on success, then clean up.

    On a COMPLETED session this runs the CTAS epilogue — saves the output
    schema (CTAS results carry no header) and emits the completion
    resource (e.g. a _SUCCESS object) when an integrity-check protocol is
    configured. The CTAS table and prologue artifacts are dropped
    regardless of the final state.
    """
    if active_compute_record.session_state.state_type == ComputeSessionStateType.COMPLETED:
        # CTAS EPILOGUE
        # first map materialized output into internal signal form
        output = self.get_platform().storage.map_materialized_signal(active_compute_record.materialized_output)
        path = output.get_materialized_resource_paths()[0]
        path_sep = output.resource_access_spec.path_delimiter()
        # 1- save CTAS schema (CTAS output does not have HEADER), so that downtream nodes will have a chance to
        # reconstruct.
        ctas_table_name = create_output_table_name(
            active_compute_record.materialized_output.alias, active_compute_record.execution_context_id
        )
        response = get_table_metadata(self._athena, self._database_name, ctas_table_name)
        schema_data = json.dumps(response["TableMetadata"]["Columns"])
        schema_file = output.resource_access_spec.data_schema_file
        self.get_platform().storage.save(schema_data, [], path.strip(path_sep) + path_sep + schema_file)
        # 2- activate completion, etc
        if output.domain_spec.integrity_check_protocol:
            from intelliflow.core.signal_processing.analysis import INTEGRITY_CHECKER_MAP

            integrity_checker = INTEGRITY_CHECKER_MAP[output.domain_spec.integrity_check_protocol.type]
            completion_resource_name = integrity_checker.get_required_resource_name(
                output.resource_access_spec, output.domain_spec.integrity_check_protocol
            )
            if completion_resource_name:  # ex: _SUCCESS file/object
                # TODO evaluate Storage::add_to_internal(signal, sub_folder, data, object_name)
                # see 'data_emulation::add_test_data' also.
                folder = path[path.find(output.resource_access_spec.FOLDER) :]
                self.get_platform().storage.save("", [folder], completion_resource_name)
    # cleanup no matter what the state is
    self._cleanup_ctas_table(active_compute_record)
    self._cleanup_ctas_prologue_artifacts(active_compute_record)
def _ctas_compute(
    self,
    materialized_inputs: List[Signal],
    slot: Slot,
    materialized_output: Signal,
    execution_ctx_id: str,
    session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
    """Build and asynchronously start the Athena CTAS query materializing the output partition.

    Steps:
      0- wipe the output partition (a previous attempt may have left partial data behind);
      1- parametrize the user query with slot extra params and the output's dimension values;
      2- redirect input aliases to the filtered views created by the prologue stage;
      3- wrap the user query in a CREATE TABLE ... WITH (...) AS statement honoring the
         output's declared data format / compression / delimiter, then submit it (wait=False).

    :param materialized_inputs: input signals referenced (by alias) in the user query
    :param slot: compute slot carrying the user SQL (slot.code) and extra params
    :param materialized_output: output signal; its partition path becomes CTAS external_location
    :param execution_ctx_id: unique execution context id used to scope table/view names
    :param session_desc: previous session descriptor if this is a retry (intentionally unused:
        cleanup must happen on every attempt, see step 0)
    :return: ComputeSuccessfulResponse in PROCESSING state, or a ComputeFailedResponse
    """
    ctas_query_name: str = f"IntelliFlow_Athena_CTAS_{materialized_output.alias}_{execution_ctx_id}"
    # 0- Clean up the output partition
    # always try to do this since even after a retry on ctas_compute there might be some partial
    # data left over from previous attempt (so don't check session_desc or existence of resource_path in it).
    output_as_internal = self.get_platform().storage.map_materialized_signal(materialized_output)
    partition_completely_wiped_out = exponential_retry(self.get_platform().storage.delete_internal, [], output_as_internal)
    if not partition_completely_wiped_out:
        # return TRANSIENT error and let the orchestration retry in the next cycle.
        return ComputeFailedResponse(
            ComputeFailedResponseType.TRANSIENT,
            ComputeResourceDesc(ctas_query_name, None, driver=self.__class__),
            "PARTITION CANNOT BE DELETED",
            f"The following output ({materialized_output.alias!r}) partition could not be deleted "
            f"before Athena CTAS query stage: {materialized_output.get_materialized_resource_paths()[0]}",
        )
    user_query = slot.code
    # 1- parametrize the query with user provided params
    # but more importantly with dynamic dimensions of the output
    # Example:
    #   select * from athena_input where region_name = {region}
    #     ->
    #   select * from athena_input where region_name = 'NA'
    extra_params: Dict[str, Any] = dict(slot.extra_params)
    extra_params.update(
        {
            dimension.name: dimension.value
            for dimension in materialized_output.domain_spec.dimension_filter_spec.get_flattened_dimension_map().values()
        }
    )
    if extra_params:
        user_query = user_query.format_map(extra_params)
    # 2- if any inputs, then replace their alias with view names from the prologue stage
    for input_signal in materialized_inputs:
        athena_view_name_from_prologue = create_input_filtered_view_name(input_signal.alias, execution_ctx_id)
        user_query = user_query.replace(input_signal.alias, athena_view_name_from_prologue)
    ctas_table_name = create_output_table_name(materialized_output.alias, execution_ctx_id)
    # 3- turn user query into a CTAS query
    dataset_access_spec = cast(DatasetSignalSourceAccessSpec, materialized_output.resource_access_spec)
    data_format: DatasetSignalSourceFormat = dataset_access_spec.data_format
    data_delimiter: str = dataset_access_spec.data_delimiter
    # NOTE(review): data_encoding / data_header_exists are read but not referenced in the CTAS
    # construction below — presumably handled by the prologue/epilogue stages; confirm.
    data_encoding: str = dataset_access_spec.data_encoding
    data_header_exists: bool = dataset_access_spec.data_header_exists
    data_compression: Optional[str] = dataset_access_spec.data_compression
    output_partition_path = materialized_output.get_materialized_resource_paths()[0]
    # CTAS external_location must point at a folder (trailing slash).
    if not output_partition_path.rstrip().endswith("/"):
        output_partition_path = output_partition_path + "/"
    # Refer
    #  https://docs.aws.amazon.com/athena/latest/ug/create-table-as.html
    ctas_query: str = f"""CREATE TABLE {ctas_table_name}
    WITH (
        external_location = '{output_partition_path}'"""
    # append the format (and optional compression / delimiter) properties per output spec
    if data_format == DatasetSignalSourceFormat.PARQUET:
        ctas_query = (
            ctas_query
            + f""",
        format='PARQUET'"""
        )
        if data_compression:
            ctas_query = (
                ctas_query
                + f""",
        parquet_compression = '{data_compression.upper()}'
    """
            )
    elif data_format == DatasetSignalSourceFormat.ORC:
        ctas_query = (
            ctas_query
            + f""",
        format='ORC'"""
        )
        if data_compression:
            ctas_query = (
                ctas_query
                + f""",
        orc_compression = '{data_compression.upper()}'
    """
            )
    elif data_format == DatasetSignalSourceFormat.CSV:
        ctas_query = (
            ctas_query
            + f""",
        format='TEXTFILE'"""
        )
        if data_delimiter:
            ctas_query = (
                ctas_query
                + f""",
        field_delimiter = '{data_delimiter}'
    """
            )
    elif data_format == DatasetSignalSourceFormat.JSON:
        ctas_query = (
            ctas_query
            + f""",
        format='JSON'"""
        )
    elif data_format == DatasetSignalSourceFormat.AVRO:
        ctas_query = (
            ctas_query
            + f""",
        format='AVRO'"""
        )
    else:
        # inform user about Athena's default behaviour
        module_logger.critical(
            f"Using PARQUET by default as the output data format for Athena execution" f" output: {output_partition_path!r}."
        )
    # close the WITH block
    ctas_query = (
        ctas_query
        + f"""
    )
    """
    )
    # finally append the transformed user query
    ctas_query = (
        ctas_query
        + f""" AS
    {user_query}
    """
    )
    if not user_query.rstrip().endswith(";"):
        ctas_query = ctas_query + ";"
    try:
        # make sure that CTAS table is dropped (safeguard against a failure in a previous cleanup)
        query(self._athena, f"DROP TABLE IF EXISTS {ctas_table_name}", self._database_name, self._workgroup_id, wait=True)
        # run the query!
        ctas_execution_id = query(self._athena, ctas_query, self._database_name, self._workgroup_id, wait=False)
    except ClientError as error:
        error_code = error.response["Error"]["Code"]
        # retryable AWS/Athena errors map to TRANSIENT (orchestration retries);
        # anything else is treated as a defect in the user's slot (query).
        if error_code in ATHENA_CLIENT_RETRYABLE_EXCEPTION_LIST or error_code in AWS_COMMON_RETRYABLE_ERRORS:
            failed_response_type = ComputeFailedResponseType.TRANSIENT
        else:
            failed_response_type = ComputeFailedResponseType.BAD_SLOT
        return ComputeFailedResponse(
            failed_response_type,
            ComputeResourceDesc(ctas_query_name, ctas_table_name, driver=self.__class__),
            error_code,
            str(error.response["Error"]),
        )
    return ComputeSuccessfulResponse(
        ComputeSuccessfulResponseType.PROCESSING,
        ComputeSessionDesc(ctas_execution_id, ComputeResourceDesc(ctas_query_name, ctas_table_name, driver=self.__class__)),
    )
def _get_ctas_session_state(
    self, session_desc: ComputeSessionDesc, active_compute_record: RoutingTable.ComputeRecord
) -> ComputeSessionState:
    """Translate the Athena execution status of the CTAS query into the Compute session model.

    Cleanup of the CTAS table / prologue artifacts is deliberately deferred to
    terminate_session(): at this point we cannot know whether orchestration will retry
    the compute — even a non-transient failure may be retried per the user's retry conf.
    """
    response = exponential_retry(
        self._athena.get_query_execution,
        ATHENA_CLIENT_RETRYABLE_EXCEPTION_LIST,
        QueryExecutionId=session_desc.session_id,
    )
    # Athena states: QUEUED | RUNNING | SUCCEEDED | FAILED | CANCELLED
    query_status = response["QueryExecution"]["Status"]
    details = ComputeExecutionDetails(
        query_status.get("SubmissionDateTime", None),
        query_status.get("CompletionDateTime", None),
        dict(response),
    )
    state_type = get_athena_query_execution_state_type(query_status["State"])
    if state_type == ComputeSessionStateType.FAILED:
        # see terminate_session for the actual cleanup of CTAS artifacts
        failure_type = get_athena_query_execution_failure_type(response)
        return ComputeFailedSessionState(failure_type, session_desc, [details])
    return ComputeSessionState(session_desc, state_type, [details])
def _cleanup_ctas_table(self, active_compute_record: RoutingTable.ComputeRecord) -> None:
    """Drop the per-execution CTAS table of this compute record (fire-and-forget)."""
    table = create_output_table_name(
        active_compute_record.materialized_output.alias,
        active_compute_record.execution_context_id,
    )
    drop_statement = f"DROP TABLE IF EXISTS {table}"
    query(self._athena, drop_statement, self._database_name, self._workgroup_id, wait=False)
def _cleanup_ctas_prologue_artifacts(self, active_compute_record: RoutingTable.ComputeRecord) -> None:
    """Drop the per-input tables and filtered views created by the CTAS prologue (fire-and-forget)."""
    context_id = active_compute_record.execution_context_id
    for signal in active_compute_record.materialized_inputs:
        table = create_input_table_name(signal.alias, context_id)
        query(self._athena, f"DROP TABLE IF EXISTS {table}", self._database_name, self._workgroup_id, wait=False)
        view = create_input_filtered_view_name(signal.alias, context_id)
        query(self._athena, f"DROP VIEW IF EXISTS {view}", self._database_name, self._workgroup_id, wait=False)
@classmethod
def _build_database_name(cls, params: ConstructParamsDict):
    """Derive the Athena database name from the context unique id, in Athena-safe format."""
    unique_context_id = params[ActivationParams.UNIQUE_ID_FOR_CONTEXT]
    return to_athena_format(unique_context_id)
def dev_init(self, platform: "DevelopmentPlatform") -> None:
super().dev_init(platform)
# OPTIMIZATION: This impl relies on S3 based Storage for seamless integration other upstream/downstream
# internal data.
storage_path: str = platform.storage.get_storage_resource_path()
if not (storage_path.startswith("arn:aws:s3") or storage_path.startswith("s3:")):
raise TypeError(f"{self.__class__.__name__} driver should be used with an S3 based Storage!")
self._database_name = self._build_database_name(self._params)
# Refer
# https://docs.aws.amazon.com/athena/latest/ug/glue-best-practices.html
# Database, Table, and Column Names
# When you create schema in AWS Glue to query in Athena, consider the following:
# - A database name cannot be longer than 252 characters.
# - A table name cannot be longer than 255 characters.
# - A column name cannot be longer than 128 characters.
# The only acceptable characters for database names, table names, and column names are lowercase letters, numbers, and the underscore character.
if len(self._database_name) > 252:
raise ValueError(f"Long application name {platform.context_id!r} caused internal Athena database name to exceed 252!")
# workgroup primitives
self._workgroup_id = self._params[ActivationParams.UNIQUE_ID_FOR_CONTEXT]
internal_data_bucket_name = self.get_platform().storage.bucket_name
self._output_location = f"s3://{internal_data_bucket_name}/driver_data/{self.__class__.__name__}/query_location/"
# CTAS prologue Glue job
self._glue_job_lang_map: Dict[GlueJobLanguage, Dict[str, Dict[str, Any]]] = {
GlueJobLanguage.PYTHON: {
"2.0": {
"job_name": "",
"job_arn": "",
"boilerplate": GlueAthenaCTASPrologue,
"suffix": "v2_0",
"ext": "py",
"params": {
"WorkerType": GlueWorkerType.G_1X.value,
"NumberOfWorkers": 20, # heuristical: optimal # of instances to read 'schema' operation in Spark.
"GlueVersion": "2.0", # FUTURE analyze advantages of 3.0 over 2.0 as our athena prologue
},
}
}
}
for lang, lang_spec in self._glue_job_lang_map.items():
for version, | |
"""
Each source in a data set has file(s) that contain keys and a meta info file.
To read from a dataset, supply the path to a meta file to DataSet.prepare().
To read multiple data sets, use DataSet.find().
Then get a DataSetIterator using DataSet.iterator().
To output keys, use Writer.import_meta() with the proper DataSetMeta.
Example usage can be found in tasks.convert().
"""
import json
import copy
import csv
import os
import gzip
import io
import hashlib
import rsabias.core.key as key
class DataSetException(Exception):
    """Raised for data-set level errors (missing/unsupported type or format, malformed meta)."""
class DataSetMeta:
    """Data set metadata - source, details, list of files, ..."""

    class File:
        """A file in the data set, for a single source and key length."""

        def __init__(self, name, records, digest):
            self.name = name  # file name
            self.records = records  # number of records (keys)
            self.digest = digest  # hash of the decompressed content

        @staticmethod
        def import_dict(d):
            """Build a File from its dictionary form (missing keys become None)."""
            name = d.get('name', None)
            records = d.get('records', None)
            digest = d.get('sha256', None)
            return DataSetMeta.File(name, records, digest)

        def export_dict(self):
            """Inverse of import_dict: serialize to a plain dictionary."""
            return {'name': self.name, 'records': self.records,
                    'sha256': self.digest}

    class Details:
        """Detailed information about a data set."""

        def __init__(self, base_dict, bitlen, category, compressed, fips_mode,
                     ds_format, header, name, public_only, separator, version, group=None):
            self.base_dict = base_dict  # dictionary "feature name: base"
            self.bitlen = bitlen  # binary length of the modulus (e.g., 2048)
            self.category = category  # source type (e.g., Library, Card, HSM)
            self.compressed = compressed  # compressed with GZIP or plain text
            self.fips_mode = fips_mode  # FIPS mode of a library was active
            self.format = ds_format  # data set format (e.g., "json", "csv")
            self.header = header  # CSV header as a list of strings
            self.name = name  # name of the source (e.g. "OpenSSL")
            self.public_only = public_only  # only public keys available
            self.separator = separator  # CSV separator (e.g., ";", ",")
            self.version = version  # source version (e.g., "1.0.2g")
            self.group = group  # optional grouping of related sources

        @staticmethod
        def import_dict(d):
            """Build Details from its dictionary form (missing keys become None)."""
            return DataSetMeta.Details(
                d.get('base_dict', None), d.get('bitlen', None),
                d.get('category', None), d.get('compressed', None),
                d.get('fips_mode', None), d.get('format', None),
                d.get('header', None), d.get('name', None),
                d.get('public_only', None), d.get('separator', None),
                d.get('version', None), d.get('group', None))

        def export_dict(self):
            """Inverse of import_dict: serialize to a plain dictionary."""
            # TODO opaque notes, currently undefined properties are not copied over
            return {'base_dict': self.base_dict, 'bitlen': self.bitlen,
                    'category': self.category, 'compressed': self.compressed,
                    'fips_mode': self.fips_mode, 'format': self.format,
                    'header': self.header, 'name': self.name,
                    'public_only': self.public_only,
                    'separator': self.separator, 'version': self.version, 'group': self.group}

    def __init__(self, ds_type, files, details):
        self.type = ds_type  # data set type (e.g., "reference")
        self.files = files  # list of DataSetMeta.File
        self.details = details  # DataSetMeta.Details

    @staticmethod
    def import_dict(meta):
        """Build DataSetMeta from a parsed meta.json dictionary.

        BUGFIX: the fallback for a missing 'details' key is now an empty dict;
        the previous default ([]) made Details.import_dict call .get() on a list,
        raising AttributeError.
        """
        ds_type = meta.get('type', None)
        files = [DataSetMeta.File.import_dict(f)
                 for f in meta.get('files', [])]
        details = DataSetMeta.Details.import_dict(meta.get('details', {}))
        return DataSetMeta(ds_type, files, details)

    def export_dict(self):
        """Inverse of import_dict: serialize to a plain dictionary."""
        return {'type': self.type,
                'files': [f.export_dict() for f in self.files],
                'details': self.details.export_dict()}

    def count_records(self):
        """Return the total number of records (keys) across all files."""
        return sum(f.records for f in self.files)

    def source(self):
        """
        Return the name of the source as a tuple.
        (Category, Name, Version [FIPS], Bit length [PUBLIC])
        :return: a tuple representing the name of the source
        """
        is_fips = ' FIPS' if self.details.fips_mode else ''
        is_public = ' PUBLIC' if self.details.public_only else ''
        cat = self.details.category
        return (cat if cat else 'No category',
                self.details.name,
                self.details.version + is_fips,
                str(self.details.bitlen) + is_public)

    def source_path(self):
        """
        Return the typical directory path for the data set.
        :return: relative directory path, where the data set should be found
        """
        path = ''
        for part in self.source():
            # strip leading dots to avoid making hidden directories
            path = os.path.join(path, str(part).lstrip('.'))
        return path

    def get_full_name(self):
        """Return a human-readable single-string identification of the source."""
        fps_mode = ' FIPS ' if self.details.fips_mode is True else ' '
        return (self.details.category + ' ' + self.details.name + ' '
                + self.details.version + fps_mode + str(self.details.bitlen))
class DataSet:
    """
    Data set (of keys) that can be instantiated from metadata.
    Provides transparent access to keys in different file types
    through a forward-only iterator.
    """

    def __init__(self, meta):
        self.meta = meta  # DataSetMeta describing this data set

    @staticmethod
    def import_meta(meta, path=None):
        """
        Create a DataSet from metadata (DataSetMeta).
        :param meta: DataSetMeta meta data
        :param path: directory where the data set files are located
        :return: DataSet
        :raises DataSetException: on a missing or unsupported type
        """
        if not meta.type:
            raise DataSetException('Missing file type:\n{}'.format(meta))
        if meta.type == 'reference':
            return ReferenceDataSet(meta, path)
        raise DataSetException(
            'Unsupported data set type "{}"'.format(meta.type))

    @staticmethod
    def __find_paths(start, filename):
        # every directory under `start` that directly contains `filename`
        return [root for root, _dirs, files in os.walk(start)
                if filename in files]

    @staticmethod
    def prepare(path, meta_filename):
        """
        Create a DataSet from a directory containing the metadata file.
        :param path: directory path to the meta file
        :param meta_filename: name of the meta file (e.g., "meta.json")
        :return: DataSet
        """
        with open(os.path.join(path, meta_filename)) as fp:
            meta = DataSetMeta.import_dict(json.load(fp))
        return DataSet.import_meta(meta, path)

    @staticmethod
    def find(start, meta_name='meta.json'):
        """
        Find all data sets starting from a root path.
        :param start: root directory path
        :param meta_name: file name of the meta file
        :return: list of DataSet
        """
        return [DataSet.prepare(parent, meta_name)
                for parent in DataSet.__find_paths(start, meta_name)]

    def iterator(self, prime_wise=False):
        """
        Get an iterator for the data set (implemented by subclasses).
        :return: iterator
        """
        pass

    def __str__(self):
        return str(vars(self))

    def export_json(self):
        """Serialize instance attributes to a JSON string."""
        return json.dumps(vars(self), sort_keys=False, indent=4)
class CatchIterator:
    """
    Iterator wrapper that reports keys which fail to parse and keeps going,
    instead of letting the exception abort the whole iteration.
    """

    def __init__(self, iter, name):
        self.iter = iter  # the wrapped key iterator
        self.name = name  # data set name, used in error reports

    def __iter__(self):
        return self

    def __next__(self):
        # Keep pulling from the underlying iterator until a key parses cleanly.
        while True:
            try:
                return next(self.iter)
            except key.KeyException as err:
                print('Malformed key in dataset {}'.format(self.name))
                print(err.args)
class ReferenceDataSet(DataSet):
    """
    Reference data set of keys generated from a source.
    """

    def __init__(self, meta, path):
        DataSet.__init__(self, meta)
        self.path = path  # directory containing the data files

    def iterator(self, prime_wise=False):
        """
        Get an iterator for the data set.
        :param prime_wise: when True, wrap the iterator to yield single primes
        :return: iterator
        :raises DataSetException: for an unknown data set format
        """
        details = self.meta.details
        fmt = details.format
        bases = details.base_dict
        compressed = details.compressed
        separator = details.separator if details.separator else ';'
        files = self.meta.files
        # Built at call time: the iterator classes are defined later in this module.
        iterator_classes = {
            'json': JSONFileIterator,
            'jcalgtest': AlgTestFileIterator,
            'multiline': MultiLineFileIterator,
            'multilinebase64': MultiLineBase64FileIterator,
            'asn1': ASN1FileIterator,
            'pem': PEMFileIterator,
            'tpm_multiline': TPMMultiLineFileIterator,
            'tpm_modulus': TPMModulusFileIterator,
            'tpm_xml': TPMXMLFileIterator,
        }
        if fmt == 'csv':
            # CSV is the only format that takes a separator
            it = CSVFileIterator(files, self.path, bases, compressed=compressed,
                                 separator=separator)
        elif fmt in iterator_classes:
            it = iterator_classes[fmt](files, self.path, bases, compressed=compressed)
        else:
            raise DataSetException('Unsupported DS format "{}"'.format(fmt))
        return SinglePrimeIterator(it) if prime_wise else it
class HashIO(io.BytesIO):
    """
    Wrapper that hashes a file's content as it is read or written.
    Hashing must happen on the text (decompressed) stream, since compression
    may produce different binary files for identical content.
    """

    def __init__(self, file):
        super().__init__()
        self.file = file  # the wrapped file object
        self.hash = hashlib.sha256()  # running digest of all bytes seen

    def _track(self, data):
        # Fold non-empty data into the running digest, then pass it through.
        if data:
            self.hash.update(data)
        return data

    def read(self, size=-1):
        return self._track(self.file.read(size))

    def read1(self, size=-1):
        return self._track(self.file.read1(size))

    def readinto(self, b):
        count = self.file.readinto(b)
        if count > 0:
            self.hash.update(b[0:count])
        return count

    def readinto1(self, b):
        count = self.file.readinto1(b)
        if count > 0:
            self.hash.update(b[0:count])
        return count

    def write(self, b):
        count = self.file.write(b)
        if count > 0:
            self.hash.update(b[0:count])
        return count

    def __next__(self):
        line = self.file.__next__()
        self.hash.update(line)
        return line

    def digest(self):
        """Hex digest of everything read/written so far."""
        return self.hash.hexdigest()

    def close(self):
        self.file.close()
class DataSetIterator:
    """Abstract forward-only iterator over a data set."""

    def __init__(self):
        pass

    def __iter__(self):
        # an iterator is its own iterable
        return self

    def __next__(self):
        # Overridden by subclasses; the base implementation yields nothing.
        pass
class FileDataSetIterator(DataSetIterator):
""" Iterate over data saved in files. """
def __init__(self, files, path, base_dict, binary=False, compressed=True,
             check_hash=True, separator=None):
    """
    :param files: list of DataSetMeta.File entries to iterate over
    :param path: directory containing the data files
    :param base_dict: dictionary "feature name: base"
    :param binary: True if binary data, False if text
    :param compressed: True if the files are GZIP compressed
    :param check_hash: if True, verify the content hash while reading
    :param separator: CSV separator (None for non-CSV formats)
    """
    super().__init__()
    # private, reversed copy so files can be consumed cheaply with pop()
    self.files = copy.deepcopy(files)
    self.files.reverse()
    self.current_file = None  # currently open file
    self.path = path  # base directory of the data set files
    self.base_dict = base_dict  # dictionary "feature name: base"
    self.binary = binary  # True if binary data, False if text
    self.compressed = compressed  # True if GZIP compressed
    self.check_hash = check_hash  # if True, check hash when reading
    self.separator = separator  # CSV separator
def __next__(self):
if self.current_file is None:
if not self.files: # empty list
raise StopIteration
next_file = self.files.pop()
filename = os.path.join(self.path, next_file.name)
file = open(filename, mode='rb')
self.hash = self.check_hash and next_file.digest
if self.hash:
self.original_hash = next_file.digest
self.hash_file = None
if | |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_window.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1656, 962)
MainWindow.setAutoFillBackground(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.label_3 = QtWidgets.QLabel(self.tab)
self.label_3.setGeometry(QtCore.QRect(10, 280, 190, 30))
self.label_3.setMouseTracking(False)
self.label_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_3.setObjectName("label_3")
self.listWidget = QtWidgets.QListWidget(self.tab)
self.listWidget.setGeometry(QtCore.QRect(410, 30, 211, 192))
self.listWidget.setObjectName("listWidget")
self.pushButton = QtWidgets.QPushButton(self.tab)
self.pushButton.setGeometry(QtCore.QRect(470, 230, 93, 28))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(self.tab)
self.label_2.setGeometry(QtCore.QRect(630, 0, 190, 30))
self.label_2.setMouseTracking(False)
self.label_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.tableWidget = QtWidgets.QTableWidget(self.tab)
self.tableWidget.setGeometry(QtCore.QRect(10, 310, 931, 191))
self.tableWidget.setAutoFillBackground(False)
self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget.setAlternatingRowColors(False)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(1)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setItem(0, 2, item)
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setDefaultSectionSize(225)
self.tableWidget.horizontalHeader().setHighlightSections(False)
self.tableWidget.horizontalHeader().setMinimumSectionSize(125)
self.tableWidget.horizontalHeader().setSortIndicatorShown(False)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.verticalHeader().setVisible(False)
self.tableWidget.verticalHeader().setDefaultSectionSize(28)
self.tableWidget.verticalHeader().setMinimumSectionSize(28)
self.pushButton_2 = QtWidgets.QPushButton(self.tab)
self.pushButton_2.setGeometry(QtCore.QRect(850, 230, 93, 28))
self.pushButton_2.setObjectName("pushButton_2")
self.listWidget_2 = QtWidgets.QListWidget(self.tab)
self.listWidget_2.setGeometry(QtCore.QRect(630, 30, 311, 192))
self.listWidget_2.setObjectName("listWidget_2")
self.label_4 = QtWidgets.QLabel(self.tab)
self.label_4.setGeometry(QtCore.QRect(10, 0, 190, 30))
self.label_4.setMouseTracking(False)
self.label_4.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.label = QtWidgets.QLabel(self.tab)
self.label.setGeometry(QtCore.QRect(410, 0, 190, 30))
self.label.setMouseTracking(False)
self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.lineEdit = QtWidgets.QLineEdit(self.tab)
self.lineEdit.setGeometry(QtCore.QRect(710, 230, 131, 28))
self.lineEdit.setObjectName("lineEdit")
self.textBrowser = QtWidgets.QTextBrowser(self.tab)
self.textBrowser.setGeometry(QtCore.QRect(10, 30, 391, 191))
self.textBrowser.setObjectName("textBrowser")
self.pushButton_10 = QtWidgets.QPushButton(self.tab)
self.pushButton_10.setGeometry(QtCore.QRect(10, 510, 81, 28))
self.pushButton_10.setObjectName("pushButton_10")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.gridLayout = QtWidgets.QGridLayout(self.tab_2)
self.gridLayout.setObjectName("gridLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 0, 2, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 2, 7, 1, 1)
self.label_7 = QtWidgets.QLabel(self.tab_2)
self.label_7.setMouseTracking(False)
self.label_7.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1)
self.pushButton_9 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_9.setObjectName("pushButton_9")
self.gridLayout.addWidget(self.pushButton_9, 2, 4, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 3, 1, 1, 2)
self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_2)
font = QtGui.QFont()
font.setPointSize(12)
self.lineEdit_2.setFont(font)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout.addWidget(self.lineEdit_2, 7, 8, 1, 8)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem3, 5, 5, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem4, 2, 15, 1, 1)
self.lineEdit_5 = QtWidgets.QLineEdit(self.tab_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEdit_5.sizePolicy().hasHeightForWidth())
self.lineEdit_5.setSizePolicy(sizePolicy)
self.lineEdit_5.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_5.setObjectName("lineEdit_5")
self.gridLayout.addWidget(self.lineEdit_5, 2, 8, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem5, 2, 5, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem6, 5, 6, 1, 9)
self.pushButton_7 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_7.setObjectName("pushButton_7")
self.gridLayout.addWidget(self.pushButton_7, 5, 3, 1, 1)
self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout.addWidget(self.pushButton_3, 2, 3, 1, 1)
self.tableWidget_2 = QtWidgets.QTableWidget(self.tab_2)
self.tableWidget_2.setAutoFillBackground(False)
self.tableWidget_2.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_2.setAlternatingRowColors(False)
self.tableWidget_2.setObjectName("tableWidget_2")
self.tableWidget_2.setColumnCount(2)
self.tableWidget_2.setRowCount(1)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget_2.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget_2.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget_2.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_2.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_2.setItem(0, 1, item)
self.tableWidget_2.horizontalHeader().setCascadingSectionResizes(True)
self.tableWidget_2.horizontalHeader().setDefaultSectionSize(80)
self.tableWidget_2.horizontalHeader().setHighlightSections(False)
self.tableWidget_2.horizontalHeader().setMinimumSectionSize(125)
self.tableWidget_2.horizontalHeader().setSortIndicatorShown(True)
self.tableWidget_2.horizontalHeader().setStretchLastSection(True)
self.tableWidget_2.verticalHeader().setVisible(False)
self.tableWidget_2.verticalHeader().setDefaultSectionSize(28)
self.tableWidget_2.verticalHeader().setMinimumSectionSize(28)
self.tableWidget_2.verticalHeader().setSortIndicatorShown(False)
self.gridLayout.addWidget(self.tableWidget_2, 1, 0, 1, 3)
self.pushButton_5 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout.addWidget(self.pushButton_5, 7, 16, 1, 1)
self.label_11 = QtWidgets.QLabel(self.tab_2)
self.label_11.setMouseTracking(False)
self.label_11.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 3, 3, 1, 1)
self.pushButton_6 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_6.setObjectName("pushButton_6")
self.gridLayout.addWidget(self.pushButton_6, 7, 17, 1, 1)
self.label_8 = QtWidgets.QLabel(self.tab_2)
self.label_8.setMouseTracking(False)
self.label_8.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_8.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 0, 3, 1, 1)
self.listWidget_3 = QtWidgets.QListWidget(self.tab_2)
self.listWidget_3.setObjectName("listWidget_3")
self.gridLayout.addWidget(self.listWidget_3, 4, 0, 1, 3)
self.tableWidget_4 = QtWidgets.QTableWidget(self.tab_2)
self.tableWidget_4.setAutoFillBackground(False)
self.tableWidget_4.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_4.setAlternatingRowColors(False)
self.tableWidget_4.setObjectName("tableWidget_4")
self.tableWidget_4.setColumnCount(5)
self.tableWidget_4.setRowCount(1)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget_4.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget_4.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget_4.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget_4.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget_4.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignVCenter)
self.tableWidget_4.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_4.setItem(0, 1, item)
self.tableWidget_4.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget_4.horizontalHeader().setDefaultSectionSize(80)
self.tableWidget_4.horizontalHeader().setHighlightSections(False)
self.tableWidget_4.horizontalHeader().setMinimumSectionSize(125)
self.tableWidget_4.horizontalHeader().setSortIndicatorShown(False)
self.tableWidget_4.horizontalHeader().setStretchLastSection(True)
self.tableWidget_4.verticalHeader().setVisible(False)
self.tableWidget_4.verticalHeader().setDefaultSectionSize(28)
self.tableWidget_4.verticalHeader().setMinimumSectionSize(28)
self.gridLayout.addWidget(self.tableWidget_4, 4, 3, 1, 13)
self.treeWidget = QtWidgets.QTreeWidget(self.tab_2)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.header().setDefaultSectionSize(140)
self.gridLayout.addWidget(self.treeWidget, 1, 3, 1, 15)
self.label_6 = QtWidgets.QLabel(self.tab_2)
self.label_6.setMouseTracking(False)
self.label_6.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_6.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 0, 0, 1, 2)
self.label_9 = QtWidgets.QLabel(self.tab_2)
self.label_9.setMouseTracking(False)
self.label_9.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_9.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 3, 16, 1, 1)
self.pushButton_26 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_26.setObjectName("pushButton_26")
self.gridLayout.addWidget(self.pushButton_26, 2, 9, 1, 1)
self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab_2)
self.textBrowser_2.setObjectName("textBrowser_2")
self.gridLayout.addWidget(self.textBrowser_2, 4, 16, 1, 2)
self.pushButton_27 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_27.setObjectName("pushButton_27")
self.gridLayout.addWidget(self.pushButton_27, 2, 10, 1, 1)
self.pushButton_8 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_8.setObjectName("pushButton_8")
self.gridLayout.addWidget(self.pushButton_8, 5, 4, 1, 1)
self.pushButton_4 = QtWidgets.QPushButton(self.tab_2)
self.pushButton_4.setObjectName("pushButton_4")
self.gridLayout.addWidget(self.pushButton_4, 6, 8, 1, 1)
self.label_17 = QtWidgets.QLabel(self.tab_2)
self.label_17.setObjectName("label_17")
self.gridLayout.addWidget(self.label_17, 6, 9, 1, 1)
self.label_18 = QtWidgets.QLabel(self.tab_2)
self.label_18.setText("")
self.label_18.setObjectName("label_18")
self.gridLayout.addWidget(self.label_18, 6, 10, 1, 8)
self.label_7.raise_()
self.label_6.raise_()
self.tableWidget_2.raise_()
self.treeWidget.raise_()
self.tableWidget_4.raise_()
self.textBrowser_2.raise_()
self.label_9.raise_()
self.listWidget_3.raise_()
self.label_11.raise_()
self.label_8.raise_()
self.pushButton_3.raise_()
self.pushButton_7.raise_()
self.pushButton_9.raise_()
self.pushButton_8.raise_()
self.lineEdit_2.raise_()
self.pushButton_5.raise_()
self.pushButton_6.raise_()
self.pushButton_26.raise_()
self.lineEdit_5.raise_()
self.pushButton_27.raise_()
self.pushButton_4.raise_()
self.label_17.raise_()
self.label_18.raise_()
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.graphWidget = PlotWidget(self.tab_3)
self.graphWidget.setGeometry(QtCore.QRect(10, 50, 1451, 691))
self.graphWidget.setObjectName("graphWidget")
self.pushButton_11 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_11.setGeometry(QtCore.QRect(1490, 130, 111, 31))
self.pushButton_11.setObjectName("pushButton_11")
self.pushButton_12 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_12.setGeometry(QtCore.QRect(1490, 230, 111, 31))
self.pushButton_12.setCheckable(False)
self.pushButton_12.setChecked(False)
self.pushButton_12.setAutoDefault(False)
self.pushButton_12.setDefault(False)
self.pushButton_12.setFlat(False)
self.pushButton_12.setObjectName("pushButton_12")
self.pushButton_13 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_13.setGeometry(QtCore.QRect(1490, 270, 111, 31))
self.pushButton_13.setCheckable(True)
self.pushButton_13.setObjectName("pushButton_13")
self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)
self.textBrowser_3.setGeometry(QtCore.QRect(90, 10, 760, 31))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.textBrowser_3.sizePolicy().hasHeightForWidth())
self.textBrowser_3.setSizePolicy(sizePolicy)
self.textBrowser_3.setObjectName("textBrowser_3")
self.label_10 = QtWidgets.QLabel(self.tab_3)
self.label_10.setGeometry(QtCore.QRect(10, 10, 71, 30))
self.label_10.setMouseTracking(False)
self.label_10.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_10.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_10.setObjectName("label_10")
self.tableWidget_5 = QtWidgets.QTableWidget(self.tab_3)
self.tableWidget_5.setGeometry(QtCore.QRect(10, 760, 1451, 91))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tableWidget_5.sizePolicy().hasHeightForWidth())
self.tableWidget_5.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(9)
self.tableWidget_5.setFont(font)
self.tableWidget_5.setAutoFillBackground(False)
self.tableWidget_5.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tableWidget_5.setAlternatingRowColors(False)
self.tableWidget_5.setObjectName("tableWidget_5")
self.tableWidget_5.setColumnCount(2)
self.tableWidget_5.setRowCount(3)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget_5.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget_5.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.tableWidget_5.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_5.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget_5.setHorizontalHeaderItem(1, item)
self.tableWidget_5.horizontalHeader().setVisible(False)
self.tableWidget_5.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget_5.horizontalHeader().setDefaultSectionSize(127)
self.tableWidget_5.horizontalHeader().setHighlightSections(False)
self.tableWidget_5.horizontalHeader().setMinimumSectionSize(125)
self.tableWidget_5.horizontalHeader().setSortIndicatorShown(False)
self.tableWidget_5.horizontalHeader().setStretchLastSection(False)
self.tableWidget_5.verticalHeader().setVisible(False)
self.tableWidget_5.verticalHeader().setDefaultSectionSize(28)
self.tableWidget_5.verticalHeader().setMinimumSectionSize(28)
self.pushButton_14 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_14.setGeometry(QtCore.QRect(1490, 670, 111, 31))
self.pushButton_14.setCheckable(True)
self.pushButton_14.setObjectName("pushButton_14")
self.pushButton_15 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_15.setGeometry(QtCore.QRect(1490, 90, 111, 31))
self.pushButton_15.setCheckable(True)
self.pushButton_15.setChecked(False)
self.pushButton_15.setObjectName("pushButton_15")
self.pushButton_16 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_16.setGeometry(QtCore.QRect(1490, 310, 111, 31))
self.pushButton_16.setCheckable(False)
self.pushButton_16.setObjectName("pushButton_16")
self.progressBar = QtWidgets.QProgressBar(self.tab_3)
self.progressBar.setGeometry(QtCore.QRect(870, 10, 241, 31))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.pushButton_17 = QtWidgets.QPushButton(self.tab_3)
self.pushButton_17.setGeometry(QtCore.QRect(1490, 710, 111, 31))
self.pushButton_17.setCheckable(True)
self.pushButton_17.setObjectName("pushButton_17")
self.line = QtWidgets.QFrame(self.tab_3)
self.line.setGeometry(QtCore.QRect(1480, 650, 131, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.label_5 = QtWidgets.QLabel(self.tab_3)
self.label_5.setGeometry(QtCore.QRect(1507, 630, 71, 20))
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.line_2 = QtWidgets.QFrame(self.tab_3)
self.line_2.setGeometry(QtCore.QRect(1483, 210, 131, 16))
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.label_12 = QtWidgets.QLabel(self.tab_3)
self.label_12.setGeometry(QtCore.QRect(1510, 190, 71, 20))
self.label_12.setAlignment(QtCore.Qt.AlignCenter)
self.label_12.setObjectName("label_12")
self.label_13 = QtWidgets.QLabel(self.tab_3)
self.label_13.setGeometry(QtCore.QRect(1507, 50, 71, 20))
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.label_13.setObjectName("label_13")
self.line_3 = QtWidgets.QFrame(self.tab_3)
self.line_3.setGeometry(QtCore.QRect(1480, 70, 131, 16))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.spinBox = QtWidgets.QSpinBox(self.tab_3)
self.spinBox.setGeometry(QtCore.QRect(1570, 360, 42, 22))
self.spinBox.setObjectName("spinBox")
self.label_14 = QtWidgets.QLabel(self.tab_3)
self.label_14.setGeometry(QtCore.QRect(1480, 360, 71, 20))
self.label_14.setAlignment(QtCore.Qt.AlignCenter)
self.label_14.setObjectName("label_14")
self.label_15 = QtWidgets.QLabel(self.tab_3)
self.label_15.setGeometry(QtCore.QRect(1480, 400, 71, 20))
self.label_15.setAlignment(QtCore.Qt.AlignCenter)
self.label_15.setObjectName("label_15")
self.spinBox_2 = QtWidgets.QSpinBox(self.tab_3)
self.spinBox_2.setGeometry(QtCore.QRect(1570, 400, 42, 22))
self.spinBox_2.setObjectName("spinBox_2")
self.label_16 = QtWidgets.QLabel(self.tab_3)
self.label_16.setGeometry(QtCore.QRect(1480, 440, 71, 20))
self.label_16.setAlignment(QtCore.Qt.AlignCenter)
self.label_16.setObjectName("label_16")
self.spinBox_3 = QtWidgets.QSpinBox(self.tab_3)
self.spinBox_3.setGeometry(QtCore.QRect(1570, 440, 42, 22))
self.spinBox_3.setObjectName("spinBox_3")
self.tabWidget.addTab(self.tab_3, "")
self.gridLayout_2.addWidget(self.tabWidget, 0, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1656, 25))
self.menubar.setObjectName("menubar")
self.menuQuit = QtWidgets.QMenu(self.menubar)
self.menuQuit.setObjectName("menuQuit")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.menuQuit.addAction(self.actionQuit)
self.menubar.addAction(self.menuQuit.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "QMeas"))
self.label_3.setText(_translate("MainWindow", "Connected Instrument"))
self.pushButton.setText(_translate("MainWindow", "Refresh"))
self.label_2.setText(_translate("MainWindow", "Define the intrument type"))
self.tableWidget.setSortingEnabled(False)
item = self.tableWidget.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "1"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Name"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Type"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "VISA Address"))
__sortingEnabled = self.tableWidget.isSortingEnabled()
self.tableWidget.setSortingEnabled(False)
self.tableWidget.setSortingEnabled(__sortingEnabled)
self.pushButton_2.setText(_translate("MainWindow", "Connect"))
self.label_4.setText(_translate("MainWindow", "Information"))
self.label.setText(_translate("MainWindow", "Available VISA Address"))
self.lineEdit.setPlaceholderText(_translate("MainWindow", "Instrument name"))
self.textBrowser.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'PMingLiU\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Please choose a VISA address and the corresponding Intrument.</p></body></html>"))
self.pushButton_10.setText(_translate("MainWindow", "Delete"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Connection"))
self.label_7.setText(_translate("MainWindow", "Methods"))
self.pushButton_9.setText(_translate("MainWindow", "Delete"))
self.lineEdit_2.setPlaceholderText(_translate("MainWindow", " Please type the name file here."))
self.lineEdit_5.setPlaceholderText(_translate("MainWindow", "Time (s)"))
self.pushButton_7.setText(_translate("MainWindow", "Read"))
self.pushButton_3.setText(_translate("MainWindow", "Conrtol"))
self.tableWidget_2.setSortingEnabled(False)
item = self.tableWidget_2.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "1"))
item = self.tableWidget_2.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Name"))
item = self.tableWidget_2.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Type"))
__sortingEnabled = self.tableWidget_2.isSortingEnabled()
self.tableWidget_2.setSortingEnabled(False)
self.tableWidget_2.setSortingEnabled(__sortingEnabled)
self.pushButton_5.setText(_translate("MainWindow", "RUN"))
self.label_11.setText(_translate("MainWindow", "Read"))
self.pushButton_6.setText(_translate("MainWindow", "STOP"))
self.label_8.setText(_translate("MainWindow", "Control"))
self.tableWidget_4.setSortingEnabled(False)
item = self.tableWidget_4.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "1"))
item = self.tableWidget_4.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "Name"))
item = self.tableWidget_4.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "Type"))
item = self.tableWidget_4.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "Read property"))
item = self.tableWidget_4.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "Magnification"))
item = self.tableWidget_4.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "Unit"))
__sortingEnabled = self.tableWidget_4.isSortingEnabled()
self.tableWidget_4.setSortingEnabled(False)
self.tableWidget_4.setSortingEnabled(__sortingEnabled)
self.treeWidget.headerItem().setText(0, _translate("MainWindow", "Level"))
self.treeWidget.headerItem().setText(1, _translate("MainWindow", "Name"))
self.treeWidget.headerItem().setText(2, _translate("MainWindow", "Type"))
self.treeWidget.headerItem().setText(3, _translate("MainWindow", "Property"))
self.treeWidget.headerItem().setText(4, _translate("MainWindow", "Target value"))
self.treeWidget.headerItem().setText(5, _translate("MainWindow", "Speed"))
self.treeWidget.headerItem().setText(6, _translate("MainWindow", "Increment"))
self.treeWidget.headerItem().setText(7, _translate("MainWindow", "Ins_Label (test)"))
self.label_6.setText(_translate("MainWindow", "Connected Instrument"))
self.label_9.setText(_translate("MainWindow", "Information"))
self.pushButton_26.setText(_translate("MainWindow", "Time Add level"))
self.pushButton_27.setText(_translate("MainWindow", "Time Add Child"))
self.pushButton_8.setText(_translate("MainWindow", "Delete"))
self.pushButton_4.setText(_translate("MainWindow", "Select the folder"))
self.label_17.setText(_translate("MainWindow", "Current folder:"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Measurement"))
self.pushButton_11.setText(_translate("MainWindow", "Stop"))
self.pushButton_12.setText(_translate("MainWindow", "Auto range"))
self.pushButton_13.setText(_translate("MainWindow", "Cursor Position"))
self.label_10.setText(_translate("MainWindow", "Information"))
self.tableWidget_5.setSortingEnabled(False)
item = self.tableWidget_5.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Name"))
item = self.tableWidget_5.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Read property"))
item = self.tableWidget_5.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "Value"))
item = self.tableWidget_5.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "1"))
item = self.tableWidget_5.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "2"))
self.pushButton_14.setText(_translate("MainWindow", "Quit sweep"))
self.pushButton_15.setText(_translate("MainWindow", "Resume | |
# filename: unit.py
import database as d
import jobs as j
import incubator as inc
import bigData as big
import staff
import copy
import math
#Not all units, but all units you can create (no house, church)
def all_units():
    """Return the list of unit classes a player can construct.

    Houses and churches are also units, but cannot be created directly,
    so they are deliberately excluded from this list.
    """
    return [Farm, Mill, Brewery, Bakery, Lumberyard, Joinery]
#units use Business's money
class Unit(object):
    """A single production or retail site belonging to a Business.

    Per-material state (stock, output, prices, sales counters, crafting
    tallies, ...) is stored in parallel lists indexed by the material's
    position in ``d.getMaterials()``.  Units hold no money of their own:
    sale proceeds are credited directly to the owning Business.
    """

    # Class-level defaults; instances overwrite these in __init__.
    unitType = "genericUnit"
    character = "X"  # one-character glyph used on the local map
    locality = None  # should be a locality
    location = ()  # (x,y), being indices on the localMap.
    business = None
    stock = None
    output = None

    def __init__(self, unitName, unitLocality, unitLocationTuple, business):
        """Set up per-material bookkeeping lists and helper objects.

        :param unitName: display name for this unit
        :param unitLocality: the locality (map region) containing the unit
        :param unitLocationTuple: (x, y) indices on the locality's map
        :param business: owning business; may be None
        """
        self.name = unitName
        self.locality = unitLocality
        self.location = unitLocationTuple
        self.business = business
        self.jobList = []
        self.incubator = inc.incubator(self)
        # Bug fix: bigdata used to be instantiated twice in __init__ (the
        # first time before the core attributes above were even assigned);
        # a single construction after setup is sufficient.
        self.bigdata = big.bigdata(self)
        stockLength = range(len(d.getMaterials()))
        missionsLength = range(len(d.getUnitMissions()))
        self.stock = [0 for material in stockLength]
        self.output = [0 for material in stockLength]
        self.tech = [1 for material in stockLength]
        # current prices
        self.price = [0 for material in stockLength]
        self.purchases = [0 for material in stockLength]
        # yesterday's number of sales for each item
        self.sales = [0 for material in stockLength]
        self.failSales = [0 for material in stockLength]
        self.transports = [0 for material in stockLength]
        self.failTransports = [0 for material in stockLength]
        # Direct Materials Cost of a SINGLE instance of each product
        self.DMC = [0 for material in stockLength]
        self.crafted = [0 for material in stockLength]
        self.planted = [0 for material in stockLength]
        self.harvested = [0 for material in stockLength]
        self.missions = [False for mission in missionsLength]
        self.can_make = [False for material in stockLength]
        self.laborTotal = 0
        # Per-period customer log consumed by bigData; see resetCustomers().
        self.customers = []

    def toString(self):
        """Print a human-readable status dump of this unit."""
        print("------------------------------------------")
        print(self.name + " is a " + self.unitType + ".")
        print("\nCurrent stocks:")
        print("Stock:", self.stock)
        print("Output:", self.output)
        print("\nCurrent crafted:")
        print("Crafted: ", self.crafted)
        print("Planted: ", self.planted)
        print("\nCurrent prices:")
        print("Direct material costs:", self.DMC)
        print("Last week's labor costs:", self.laborTotal)
        print("Sales:", self.sales)
        print("Failsales: ", self.failSales)
        print("Demand: ", [self.sales[i] + self.failSales[i] for i in range(len(self.sales))])
        print("Prices:", self.price)

    def getPrice(self):
        """Return the current per-material price list."""
        return self.price

    def complain(self, who):
        """Log a would-be customer who could not shop here at all.

        Recorded in the same ``customers`` list that sell() appends to.
        NOTE(review): this tuple has 6 fields while sell() logs 8 —
        confirm the bigData consumer handles both layouts.
        """
        failSale = [0 for i in range(len(d.getMaterials()))]
        cost = 0
        self.customers.append((who, failSale, copy.copy(self.output), cost, who.capital, False))

    def sell(self, who, amounts):
        """Sell up to ``amounts`` of each material to ``who``.

        Requested amounts are clipped to the output actually on hand;
        the sale only proceeds if ``who`` can afford the clipped total.
        Every attempt (successful or not) is logged to ``customers``.

        :param who: the buyer (must support canAfford/addCapital/addInventory)
        :param amounts: requested amount per material; clipped in place
        :returns: (actual amounts, total cost, whether anything sold)
        """
        wishamounts = copy.copy(amounts)
        wishcost = sum(self.price[i] * wishamounts[i] for i in range(len(self.output)))
        # Clip each requested amount to what is actually on the shelf.
        for i in range(len(self.output)):
            if amounts[i] > self.output[i]:
                amounts[i] = math.floor(self.output[i])
        cost = sum(self.price[i] * amounts[i] for i in range(len(self.output)))
        # verify
        sold = False
        if who.canAfford(cost):
            # sale: buyer pays; the owning business receives the cash
            who.addCapital(-cost)
            self.business.addCash(cost)
            for i in range(len(self.output)):
                self.addOutput(i, -amounts[i])
                who.addInventory(i, amounts[i])
                self.addSales(i, amounts[i])
                self.addFailSales(i, wishamounts[i] - amounts[i])
            sold = sum(amounts) > 0
        # customers is for bigData- cleared during *PHASE*
        self.customers.append((who, wishamounts, wishcost, copy.copy(amounts), cost, copy.copy(self.output), who.capital, sold))
        return (amounts, cost, sold)

    def priceCalc(self, i):
        """Calculate the price of material ``i`` without changing it in place.

        :returns: (smoothed price, optimal price, natural price)
        """
        # natural rate of profit- 4% return on capital
        nrp = 1.04
        # K is a constant weight- adjust if needed. At .5 one period is the half life.
        K = .5
        # Natural price: material cost plus labor, marked up by nrp.
        if d.getMaterials()[i] in d.planted:
            # 2 days, tech same for planting and harvesting
            ratio = self.incubator.ratios[d.getMaterials()[i]]
            labor = 2 / (self.tech[i] * ratio)
        else:
            labor = 1 / self.tech[i]
        naturalPrice = (self.DMC[i] + labor) * nrp
        if self.price[i] == 0:
            # Never sold before: start at the natural price.
            price = round(naturalPrice, 2)
            oPrice = price
        else:
            # Sold before: move halfway (weight K) toward the optimal price,
            # which scales the natural price by the demand/supply ratio.
            demand = self.sales[i] + self.failSales[i]
            oPrice = (demand / self.output[i]) * naturalPrice
            priceAdjustment = (K * (oPrice - self.price[i]))
            price = round(self.price[i] + priceAdjustment, 2)
        return (price, oPrice, naturalPrice)

    def priceGen(self):
        """Regenerate all prices for the new period, then clear sales data.

        Call AFTER transferring but BEFORE transporting. For stores, after
        restocking and before selling.  Only materials with nonzero output
        are repriced (priceCalc divides by output when the item has sold).
        """
        for i in range(len(self.price)):
            if self.output[i] != 0:
                self.price[i] = self.priceCalc(i)[0]
        self.resetSales()
        self.resetCustomers()

    def growingPlants(self, materialIndex):
        """Number of still-growing plants of the given material."""
        return self.incubator.getGrowing(d.getMaterials()[materialIndex])

    def ripePlants(self, materialIndex):
        """Number of harvest-ready plants of the given material."""
        return self.incubator.getRipe(d.getMaterials()[materialIndex])

    def plantSeeds(self, materialIndex, amount):
        """Plant ``amount`` seeds of the given material in the incubator.

        NOTE(review): stock is not debited here (a debit was commented out
        in the original) — confirm seeds are meant to be free.
        """
        self.incubator.plant(d.getMaterials()[materialIndex], amount)

    def harvest(self, materialIndex, amount):
        """Harvest ``amount`` ripe plants into crafted totals and stock.

        Silently does nothing when fewer than ``amount`` plants are ripe —
        presumably intentional; confirm callers expect no partial harvest.
        """
        if self.ripePlants(materialIndex) >= amount:
            amount = self.incubator.harvest(d.getMaterials()[materialIndex], amount)
            self.addCrafted(materialIndex, amount)
            self.addStock(materialIndex, amount)

    def getName(self):
        return self.name

    def getEmployees(self):
        """Flat list of all employees across this unit's jobs."""
        employeeList = []
        for job in self.jobList:
            employeeList += job.getEmployees()
        return employeeList

    def get_emp_dict(self):
        """Mapping of each job to its list of employees."""
        return {job: job.getEmployees() for job in self.jobList}

    def getLocality(self):
        return self.locality

    def getLocation(self):
        return self.location

    def getBusiness(self):
        return self.business

    def getDayNum(self):
        return self.locality.getDayNum()

    def getDMC(self):
        return self.DMC

    # For now, skills don't matter. But they will.
    def getJobs(self, interviewee):
        """Jobs available to ``interviewee`` (currently: a copy of all jobs)."""
        return copy.copy(self.jobList)

    def getJobList(self):
        return self.jobList

    def getOutput(self, materialIndex):
        return (self.output[materialIndex])

    def getAllOutput(self):
        return self.output

    def getMissions(self):
        return self.missions

    def getStock(self, materialIndex):
        return (self.stock[materialIndex])

    def getAllStock(self):
        return self.stock

    def getTech(self, materialIndex):
        return self.tech[materialIndex]

    def getUnitType(self):
        return self.unitType

    def setBusiness(self, newBusiness):
        self.business = newBusiness

    # addPurchase / addSales / addFailSales: per-material counter increments.
    def addPurchase(self, materialIndex, amount):
        self.purchases[materialIndex] += amount

    def addSales(self, materialIndex, amount):
        self.sales[materialIndex] += amount

    def addFailSales(self, materialIndex, amount):
        self.failSales[materialIndex] += amount

    def addTransports(self, materialIndex, amount):
        self.transports[materialIndex] += amount

    def addFailTransports(self, materialIndex, amount):
        self.failTransports[materialIndex] += amount

    def getTotalDemand(self):
        """Per-material total demand: successful plus failed sales/transports."""
        return [
            self.sales[i] + self.failSales[i]
            + self.transports[i] + self.failTransports[i]
            for i in range(len(self.sales))
        ]

    def addStock(self, materialIndex, amount):
        self.stock[materialIndex] += amount

    def addOutput(self, materialIndex, amount):
        self.output[materialIndex] += amount

    def addCrafted(self, materialIndex, amount):
        self.crafted[materialIndex] += amount

    def addPlanted(self, materialIndex, amount):
        self.planted[materialIndex] += amount

    def addHarvested(self, materialIndex, amount):
        self.harvested[materialIndex] += amount

    def getCrafted(self):
        return self.crafted

    # Used for displaying production in matplotlib.
    def getProduction(self):
        """(material indices, amounts crafted) — generalized from a
        hard-coded 9-element index list to match any material count."""
        return (list(range(len(self.crafted))), self.crafted)

    def getSales(self):
        """(material indices, sales counts) for plotting."""
        return (list(range(len(self.sales))), self.sales)

    def addJob(self, job):
        self.jobList.append(job)

    def removeJob(self, job):
        self.jobList.remove(job)

    def setDMC(self, materialIndex, DMCost):
        self.DMC[materialIndex] = DMCost

    def setLaborTotal(self, laborTotal):
        self.laborTotal = laborTotal

    def resetPurchases(self):
        self.purchases = [0 for i in self.purchases]

    def resetSales(self):
        """Zero all sales/transport counters for the new period."""
        self.sales = [0 for i in self.sales]
        self.failSales = [0 for i in self.failSales]
        self.transports = [0 for i in self.transports]
        self.failTransports = [0 for i in self.failTransports]

    def resetCrafted(self):
        self.crafted = [0 for i in self.crafted]

    def resetPlanted(self):
        self.planted = [0 for i in self.planted]

    def resetHarvested(self):
        self.harvested = [0 for i in self.harvested]

    def resetCustomers(self):
        self.customers = []

    def getRevenue(self):
        """Per-material revenue: sales times price, rounded to cents."""
        revenue = []
        for i in range(len(self.sales)):
            thisRev = round(self.sales[i] * self.price[i], 2)
            revenue.append(thisRev)
        return revenue

    def dailyRevenue(self):
        """Return a one-line English summary of today's sales revenue."""
        materials = d.getMaterials()
        revenue = self.getRevenue()
        noSales = True
        toString = ("\n" + self.name + " made")
        for i in range(len(revenue)):
            if revenue[i] != 0:
                if not noSales:
                    toString += ","
                toString += (
                    " $" + str(revenue[i]) +
                    " from " + str(self.sales[i]) +
                    "/" + str(self.sales[i] + self.failSales[i]) +
                    " sales of " + materials[i]
                )
                noSales = False
        toString += "."
        if noSales:
            toString = ("\n" + self.name + " made no sales today.")
        return toString

    def dailyCrafted(self):
        """Return a one-line English summary of today's crafting output."""
        materials = d.getMaterials()
        toString = ("\n" + self.name + " created")
        noCrafted = True
        for i in range(len(self.crafted)):
            if self.crafted[i] != 0:
                if not noCrafted:
                    toString += ","
                toString += (
                    " " +
                    str(self.crafted[i]) +
                    " " + str(materials[i])
                )
                noCrafted = False
        toString += "."
        if noCrafted:
            toString = ("\n" + self.name + " didn't craft anything today.")
        return toString

    def dailyExpenses(self):
        """Placeholder: expense reporting is not implemented yet."""
        pass
class Manufactury(Unit):
    """A producing unit.

    On top of the generic Unit setup, flags the manufacturing mission
    and attaches a manufacturing staff roster.
    """

    unitType = "Manufactury"
    character = "Manu"

    def __init__(self, unitName, unitLocality, unitLocationTuple, business):
        super().__init__(unitName, unitLocality, unitLocationTuple, business)
        self.missions[d.MANU_INDEX] = True
        self.staff = staff.manu_staff(self)
class Farm(Manufactury):
    """Grain-growing manufactury.

    Registers itself with the database (and with its business, when it
    has one) at creation time.
    """

    unitType = "Farm"
    character = "F"

    def __init__(self, unitName, unitLocality, unitLocationTuple, business):
        super().__init__(unitName, unitLocality, unitLocationTuple, business)
        grain = d.GRAIN_INDEX
        self.can_make[grain] = True
        self.tech[grain] = 4.5
        self.stock[grain] = 50
        # Pre-seeded demand figure so the pricing loop has an initial signal.
        self.failSales[grain] = 500
        d.addUnit(self)
        if self.business is not None:
            self.business.addUnit(self)
#20-30 kg flour per hour- ~440 lb per 8 hours
class Mill(Manufactury):
unitType = "Mill"
| |
# gh_stars: 1-10
"""Test ``sfini.state._state``."""
from sfini.state import _state as tscr
import pytest
from unittest import mock
from sfini.state import _base
import datetime
import sfini
class TestFail:
    """Test ``sfini.state._state.Fail``."""

    @pytest.fixture
    def state(self):
        """An example Fail instance."""
        kwargs = {
            "comment": "a state",
            "input_path": "$.spam.input",
            "output_path": "$.spam.output",
            "error": "BlaSpammed",
            "cause": "a bla has spammed"}
        return tscr.Fail("spam", **kwargs)

    def test_init(self, state):
        """Fail initialisation."""
        expected_attrs = {
            "name": "spam",
            "comment": "a state",
            "input_path": "$.spam.input",
            "output_path": "$.spam.output",
            "error": "BlaSpammed",
            "cause": "a bla has spammed"}
        for attr, value in expected_attrs.items():
            assert getattr(state, attr) == value

    @pytest.mark.parametrize(
        ("error", "cause", "exp"),
        [
            (tscr._default, tscr._default, {}),
            (
                tscr._default,
                "a bla has spammed",
                {"Cause": "a bla has spammed"}),
            ("BlaSpammed", tscr._default, {"Error": "BlaSpammed"}),
            (
                "BlaSpammed",
                "a bla has spammed",
                {"Cause": "a bla has spammed", "Error": "BlaSpammed"})])
    def test_to_dict(self, state, error, cause, exp):
        """Definition dictionary construction."""
        state.error = error
        state.cause = cause
        # Build the full expected definition without mutating the
        # parametrized ``exp`` dict.
        expected = dict(
            exp,
            Type="Fail",
            Comment="a state",
            InputPath="$.spam.input",
            OutputPath="$.spam.output")
        assert state.to_dict() == expected
class TestPass:
    """Test ``sfini.state._state.Pass``."""

    @pytest.fixture
    def state(self):
        """An example Pass instance."""
        kwargs = {
            "comment": "a state",
            "input_path": "$.spam.input",
            "output_path": "$.spam.output",
            "result_path": "$.result",
            "result": {"foo": [1, 2], "bar": None}}
        return tscr.Pass("spam", **kwargs)

    def test_init(self, state):
        """Pass initialisation."""
        expected_attrs = {
            "name": "spam",
            "comment": "a state",
            "input_path": "$.spam.input",
            "output_path": "$.spam.output",
            "result_path": "$.result",
            "result": {"foo": [1, 2], "bar": None}}
        for attr, value in expected_attrs.items():
            assert getattr(state, attr) == value

    @pytest.mark.parametrize(
        ("result", "exp"),
        [
            (tscr._default, {}),
            (
                {"foo": [1, 2], "bar": None},
                {"Result": {"foo": [1, 2], "bar": None}})])
    def test_to_dict(self, state, result, exp):
        """Definition dictionary construction."""
        state.next = mock.Mock(spec=_base.State)
        state.next.name = "bla"
        state.result = result
        # Build the full expected definition without mutating the
        # parametrized ``exp`` dict.
        expected = dict(
            exp,
            Type="Pass",
            Comment="a state",
            InputPath="$.spam.input",
            OutputPath="$.spam.output",
            ResultPath="$.result",
            Next="bla")
        assert state.to_dict() == expected
class TestWait:
    """Test ``sfini.state._state.Wait``."""

    @pytest.fixture
    def state(self):
        """An example Wait instance."""
        return tscr.Wait(
            "spam",
            42,
            comment="a state",
            input_path="$.spam.input",
            output_path="$.spam.output")

    def test_init(self, state):
        """Wait initialisation."""
        assert state.name == "spam"
        assert state.until == 42
        assert state.comment == "a state"
        assert state.input_path == "$.spam.input"
        assert state.output_path == "$.spam.output"

    class TestToDict:
        """Definition dictionary construction."""

        # A timezone-aware timestamp 24 hours in the future, shared by the
        # parametrized cases below.  Aware because to_dict() rejects naive
        # datetimes (see test_naive_datetime).
        _now = datetime.datetime.now(tz=datetime.timezone.utc)
        _now += datetime.timedelta(hours=24)

        @pytest.mark.parametrize(
            ("until", "exp"),
            [
                # int -> Seconds, aware datetime -> Timestamp,
                # JSONPath string -> TimestampPath
                (42, {"Seconds": 42}),
                (_now, {"Timestamp": _now.isoformat("T")}),
                ("$.spam.waitDate", {"TimestampPath": "$.spam.waitDate"}),
                # SecondsPath support is not implemented yet, so this case
                # is expected to fail until it is.
                pytest.param(
                    "$.spam.waitTime",
                    {"SecondsPath": "$.spam.waitTime"},
                    marks=pytest.mark.xfail(
                        reason=(
                            "Need to implement seconds-variable wait-time")))])
        def test_valid(self, state, until, exp):
            """Provided 'wait until' is valid."""
            state.next = mock.Mock(spec=_base.State)
            state.next.name = "bla"
            state.until = until
            exp["Type"] = "Wait"
            exp["Comment"] = "a state"
            exp["InputPath"] = "$.spam.input"
            exp["OutputPath"] = "$.spam.output"
            exp["Next"] = "bla"
            res = state.to_dict()
            assert res == exp

        @pytest.mark.parametrize(
            "until",
            [None, [1, 2], {"SecondsPath": "$.spam.waitTime"}])
        def test_invalid(self, state, until):
            """Provided 'wait until' is invalid."""
            state.until = until
            # The offending type should be named in the raised error message.
            with pytest.raises(TypeError) as e:
                _ = state.to_dict()
            assert str(type(until)) in str(e.value)

        def test_naive_datetime(self, state):
            """Until date-time is naive."""
            state.next = mock.Mock(spec=_base.State)
            state.next.name = "bla"
            # A naive (tzinfo-less) datetime must be rejected with a
            # ValueError mentioning "aware".
            state.until = datetime.datetime.now()
            with pytest.raises(ValueError) as e:
                _ = state.to_dict()
            assert "aware" in str(e.value)
class TestParallel:
    """Test ``sfini.state._state.Parallel``."""

    @pytest.fixture
    def state(self):
        """An example Parallel instance."""
        return tscr.Parallel(
            "spam",
            comment="a state",
            input_path="$.spam.input",
            output_path="$.spam.output",
            result_path="$.result")

    def test_init(self, state):
        """Parallel initialisation."""
        assert state.name == "spam"
        assert state.comment == "a state"
        assert state.input_path == "$.spam.input"
        assert state.output_path == "$.spam.output"
        assert state.result_path == "$.result"
        assert state.next is None
        assert state.retriers == []
        assert state.catchers == []

    def test_add(self, state):
        """Branch adding."""
        # Only checks that adding a state-machine branch doesn't raise;
        # the branch list contents are not asserted here.
        state.branches = []
        state_machine = mock.Mock(spec=sfini.state_machine.StateMachine)
        state.add(state_machine)

    class TestToDict:
        """Definition dictionary construction."""

        def test_no_error_handlers(self, state):
            """No retriers and catchers."""
            # Setup environment
            state.branches = [
                mock.Mock(spec=sfini.state_machine.StateMachine)
                for _ in range(3)]
            state.next = mock.Mock()
            state.next.name = "bla"
            state._get_retrier_defns = mock.Mock(return_value=[])
            state._get_catcher_defns = mock.Mock(return_value=[])
            # Build expectation: empty retrier/catcher lists must not add
            # 'Retry'/'Catch' keys to the definition.
            exp = {
                "Type": "Parallel",
                "Comment": "a state",
                "InputPath": "$.spam.input",
                "OutputPath": "$.spam.output",
                "ResultPath": "$.result",
                "Next": "bla",
                "Branches": [sm.to_dict.return_value for sm in state.branches]}
            # Run function
            res = state.to_dict()
            # Check result
            assert res == exp
            state._get_retrier_defns.assert_called_once_with()
            state._get_catcher_defns.assert_called_once_with()
            # Each branch state-machine is serialised exactly once.
            [sm.to_dict.assert_called_once_with() for sm in state.branches]

        def test_retry_catch(self, state):
            """With retriers and catchers."""
            # Setup environment
            state.branches = [
                mock.Mock(spec=sfini.state_machine.StateMachine)
                for _ in range(3)]
            state.next = mock.Mock()
            state.next.name = "bla"
            # Retrier/catcher inputs use tscr._default for omitted options;
            # the stubbed defn lists below are what the (mocked) converters
            # are expected to yield for them.
            state.retriers = [
                (
                    ["BlaSpammed", "FooBarred"],
                    {"interval": 5, "max_attempts": 10, "backoff_rate": 2.0}),
                (
                    ["States.ALL"],
                    {
                        "interval": tscr._default,
                        "max_attempts": 3,
                        "backoff_rate": tscr._default})]
            retry_defns = [
                {
                    "ErrorEquals": ["BlaSpammed", "FooBarred"],
                    "IntervalSeconds": 5,
                    "MaxAttempts": 10,
                    "BackoffRate": 2.0},
                {"ErrorEquals": ["States.ALL"], "MaxAttempts": 3}]
            state._get_retrier_defns = mock.Mock(return_value=retry_defns)
            foo_state = mock.Mock(spec=_base.State)
            bar_state = mock.Mock(spec=_base.State)
            state.catchers = [
                (
                    ["BlaSpammed", "FooBarred"],
                    {"next_state": foo_state, "result_path": "$.error-info"}),
                (
                    ["States.ALL"],
                    {"next_state": bar_state, "result_path": tscr._default})]
            catch_defns = [
                {
                    "ErrorEquals": ["BlaSpammed", "FooBarred"],
                    "Next": "foo",
                    "ResultPath": "$.error-info"},
                {"ErrorEquals": ["States.ALL"], "Next": "bla"}]
            state._get_catcher_defns = mock.Mock(return_value=catch_defns)
            # Build expectation
            exp = {
                "Type": "Parallel",
                "Comment": "a state",
                "InputPath": "$.spam.input",
                "OutputPath": "$.spam.output",
                "ResultPath": "$.result",
                "Next": "bla",
                "Retry": retry_defns,
                "Catch": catch_defns,
                "Branches": [sm.to_dict.return_value for sm in state.branches]}
            # Run function
            res = state.to_dict()
            # Check result
            assert res == exp
            state._get_retrier_defns.assert_called_once_with()
            state._get_catcher_defns.assert_called_once_with()
            [sm.to_dict.assert_called_once_with() for sm in state.branches]
class TestChoice:
    """Test ``sfini.state._state.Choice``."""

    @pytest.fixture
    def state(self):
        """An example Choice instance."""
        return tscr.Choice(
            "spam",
            comment="a state",
            input_path="$.spam.input",
            output_path="$.spam.output")

    def test_init(self, state):
        """Choice initialisation."""
        assert state.name == "spam"
        assert state.comment == "a state"
        assert state.input_path == "$.spam.input"
        assert state.output_path == "$.spam.output"
        assert state.choices == []
        assert state.default is None

    class TestAddTo:
        """Add state to collection."""

        def test_no_default(self, state):
            """No default state."""
            # Setup environmnent
            foo_rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            foo_rule.next_state = mock.Mock(spec=_base.State)
            foo_rule.next_state.name = "fooNext"
            bar_rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            bar_rule.next_state = mock.Mock(spec=_base.State)
            bar_rule.next_state.name = "barNext"
            state.choices = [foo_rule, bar_rule]
            # Build input: note the key "barNext" maps to foo_rule's next
            # state, so bar_rule's next state is considered already
            # registered and must not be recursed into.
            states = {
                "bla": mock.Mock(spec=_base.State),
                "barNext": foo_rule.next_state}
            # Build expectation
            exp_states = {
                "bla": states["bla"],
                "barNext": foo_rule.next_state,
                "spam": state}
            # Run function
            state.add_to(states)
            # Check result
            assert states == exp_states
            foo_rule.next_state.add_to.assert_called_once_with(states)
            bar_rule.next_state.add_to.assert_not_called()

        def test_has_default(self, state):
            """Has default state."""
            # Setup environmnent
            foo_rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            foo_rule.next_state = mock.Mock(spec=_base.State)
            foo_rule.next_state.name = "fooNext"
            bar_rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            bar_rule.next_state = mock.Mock(spec=_base.State)
            bar_rule.next_state.name = "barNext"
            state.choices = [foo_rule, bar_rule]
            state.default = mock.Mock(spec=_base.State)
            state.default.name = "default"
            # Build input
            states = {
                "bla": mock.Mock(spec=_base.State),
                "barNext": foo_rule.next_state}
            # Build expectation
            exp_states = {
                "bla": states["bla"],
                "barNext": foo_rule.next_state,
                "spam": state}
            # Run function
            state.add_to(states)
            # Check result: the default state must also be registered.
            assert states == exp_states
            state.default.add_to.assert_called_once_with(states)
            foo_rule.next_state.add_to.assert_called_once_with(states)
            bar_rule.next_state.add_to.assert_not_called()

    class TestAdd:
        """Choice-rule adding."""

        def test_has_next_state(self, state):
            """Rule has a next-state."""
            state.choices = []
            rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            rule.next_state = mock.Mock(spec=_base.State)
            exp_choices = [rule]
            state.add(rule)
            assert state.choices == exp_choices

        def test_no_next_state(self, state):
            """Rule has no a next-state."""
            state.choices = []
            rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            rule.next_state = None
            exp_choices = []
            # A rule without a next-state is rejected and must not be
            # appended; the error names the offending rule.
            with pytest.raises(RuntimeError) as e:
                state.add(rule)
            assert str(rule) in str(e.value)
            assert state.choices == exp_choices

    class TestRemove:
        """Choice-rule removal."""

        def test_registered(self, state):
            """Rule is an existing branch."""
            rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            state.choices = [rule]
            exp_choices = []
            state.remove(rule)
            assert state.choices == exp_choices

        def test_not_registered(self, state):
            """Rule is not an existing branch."""
            rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            foo_rule = mock.Mock(spec=sfini.state.choice.ChoiceRule)
            state.choices = [foo_rule]
            exp_choices = [foo_rule]
            # Removing an unregistered rule raises and leaves the existing
            # choices untouched.
            with pytest.raises(ValueError) as e:
                state.remove(rule)
            assert str(rule) in str(e.value)
            assert state.choices == exp_choices

    @pytest.mark.parametrize(
        "prev_default",
        [None, mock.Mock(spec=_base.State)])
    def test_set_default(self, state, prev_default):
        """Default state-setting."""
        # Setting works both when no default exists yet and when one is
        # being replaced.
        state.default = prev_default
        default_state = mock.Mock(spec=_base.State)
        state.set_default(default_state)
        assert state.default is default_state

    class TestToDict:
        """Definition dictionary construction."""

        def test_no_default(self, state):
            """No default state registered."""
            state.choices = [
                mock.Mock(spec=sfini.state.choice.ChoiceRule)
                for _ in range(3)]
            exp = {
                "Type": "Choice",
                "Comment": "a state",
                "InputPath": "$.spam.input",
                "OutputPath": "$.spam.output",
                "Choices": [c.to_dict.return_value for c in state.choices]}
            res = state.to_dict()
            assert res == exp
            # Each rule is serialised exactly once.
            [c.to_dict.assert_called_once_with() for c in state.choices]

        def test_with_default(self, state):
            """Default state registered."""
            state.default = mock.Mock(spec=_base.State)
            state.default.name = "bla"
            state.choices = [
                mock.Mock(spec=sfini.state.choice.ChoiceRule)
                for _ in range(3)]
            exp = {
                "Type": "Choice",
                "Comment": "a state",
                "InputPath": "$.spam.input",
                "OutputPath": "$.spam.output",
                "Default": "bla",
                "Choices": [c.to_dict.return_value for c in state.choices]}
            res = state.to_dict()
            assert res == exp
            [c.to_dict.assert_called_once_with() for c in state.choices]

        def test_no_next_path(self, state):
            """No transition available for choice."""
            # A Choice with neither rules nor a default has no possible
            # transition and cannot be serialised.
            with pytest.raises(RuntimeError) as e:
                state.to_dict()
            assert " no" in str(e.value)
            assert "path" in str(e.value) or "transition" in str(e.value)
            assert str(state) in str(e.value)
class TestTask:
"""Test ``sfini.state._state.Task``."""
@pytest.fixture
def resource_mock(self):
"""Task resource mock."""
return mock.Mock(spec=sfini.task_resource.TaskResource)
@pytest.fixture
def state(self, resource_mock):
"""An example Task instance."""
return tscr.Task(
"spam",
resource_mock,
comment="a state",
input_path="$.spam.input",
output_path="$.spam.output",
result_path="$.result",
timeout=42)
def test_init(self, state, resource_mock):
"""Task initialisation."""
assert state.name == "spam"
assert state.resource is resource_mock
assert state.comment == "a state"
assert state.input_path == "$.spam.input"
assert state.output_path == "$.spam.output"
assert state.result_path == "$.result"
assert state.timeout == 42
assert state.next is None
assert state.retriers == []
assert state.catchers | |
this
task, but it will have to be done in that separate process, because its
results can't be serialized and transmitted. (On the other hand, if we
have a dependent task to run *in this same process*, we'll want to bring
this task to the CACHED level instead.) As with (a), for a task with
non-persistable output, this milestone is reached as soon as we compute
its provenance; for a task with persistable output, it's reached only
when the task is computed and its output is cached.
4. CACHED: The task has been computed and its output value is stored somewhere --
in the persistent cache, in memory on the TaskState, and/or in memory on this
entry (depending on the cache settings). This is the final level: after this,
there is no more work to do on this task.
Normally an entry will only make forward progress through these levels; however,
we do sometimes evict temporarily-memoized values, which can cause an entry to
regress from CACHED to PRIMED.
"""
CREATED = auto()
INITIALIZED = auto()
PRIMED = auto()
CACHED = auto()
class EntryPriority(IntEnum):
    """
    Indicates a level of priority for a TaskRunnerEntry.

    When multiple entries are in the PENDING stage, an entry with higher
    priority will always be activated before one with lower priority. There
    are currently three priorities:

    1. NORMAL: the default -- most entries have this priority.
    2. HIGH: for entries that some meaningful side effect depends on. For
       example, after computing a value, we want to make sure it gets
       persisted to disk (if appropriate) before any other work happens.
    3. TOP: for entries that some effect depends on *and* that depend on a
       potentially large in-memory object. For example, if we compute a
       tuple value, our first priority should be to decompose it into
       smaller objects, allowing the original tuple to be garbage-collected;
       until that happens, the individual objects can't be garbage-collected
       either.
    """

    NORMAL = auto()
    HIGH = auto()
    TOP = auto()
# TODO Let's reorder the methods here with this order:
# 1. First public, then private.
# 2. Rough chronological order.
class TaskState:
"""
Represents the state of a task computation. Keeps track of its position in
the task graph, whether its values have been computed yet, additional
intermediate state and the deriving logic.
Parameters
----------
task: Task
The task whose state we're tracking.
dep_states: list of TaskStates
TaskStates that we depend on; these correspond to `task.dep_keys`.
followup_states: list of TaskStates
Other TaskStates that should run immediately after this one.
func_attrs: FunctionAttributes
Additional details about the task's `compute_func` function.
TODO This should probably be on the Task object itself.
desc_metadata: DescriptorMetadata
Extra info about the descriptor whose value is produced by this task.
"""
    def __init__(
        self,
        task,
        dep_states,
        followup_states,
        func_attrs,
        desc_metadata,
    ):
        """Initialize a TaskState; see the class docstring for parameter details."""
        self.task = task
        self.dep_states = dep_states
        self.followup_states = followup_states
        self.func_attrs = func_attrs
        self.desc_metadata = desc_metadata

        # Cached values.
        self.task_key = task.key

        # These are set by initialize().
        self.is_initialized = False
        self._provenance = None
        self._cache_accessor = None

        # This can be set by compute(), _load_value_hash(), or
        # attempt_to_access_persistent_cached_value().
        # This will be present only if should_persist is True.
        self._result_value_hash = None

        # This can be set by get_cached_result() or compute().
        # TODO It would be nice to move this to a central in-memory cache object, like
        # context.temp_result_cache but with a longer lifetime. However, it would be
        # a little weird to move this but still have self._result_value_hash here.
        # Would it make sense to remove the latter altogether and just retrieve it
        # lazily from self._cache_accessor?
        self._result = None
@property
def should_memoize(self):
return self.desc_metadata.should_memoize
@property
def should_memoize_for_query(self):
return self.desc_metadata.should_memoize_for_query
@property
def should_persist(self):
return self.desc_metadata.should_persist and not self.output_would_be_missing()
@property
def is_cached(self):
"""
Indicates whether the task state's result is cached.
"""
if self.should_persist:
# If our value is persistable, it can be saved either on disk or in memory,
# but only the former counts as being officially "cached".
return self._result_value_hash is not None
else:
return self._result is not None
def output_would_be_missing(self):
return self.task_key.case_key.has_missing_values
def __repr__(self):
return f"TaskState({self.task!r})"
    def get_cached_result(self, context):
        """Returns the ``Result`` of an already-computed task state.

        Prefers the in-memory copy when one is present; otherwise replicates
        and loads the local artifact via the cache accessor. Either way, the
        access is recorded on ``context.task_key_logger``. If memoization is
        enabled for this descriptor, the loaded result is kept in memory for
        subsequent calls.
        """
        assert self.is_cached
        # Fast path: the result was memoized in memory.
        if self._result is not None:
            context.task_key_logger.log_accessed_from_memory(self.task_key)
            return self._result

        local_artifact = self._cache_accessor.replicate_and_load_local_artifact()
        value = self._value_from_local_artifact(local_artifact)
        result = Result(
            task_key=self.task_key,
            value=value,
            local_artifact=local_artifact,
        )
        context.task_key_logger.log_loaded_from_disk(result.task_key)
        # Only retain the loaded result if this descriptor memoizes.
        if self.should_memoize:
            self._result = result
        return result
    def attempt_to_access_persistent_cached_value(self):
        """
        Loads the hash of the persisted value for this task, if it exists.

        If the persisted value is available in the cache, this object's `is_cached`
        property will become True. Otherwise, nothing will happen.
        """
        assert self.is_initialized
        assert not self.is_cached

        # Non-persisted tasks have nothing in the persistent cache.
        if not self.should_persist:
            return
        if not self._cache_accessor.can_load():
            return

        self._load_value_hash()
    def refresh_all_persistent_cache_state(self, context):
        """
        Refreshes all state that depends on the persistent cache.

        This is useful if the external cache state might have changed since we last
        worked with this task.
        """
        # If this task state is not initialized or not persisted, there's nothing to
        # refresh.
        if not self.is_initialized or not self.should_persist:
            return

        # Rebuild the accessor so it reflects the current external state.
        self.refresh_cache_accessor(context)

        # If we haven't loaded anything from the cache, we can stop here.
        if self._result_value_hash is None:
            return

        # Otherwise, let's update our value hash from the cache.
        if self._cache_accessor.can_load():
            self._load_value_hash()
        else:
            # The entry disappeared from the cache; forget the stale hash.
            self._result_value_hash = None
    def sync_after_remote_computation(self):
        """
        Syncs the task state by populating and reloading data in the current process
        after completing the task state in a subprocess.

        This is necessary because values populated in the task state are not
        communicated back from the subprocess.
        """
        # If this state was never initialized, it doesn't have any out-of-date
        # information, so there's no need to update anything.
        if not self.is_initialized:
            return

        # Remote computation is only meaningful for persisted values.
        assert self.should_persist

        # First, let's flush the stored entries in our cache accessor. Since we just
        # computed this entry in a subprocess, there should be a new cache entry that
        # isn't reflected yet in our local accessor.
        # (We don't just call self.refresh_cache_accessors() because we don't
        # particularly want to do the cache versioning check -- it's a little late to
        # do anything if it fails now.)
        self._cache_accessor.flush_stored_entries()

        # Then, populate the value hashes.
        if self._result_value_hash is None:
            self._load_value_hash()
    def initialize(self, context):
        """Initializes the task state to get it ready for completion.

        Computes this task's provenance from its dependencies' digests and,
        for persistable outputs, sets up the cache accessor. Idempotent: a
        second call is a no-op.
        """
        if self.is_initialized:
            return

        # First, set up the provenance.
        dep_provenance_digests_by_task_key = {
            dep_key: dep_state._get_digest()
            for dep_key, dep_state in zip(self.task.dep_keys, self.dep_states)
        }
        self._provenance = Provenance.from_computation(
            task_key=self.task_key,
            code_fingerprint=self.func_attrs.code_fingerprint,
            dep_provenance_digests_by_task_key=dep_provenance_digests_by_task_key,
            treat_bytecode_as_functional=(
                context.core.versioning_policy.treat_bytecode_as_functional
            ),
            can_functionally_change_per_run=self.func_attrs.changes_per_run,
            flow_instance_uuid=context.flow_instance_uuid,
        )

        # Lastly, set up cache accessors.
        if self.should_persist:
            self.refresh_cache_accessor(context)

        self.is_initialized = True
    def refresh_cache_accessor(self, context):
        """
        Initializes the cache accessor for this task state.

        This sets up state that allows us to read and write cache entries for this
        task's value. This includes some in-memory representations of external persistent
        resources (files or cloud blobs); calling this multiple times can be necessary
        in order to wipe this state and allow it get back in sync with the real world.
        """
        self._cache_accessor = context.core.persistent_cache.get_accessor(
            task_key=self.task_key,
            provenance=self._provenance,
        )
        # Optionally detect functions whose bytecode changed without a
        # matching version bump.
        if context.core.versioning_policy.check_for_bytecode_errors:
            self._check_accessor_for_version_problems()
def _check_accessor_for_version_problems(self):
"""
Checks for any versioning errors -- i.e., any cases where a task's
function code was updated but its version annotation was not.
"""
old_prov = self._cache_accessor.load_provenance()
if old_prov is None:
return
new_prov = self._cache_accessor.provenance
if old_prov.exactly_matches(new_prov):
return
if old_prov.nominally_matches(new_prov):
# If we have a nominal match but not an exact match, that means the
# user must changed a function's bytecode but not its version. To report
# this, we first need to figure out which function changed. It could be
# the one for this task, or it could be any immediate non-persisted
# ancestor of this one. Fortunately, each provenance contains links to each of
# its dependency digests, and a digest of non-persisted value contains that
# value's provenance, so we can recursively search through our ancestor
# provenances until we find which one caused the mismatch.
def locate_mismatched_provenances_and_raise(old_prov, new_prov):
assert old_prov.nominally_matches(new_prov)
# | |
noise', 'Outlet valve closure noise']
add_cyl_num=['Exhaust temperature','Exhaust temperature delta', 'Ignition voltage', 'ITP','Knock integrator','Knock noise', #'Exhaust temperature delta' added for delta to mean value
'Pressure 49° before TDC', 'Mechanical noise', 'Cylinder state', 'Close current gradient',
'Inlet valve closure timing', 'Outlet valve closure timing']
add_num=['Knock signal','P-max','AI','IMEP','Duration of opening','Conrod bearing temperature','CQ max','CQ','Slow down time']
add_mid=[]#talk with Sebastian what is looked at analyzis
to_check=add_cyl_num+add_num+add_mid
to_remove=[]
for col in y['col']:
if (any(ele in col for ele in to_check) and not col[-2:] in rel_cyl): #check if elemt in expanded elements and not in rel_cyl
#bug with add_cyl_short_num, exception would need to be added
to_remove.append (col)
y['col']=[i for i in y['col'] if not i in to_remove ] #remove original column
return y
def load_pltcfg_from_excel():
    """Load plot configuration from Excel sheet "Pltcfg".

    The workbook 'Input_validation_dashboard.xlsx' must be in the same
    folder. Rows are grouped by Plot_Nr/Axis_Nr; a row with Axis_Nr 0
    supplies the plot title, other rows define columns/units/limits of the
    plot's axes.

    Returns:
        pltcfg (list of list of dict): one inner list per plot, one dict per
            axis; each dict may contain 'col' (list of column names),
            'unit' (str, first occurrence wins) and 'ylim' ((min, max) tuple).
        plt_titles (list of str): title of each plot ('' if none given).
    """
    import math

    def is_number(s):
        """Return True if ``s`` represents a finite number."""
        try:
            # Convert first: math.isfinite() only accepts real numbers, so
            # passing a numeric *string* directly would raise TypeError.
            return math.isfinite(float(s))
        except (ValueError, TypeError):
            # ValueError: unparsable string; TypeError: e.g. None.
            return False

    df_cfg = pd.read_excel('Input_validation_dashboard.xlsx', sheet_name='Pltcfg', usecols=['Plot_Nr', 'Axis_Nr', 'Name', 'Unit', 'y-lim min', 'y-lim max'])
    df_cfg.sort_values(by=['Plot_Nr', 'Axis_Nr'], inplace=True)
    df_cfg.dropna(subset=['Plot_Nr', 'Axis_Nr', 'Name'], inplace=True)
    # Flag rows that continue the previous row's plot resp. axis.
    df_cfg['p_equal'] = df_cfg.Plot_Nr.eq(df_cfg.Plot_Nr.shift())
    df_cfg['a_equal'] = df_cfg.Axis_Nr.eq(df_cfg.Axis_Nr.shift())

    pltcfg = []
    plt_titles = []
    for i in range(len(df_cfg)):
        if not df_cfg.p_equal.iloc[i]:
            pltcfg.append([])  # new plot
            if df_cfg.Axis_Nr.iloc[i] == 0:  # append title if axis=0
                plt_titles.append(df_cfg.Name.iloc[i])  # append title
            else:
                plt_titles.append('')
        if df_cfg.Axis_Nr.iloc[i] != 0:
            if not df_cfg.a_equal.iloc[i] or not df_cfg.p_equal.iloc[i]:
                pltcfg[-1].append(dict())  # new axis
            y = pltcfg[-1][-1]
            if isinstance(df_cfg.Name.iloc[i], str):
                # '\xa0' (non-breaking space) sneaks in via Excel; normalise.
                if 'col' in y:
                    y['col'].append(df_cfg.Name.iloc[i].replace('\xa0', ' '))
                else:
                    y['col'] = [df_cfg.Name.iloc[i].replace('\xa0', ' ')]
            if 'unit' not in y and isinstance(df_cfg.Unit.iloc[i], str):  # take first occurrence of unit
                y['unit'] = df_cfg.Unit.iloc[i].replace('\xa0', ' ')
            lim_min = df_cfg['y-lim min'].iloc[i]
            lim_max = df_cfg['y-lim max'].iloc[i]
            if 'ylim' not in y and is_number(lim_min) and is_number(lim_max):
                y['ylim'] = (lim_min, lim_max)  # add tuple y lim
    return pltcfg, plt_titles
def show_val_stats(vl, df_loadrange=None, df_starts_oph=None):
    """Build a bokeh layout summarising the state of a validation fleet.

    Cylinder-specific parameters are assumed to have been filtered and
    loaded beforehand for shorter overall loading time.

    Args:
        vl (dmyplant2.Validation): Validation object (provides ``engines``
            and ``dashboard``).
        df_loadrange (pd.DataFrame, optional): Load distribution per engine
            as fractions of nominal power; rendered as a percentage table.
        df_starts_oph (pd.DataFrame, optional): Operating hours and starts
            per engine.

    Returns:
        bokeh.models.layouts.Column: Bokeh Column, can be displayed directly
        with show() or used further in a sheet or tab.
    """
    from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
    elements = []

    #### loadrange
    # Guard against the default of None as well as an empty frame (the
    # previous code crashed with AttributeError when called without it).
    if df_loadrange is not None and not df_loadrange.empty:
        loadrange_info = Div(text="<h3>Power load (P/Pnom)</h3>(Valid for displayed data)")
        df_loadrange = df_loadrange * 100  # fractions -> percent
        for col in df_loadrange.columns:
            df_loadrange[col] = df_loadrange[col].map("{:,.1f}%".format)
        df_loadrange.insert(0, 'Engine', df_loadrange.index)
        Columns = [TableColumn(field=Ci, title=Ci) for Ci in df_loadrange.columns]  # bokeh columns
        loadrange = DataTable(columns=Columns, source=ColumnDataSource(df_loadrange), autosize_mode='fit_columns', height=30*(len(df_loadrange.index)+1), index_position=None)  # bokeh table
        elements += [loadrange_info, loadrange]

    #### starts_oph
    if df_starts_oph is not None and not df_starts_oph.empty:
        starts_oph_info = Div(text="<h3>OPH and Starts</h3>(Valid for displayed data)")
        df_starts_oph['OPH'] = df_starts_oph['OPH'].map("{:,.1f}".format)
        df_starts_oph['OPH/ Start'] = df_starts_oph['OPH/ Start'].map("{:,.1f}".format)
        df_starts_oph.insert(0, 'Engine', df_starts_oph.index)
        Columns = [TableColumn(field=Ci, title=Ci) for Ci in df_starts_oph.columns]  # bokeh columns
        starts_oph = DataTable(columns=Columns, source=ColumnDataSource(df_starts_oph), autosize_mode='fit_columns', height=30*(len(df_starts_oph.index)+1), index_position=None)  # bokeh table
        elements += [starts_oph_info, starts_oph]

    d = vl.dashboard

    # Read Values defined in tdef from Myplant into a pd.dataframe
    tdef = {161: 'Count_OpHour', 102: 'Power_PowerAct', 1258: 'OperationalCondition', 19074: 'Various_Bits_CollAlarm'}
    ntable = [[e] + [e.get_dataItem(v) for v in tdef.values()] for e in vl.engines]
    dft = pd.DataFrame(ntable, columns=['Name'] + list(tdef.values()))

    # An engine counts as "up" if it reports Running or currently produces
    # power; "down" mirrors the original complementary condition.
    up_mask = ((dft.OperationalCondition == 'Running') | (dft.Power_PowerAct > 0))
    down_mask = ((dft.OperationalCondition != 'Running') & (dft.Power_PowerAct == 0))

    info_text = Div(text="<style>h3, h4{ margin: 0;}</style>" +
                    f"<h3>{dft.OperationalCondition.count()} Engines in Validation Fleet:</h3>" +
                    f"{dft[up_mask].OperationalCondition.count()} Validation Engines UP and Running<br>" +
                    f"{dft[down_mask].OperationalCondition.count()} Validation Engines not Running")

    # Display OPH characteristics
    oph_info_tit = Div(text="<h3>Validation progress</h3>")
    oph_vals = d['OPH Validation']
    # DataFrame.append is deprecated (removed in pandas 2.0); build the
    # table in one constructor call instead.
    df_oph = pd.DataFrame(
        [
            {'Characteristic': 'Fleet leader', 'OPH': f"{max(oph_vals):.0f}"},
            {'Characteristic': 'Average', 'OPH': f"{np.average(oph_vals):.0f}"},
            {'Characteristic': 'Cumulated', 'OPH': f"{sum(oph_vals):.0f}"},
        ],
        columns=['Characteristic', 'OPH'])
    Columns = [TableColumn(field=Ci, title=Ci) for Ci in df_oph.columns]  # bokeh columns
    oph_info = DataTable(columns=Columns, source=ColumnDataSource(df_oph), autosize_mode='fit_columns', height=30*(len(df_oph.index)+1), index_position=None)  # bokeh table

    # Display engines with certain states
    spec_eng = Div(text="<h3>Engines with special states:</h3>")

    tit_run = Div(text="<h4>Engines not running:</h4>")
    dfd = dft[down_mask].copy()  # .copy(): don't mutate a view of dft below
    if dfd.empty:
        table_run = Div(text="<em>All engines running</em>")
    else:
        # 'Name' initially holds the engine object; replace it with the
        # human-readable validation name.
        dfd['Name'] = dfd.apply(lambda row: row['Name']._info.get('Validation Engine'), axis=1)
        Columns2 = [TableColumn(field=Ci, title=Ci) for Ci in dfd.columns]  # bokeh columns
        table_run = DataTable(columns=Columns2, source=ColumnDataSource(dfd), autosize_mode='fit_columns', height=30*(len(dfd.index)+1))  # bokeh table

    tit_con = Div(text="<h4>Engines without contact:</h4>")
    dfc = dft[((dft.OperationalCondition == 'No Contact') | (dft.OperationalCondition == 'Never Connected'))].copy()
    if dfc.empty:
        table_con = Div(text="<em>All engines in contact</em>")
    else:
        dfc['Name'] = dfc.apply(lambda row: row['Name']._info.get('Validation Engine'), axis=1)
        Columns = [TableColumn(field=Ci, title=Ci) for Ci in dfc.columns]  # bokeh columns
        table_con = DataTable(columns=Columns, source=ColumnDataSource(dfc), autosize_mode='fit_columns', height=30*(len(dfc.index)+1))  # bokeh table

    tit_alarm = Div(text="<h4>Engines with Alarm Flag not 0:</h4>")
    dfe = dft[dft.Various_Bits_CollAlarm != 0].copy()
    if dfe.empty:
        table_alarm = Div(text="<em>No engines with alarms</em>")
    else:
        dfe['Name'] = dfe.apply(lambda row: row['Name']._info.get('Validation Engine'), axis=1)
        Columns3 = [TableColumn(field=Ci, title=Ci) for Ci in dfe.columns]  # bokeh columns
        table_alarm = DataTable(columns=Columns3, source=ColumnDataSource(dfe), autosize_mode='fit_columns', height=30*(len(dfe.index)+1))  # bokeh table

    # Display trips (most recent severity-800 alarms per flagged engine)
    trip_div = [Div(text="<h3>Recent alarms of engines with alarm:</h3>")]
    dtripped = dft[dft.Various_Bits_CollAlarm == 1]
    for eng in dtripped.values:
        le = eng[0]  # first column holds the engine object
        trip_div.append(Div(text='<h4>' + le._info.get('Validation Engine') + '</h4>'))
        dtrips = le.batch_hist_alarms(p_severities=[800], p_offset=0, p_limit=5)
        # Timestamps are in ms; "%H:%M" fixes the original "%H:%m" which
        # mistakenly printed the *month* in the minutes position.
        dtrips['datetime'] = pd.to_datetime(dtrips['timestamp'] * 1000000.0).dt.strftime("%m-%d-%Y %H:%M")
        df_print = dtrips[['datetime', 'message', 'name', 'severity']]
        Columns = [TableColumn(field=Ci, title=Ci) for Ci in df_print.columns]  # bokeh columns
        trip_div.append(DataTable(columns=Columns, source=ColumnDataSource(df_print), autosize_mode='fit_columns', height=30*(len(df_print.index)+1)))  # bokeh table

    # Create bar for figure call
    bar_source = ColumnDataSource({'Validation Engines UP and Running': [dft[up_mask].OperationalCondition.count()], 'Validation Engines not Running': [dft[down_mask].OperationalCondition.count()]})
    p = figure(plot_width=500, plot_height=50, tools="hover", tooltips="$name: @$name", toolbar_location=None)
    p.axis.visible = False
    p.xgrid.visible = False
    p.ygrid.visible = False
    p.x_range.range_padding = 0
    p.y_range.range_padding = 0
    p.hbar_stack(['Validation Engines UP and Running', 'Validation Engines not Running'], y=10, width=0.9, color=['green', 'grey'], source=bar_source)

    access_time = Div(text='<small>Access time: ' + datetime.now().strftime('%d.%m.%y %H:%M') + '</small>')

    elements = elements + [info_text, p, access_time, oph_info_tit, oph_info, spec_eng, tit_run, table_run, tit_con, table_con, tit_alarm, table_alarm, [trip_div]]
    text_lay = layout(children=elements)
    return text_lay
############################# Dieter
def demonstrated_Reliabillity_Plot(vl, beta=1.21, T=30000, s=1000, ft=pd.DataFrame, cl=[10, 50, 90], xmin=None, xmax=None, factor=2.0, ymax=24000):
"""Plot the demonstrated Reliability of the specified validation fleet
Example:
....
# load input data from files
dval = pd.read_csv("input2.csv",sep=';', encoding='utf-8')
dval['val start'] = pd.to_datetime(dval['val start'], format='%d.%m.%Y')
failures = pd.read_csv("failures.csv",sep=';', encoding='utf-8')
failures['date'] = pd.to_datetime(failures['date'], format='%d.%m.%Y')
dmyplant2.demonstrated_Reliabillity_Plot(vl,
beta=1.21, T=30000, s=1000, ft=failures, cl=[10,50,90], factor=1.3);
...
Args:
vl ([dmyplant2.Validation class]): [Class with several function around the validation fleet]
beta (float, optional): [Weibull beta parameter]. Defaults to 1.21.
        T (int, optional): [Runtime for Assessment of Reliability, calculated with LIPSON Method]. Defaults to 30000.
s (int, optional): [number of points to plot]. Defaults to 1000.
ft ([type], optional): [pd.DataFrame with observed failures]. Defaults to pd.DataFrame.
required Columns: date;failures;serialNumber;comment
        cl (list, optional): [list with reliability lines for specific confidence levels to plot,
            Numbers between 0 and 100]. Defaults to [10, 50, 90].
xmin ([timestamp], optional): [left margin of x-axis]. Defaults to None.
xmax ([timestamp], optional): [right margin of x-axis]. Defaults to None.
factor (float, optional): [Extrapolation factor]. Defaults to 2.0.
ymax (int, optional): [right y-axis max value]. Defaults to 24000.
Raises:
ValueError: [Time Range not properly specified]
"""
# define milestones
start_ts = vl.valstart_ts if xmin == None else xmin # val start
# the end of the Plotting interval
if xmax:
last_ts = xmax
else:
if factor:
factor = max(factor, 1.0) # no factor < 1.0 allowed
elapsed = vl.now_ts - start_ts
last_ts = start_ts + factor * elapsed
else:
raise ValueError("Error in timerange specification.")
fcol = 'grey'
# calculate the x axis timerange first
tr = demonstrated_reliability_sr(vl,
start_ts, last_ts, beta=beta, size=s, ft=ft)[0] # timestamp x axis start .. end
# determine the array - index of 'now'
n_i = _idx(s, start_ts, last_ts, vl.now_ts)
# create Timerow from Start to 'now'
n_tr = tr[0:n_i:1]
# convert to datetime dates - start .. last
dtr = [datetime.fromtimestamp(t) for t in tr]
# calculate demonstrated reliability curves for the complete period,
# confidence intervals CL :
rel = {c: demonstrated_reliability_sr(vl, start_ts, last_ts,
CL=c/100.0, beta=beta, size=s, ft=ft, T=T)[1] for c in cl}
# convert to datetime dates - start .. now
n_dtr = [datetime.fromtimestamp(t) for t in n_tr]
    # copy demonstrated reliability values for the validation period up to now:
n_rel = {c: rel[c][0:n_i:1] for c in cl}
# define the PLOT
fig, ax1 = plt.subplots( # pylint: disable=unused-variable
figsize=(12, 8), constrained_layout=True)
# fig, (ax1, ax3) = plt.subplots(2, figsize=(6, 6))
color = 'tab:red'
ax1.set_xlabel('date')
ax1.set_ylabel('Demonstrated Reliability [%]', color=color)
ax1.set_title('Demonstrated Reliability [%]')
# now plot the demonstrated reliability curves:
for CL in |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.