max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
imaginarium/views/users/urls.py | LordFeratum/Imaginarium | 0 | 6618051 | <gh_stars>0
from imaginarium.views.users.view import (
get_users,
get_user,
insert_user
)
urls = [
('GET', '/company/{company_id:\d+}/users', get_users,
{'name': 'users:users_list'}),
('GET', '/company/{company_id:\d+}/user/{id:\d+}', get_user,
{'name': 'users:user_detail'}),
('POST', '/company/{company_id:\d+}/user', insert_user,
{'name': 'users:user_add'}),
]
| from imaginarium.views.users.view import (
get_users,
get_user,
insert_user
)
urls = [
('GET', '/company/{company_id:\d+}/users', get_users,
{'name': 'users:users_list'}),
('GET', '/company/{company_id:\d+}/user/{id:\d+}', get_user,
{'name': 'users:user_detail'}),
('POST', '/company/{company_id:\d+}/user', insert_user,
{'name': 'users:user_add'}),
] | none | 1 | 1.527781 | 2 | |
src/config/error.py | swordysrepo/youtube_discord_bot | 1 | 6618052 | <filename>src/config/error.py
class InvalidResponseError(ValueError):
'''command-line response is invalid '''
pass | <filename>src/config/error.py
class InvalidResponseError(ValueError):
'''command-line response is invalid '''
pass | en | 0.886074 | command-line response is invalid | 1.576483 | 2 |
watcher/src/bai_watcher/__main__.py | gavinmbell/benchmark-ai-1 | 6 | 6618053 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
def main(argv=None):
from bai_kafka_utils.kafka_service_args import get_kafka_service_config
from bai_kafka_utils.logging import configure_logging
from bai_watcher import SERVICE_NAME, SERVICE_DESCRIPTION
from bai_watcher.args import get_watcher_service_config
common_kafka_cfg = get_kafka_service_config(SERVICE_NAME, argv)
service_cfg = get_watcher_service_config(argv)
configure_logging(level=common_kafka_cfg.logging_level)
from bai_watcher import service_logger
service_logger.setLevel(service_cfg.logging_level)
from bai_watcher.kafka_service_watcher import create_service
logger = service_logger.getChild(SERVICE_NAME)
logger.info(f"Starting {SERVICE_NAME} Service: {SERVICE_DESCRIPTION}")
logger.info("common_args = %s", common_kafka_cfg)
logger.info("service_specific_args = %s", service_cfg)
service = create_service(common_kafka_cfg, service_cfg)
service.run_loop()
if __name__ == "__main__":
main()
| # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
def main(argv=None):
from bai_kafka_utils.kafka_service_args import get_kafka_service_config
from bai_kafka_utils.logging import configure_logging
from bai_watcher import SERVICE_NAME, SERVICE_DESCRIPTION
from bai_watcher.args import get_watcher_service_config
common_kafka_cfg = get_kafka_service_config(SERVICE_NAME, argv)
service_cfg = get_watcher_service_config(argv)
configure_logging(level=common_kafka_cfg.logging_level)
from bai_watcher import service_logger
service_logger.setLevel(service_cfg.logging_level)
from bai_watcher.kafka_service_watcher import create_service
logger = service_logger.getChild(SERVICE_NAME)
logger.info(f"Starting {SERVICE_NAME} Service: {SERVICE_DESCRIPTION}")
logger.info("common_args = %s", common_kafka_cfg)
logger.info("service_specific_args = %s", service_cfg)
service = create_service(common_kafka_cfg, service_cfg)
service.run_loop()
if __name__ == "__main__":
main()
| en | 0.874973 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. | 1.956186 | 2 |
supervised/base/baseGradientBoosting.py | SaadChaouki/ml-eli5-cli5 | 1 | 6618054 | import numpy as np
from supervised.regression.decisionTreeRegressor import DecisionTreeRegressor
class BaseGradientBoosting(object):
def __init__(self, max_depth=2, num_estimators=100, minimum_sample_leaf=10, learning_rate=.1):
self.max_depth = max_depth
self.num_estimators = num_estimators
self.minimum_sample_leaf = minimum_sample_leaf
self.learning_rate = learning_rate
self.models = [DecisionTreeRegressor(
max_depth=self.max_depth,
minimum_sample_leaf=self.minimum_sample_leaf
) for _ in range(self.num_estimators)]
self.loss = None
self.transformation = None
def fit(self, X, y):
# Starting with the average of y
y_predicted = np.full(len(y), np.mean(y))
# Fitting the models
for model in self.models:
gradient = self.loss.gradient(y, y_predicted)
model.fit(X, gradient)
gradient_prediction = model.predict(X)
y_predicted -= self.learning_rate * np.array(gradient_prediction)
def predict(self, X):
y_pred = np.array([])
# Make predictions
for model in self.models:
update = model.predict(X)
update = np.multiply(self.learning_rate, update)
y_pred = -update if not y_pred.any() else y_pred - update
return self.transformation(y_pred) | import numpy as np
from supervised.regression.decisionTreeRegressor import DecisionTreeRegressor
class BaseGradientBoosting(object):
def __init__(self, max_depth=2, num_estimators=100, minimum_sample_leaf=10, learning_rate=.1):
self.max_depth = max_depth
self.num_estimators = num_estimators
self.minimum_sample_leaf = minimum_sample_leaf
self.learning_rate = learning_rate
self.models = [DecisionTreeRegressor(
max_depth=self.max_depth,
minimum_sample_leaf=self.minimum_sample_leaf
) for _ in range(self.num_estimators)]
self.loss = None
self.transformation = None
def fit(self, X, y):
# Starting with the average of y
y_predicted = np.full(len(y), np.mean(y))
# Fitting the models
for model in self.models:
gradient = self.loss.gradient(y, y_predicted)
model.fit(X, gradient)
gradient_prediction = model.predict(X)
y_predicted -= self.learning_rate * np.array(gradient_prediction)
def predict(self, X):
y_pred = np.array([])
# Make predictions
for model in self.models:
update = model.predict(X)
update = np.multiply(self.learning_rate, update)
y_pred = -update if not y_pred.any() else y_pred - update
return self.transformation(y_pred) | en | 0.863535 | # Starting with the average of y # Fitting the models # Make predictions | 2.944406 | 3 |
Model/Connector_Resnet2D.py | CVPR2020/EnAET | 3 | 6618055 | <reponame>CVPR2020/EnAET<filename>Model/Connector_Resnet2D.py<gh_stars>1-10
# /*******************************************************************************
# * Author : CVPR2020_EnAET
# *******************************************************************************/
import torch
import torch.nn as nn
from Model.Resnet_CLF import Resnet_CLF
from Model.Resnet_2D import Atten_ResNet,Bottleneck
class Connector_Resnet2D(nn.Module):
def __init__(self,
transform_classes=6,num_class=10,
nchannels=512,aet_channels=128,
cls_type='MultLayerFC2',run_type=0):
"""
:param _num_stages: block combination
:param _use_avg_on_conv3: finally use avg or not
:param indim:
:param num_classes: transformation matrix
"""
super(Connector_Resnet2D, self).__init__()
self.fc = nn.Linear(aet_channels * 2, transform_classes)
self.Backbone=Atten_ResNet(Bottleneck, [3, 8, 36, 3],aet_channels,num_stages=6)
self.clf=Resnet_CLF(nchannels, num_class, _cls_type=cls_type)
self.run_type=run_type
def forward(self, x1, x2, out_feat_keys=None):
if out_feat_keys==None:
if self.run_type==0:
use_output_key=['block2','classifier']
x1,attention_matrix1 = self.Backbone(x1, use_output_key)
x2,attention_matrix2 = self.Backbone(x2, use_output_key)
classify_input=x1[0]
#print(classify_input)
classify_output=self.clf(classify_input)
#in this semi-supervised, we do not use attention in the classifier part
transform_input1=x1[1]
transform_input2=x2[1]
x = torch.cat((transform_input1, transform_input2), dim=1)
transform_output =self.fc(x)
else:
x1, attention_matrix1 = self.Backbone(x1, out_feat_keys)
x2, attention_matrix2 = self.Backbone(x2, out_feat_keys)
#x1,attention_matrix1=self.attention(x1)
#x2,attention_matrix2=self.attention(x2)
if out_feat_keys == None:
return x1, x2, transform_output,classify_output,attention_matrix1,attention_matrix2
else:
return x1, x2,attention_matrix1,attention_matrix2
| # /*******************************************************************************
# * Author : CVPR2020_EnAET
# *******************************************************************************/
import torch
import torch.nn as nn
from Model.Resnet_CLF import Resnet_CLF
from Model.Resnet_2D import Atten_ResNet,Bottleneck
class Connector_Resnet2D(nn.Module):
def __init__(self,
transform_classes=6,num_class=10,
nchannels=512,aet_channels=128,
cls_type='MultLayerFC2',run_type=0):
"""
:param _num_stages: block combination
:param _use_avg_on_conv3: finally use avg or not
:param indim:
:param num_classes: transformation matrix
"""
super(Connector_Resnet2D, self).__init__()
self.fc = nn.Linear(aet_channels * 2, transform_classes)
self.Backbone=Atten_ResNet(Bottleneck, [3, 8, 36, 3],aet_channels,num_stages=6)
self.clf=Resnet_CLF(nchannels, num_class, _cls_type=cls_type)
self.run_type=run_type
def forward(self, x1, x2, out_feat_keys=None):
if out_feat_keys==None:
if self.run_type==0:
use_output_key=['block2','classifier']
x1,attention_matrix1 = self.Backbone(x1, use_output_key)
x2,attention_matrix2 = self.Backbone(x2, use_output_key)
classify_input=x1[0]
#print(classify_input)
classify_output=self.clf(classify_input)
#in this semi-supervised, we do not use attention in the classifier part
transform_input1=x1[1]
transform_input2=x2[1]
x = torch.cat((transform_input1, transform_input2), dim=1)
transform_output =self.fc(x)
else:
x1, attention_matrix1 = self.Backbone(x1, out_feat_keys)
x2, attention_matrix2 = self.Backbone(x2, out_feat_keys)
#x1,attention_matrix1=self.attention(x1)
#x2,attention_matrix2=self.attention(x2)
if out_feat_keys == None:
return x1, x2, transform_output,classify_output,attention_matrix1,attention_matrix2
else:
return x1, x2,attention_matrix1,attention_matrix2 | en | 0.297702 | # /******************************************************************************* # * Author : CVPR2020_EnAET # *******************************************************************************/ :param _num_stages: block combination :param _use_avg_on_conv3: finally use avg or not :param indim: :param num_classes: transformation matrix #print(classify_input) #in this semi-supervised, we do not use attention in the classifier part #x1,attention_matrix1=self.attention(x1) #x2,attention_matrix2=self.attention(x2) | 2.149516 | 2 |
build/lib/test/circuit.py | hwengineer/rf_linkbudget | 2 | 6618056 | import unittest
import numpy as np
import rf_linkbudget as rf
class TestCircuit(unittest.TestCase):
def test_SourceN0(self):
cr = rf.Circuit('Example')
src = rf.Source("Source")
sink = rf.Sink("Sink")
src['out'] >> sink['in']
# create callback function
def cb_src(self, f, p):
return {'f': f, 'p': p, 'Tn': rf.RFMath.T0}
src['out'].regCallback(cb_src)
cr.finalise()
sim = cr.simulate(network=cr.net,
start=cr['Source'],
end=cr['Sink'],
freq=[0],
power=[0])
sim.setNoiseBandwidth(1)
assert sim.extractValues('Tn', freq=0, power=0)[1][0] == rf.RFMath.T0
assert sim.extractValues('Tn', freq=0, power=0)[1][1] == rf.RFMath.T0
assert sim.extractValues('n', freq=0, power=0)[1][0] == rf.RFMath.N0
assert sim.extractValues('n', freq=0, power=0)[1][1] == rf.RFMath.N0
assert sim.extractValues('NF', freq=0, power=0)[1][0] == 0
assert sim.extractValues('NF', freq=0, power=0)[1][1] == 0
def test_Lna1dB(self):
cr = rf.Circuit('Example')
src = rf.Source("Source")
lna = rf.Amplifier("LNA",
Gain=20,
NF=1,
OP1dB=20,
OIP3=40)
sink = rf.Sink("Sink")
src['out'] >> lna['in']
lna['out'] >> sink['in']
# create callback function
def cb_src(self, f, p):
return {'f': f, 'p': p, 'Tn': rf.RFMath.T0}
src['out'].regCallback(cb_src)
cr.finalise()
sim = cr.simulate(network=cr.net,
start=cr['Source'],
end=cr['Sink'],
freq=[0],
power=[0])
sim.setNoiseBandwidth(1)
assert sim.extractLastValues('n', freq=0, power=0) == (rf.RFMath.N0 +20 +1)
assert np.round(sim.extractLastValues('NF', freq=0, power=0),decimals=4) == 1
def test_Attenuator10dB(self):
cr = rf.Circuit('Example')
src = rf.Source("Source")
att = rf.Attenuator("Attenuator",
Att=np.array([10])
)
sink = rf.Sink("Sink")
src['out'] >> att['in']
att['out'] >> sink['in']
# create callback function
def cb_src(self, f, p):
return {'f': f, 'p': p, 'Tn': rf.RFMath.T0}
src['out'].regCallback(cb_src)
cr.finalise()
sim = cr.simulate(network=cr.net,
start=cr['Source'],
end=cr['Sink'],
freq=[0],
power=[0])
sim.setNoiseBandwidth(1)
assert sim.extractLastValues('n', freq=0, power=0) == rf.RFMath.N0
assert np.round(sim.extractLastValues('NF', freq=0, power=0),decimals=4) == 10
if __name__ == '__main__':
unittest.main()
| import unittest
import numpy as np
import rf_linkbudget as rf
class TestCircuit(unittest.TestCase):
def test_SourceN0(self):
cr = rf.Circuit('Example')
src = rf.Source("Source")
sink = rf.Sink("Sink")
src['out'] >> sink['in']
# create callback function
def cb_src(self, f, p):
return {'f': f, 'p': p, 'Tn': rf.RFMath.T0}
src['out'].regCallback(cb_src)
cr.finalise()
sim = cr.simulate(network=cr.net,
start=cr['Source'],
end=cr['Sink'],
freq=[0],
power=[0])
sim.setNoiseBandwidth(1)
assert sim.extractValues('Tn', freq=0, power=0)[1][0] == rf.RFMath.T0
assert sim.extractValues('Tn', freq=0, power=0)[1][1] == rf.RFMath.T0
assert sim.extractValues('n', freq=0, power=0)[1][0] == rf.RFMath.N0
assert sim.extractValues('n', freq=0, power=0)[1][1] == rf.RFMath.N0
assert sim.extractValues('NF', freq=0, power=0)[1][0] == 0
assert sim.extractValues('NF', freq=0, power=0)[1][1] == 0
def test_Lna1dB(self):
cr = rf.Circuit('Example')
src = rf.Source("Source")
lna = rf.Amplifier("LNA",
Gain=20,
NF=1,
OP1dB=20,
OIP3=40)
sink = rf.Sink("Sink")
src['out'] >> lna['in']
lna['out'] >> sink['in']
# create callback function
def cb_src(self, f, p):
return {'f': f, 'p': p, 'Tn': rf.RFMath.T0}
src['out'].regCallback(cb_src)
cr.finalise()
sim = cr.simulate(network=cr.net,
start=cr['Source'],
end=cr['Sink'],
freq=[0],
power=[0])
sim.setNoiseBandwidth(1)
assert sim.extractLastValues('n', freq=0, power=0) == (rf.RFMath.N0 +20 +1)
assert np.round(sim.extractLastValues('NF', freq=0, power=0),decimals=4) == 1
def test_Attenuator10dB(self):
cr = rf.Circuit('Example')
src = rf.Source("Source")
att = rf.Attenuator("Attenuator",
Att=np.array([10])
)
sink = rf.Sink("Sink")
src['out'] >> att['in']
att['out'] >> sink['in']
# create callback function
def cb_src(self, f, p):
return {'f': f, 'p': p, 'Tn': rf.RFMath.T0}
src['out'].regCallback(cb_src)
cr.finalise()
sim = cr.simulate(network=cr.net,
start=cr['Source'],
end=cr['Sink'],
freq=[0],
power=[0])
sim.setNoiseBandwidth(1)
assert sim.extractLastValues('n', freq=0, power=0) == rf.RFMath.N0
assert np.round(sim.extractLastValues('NF', freq=0, power=0),decimals=4) == 10
if __name__ == '__main__':
unittest.main()
| en | 0.086231 | # create callback function # create callback function # create callback function | 2.548025 | 3 |
contest_questions/distinct_element.py | mukul20-21/python_datastructure | 0 | 6618057 | <reponame>mukul20-21/python_datastructure
n = int(input())
item = list(map(int,input().split()))
uqi = set(item)
print(len(uqi)) | n = int(input())
item = list(map(int,input().split()))
uqi = set(item)
print(len(uqi)) | none | 1 | 2.897409 | 3 | |
lookahead_minimax.py | JCBrouwer/lookahead_minimax | 2 | 6618058 | from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class LookaheadMinimax(Optimizer):
r"""
A PyTorch implementation of the lookahead wrapper for GANs.
This optimizer performs the lookahead step on both the discriminator and generator optimizers after the generator's
optimizer takes a step. This ensures that joint minimax lookahead is used rather than alternating minimax lookahead
(which would result from simply applying the original Lookahead Optimizer to both networks separately).
Lookahead Minimax Optimizer: https://arxiv.org/abs/2006.14567
Lookahead Optimizer: https://arxiv.org/abs/1907.08610
"""
def __init__(self, G_optimizer, D_optimizer, la_steps=5, la_alpha=0.5, pullback_momentum="none", accumulate=1):
"""
G_optimizer: generator optimizer
D_optimizer: discriminator optimizer
la_steps (int): number of lookahead steps
la_alpha (float): linear interpolation factor. 1.0 recovers the inner optimizer.
pullback_momentum (str): change to inner optimizer momentum on interpolation update
acumulate (int): number of gradient accumulation steps
"""
self.G_optimizer = G_optimizer
self.D_optimizer = D_optimizer
self._la_step = 0 # counter for inner optimizer
self.la_alpha = la_alpha
self._total_la_steps = la_steps * accumulate
self._la_steps = la_steps
pullback_momentum = pullback_momentum.lower()
assert pullback_momentum in ["reset", "pullback", "none"]
self.pullback_momentum = pullback_momentum
self.state = defaultdict(dict)
# Cache the current optimizer parameters
for group in G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_G_params"] = torch.zeros_like(p.data)
param_state["cached_G_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_G_mom"] = torch.zeros_like(p.data)
for group in D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_D_params"] = torch.zeros_like(p.data)
param_state["cached_D_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_D_mom"] = torch.zeros_like(p.data)
def __getstate__(self):
return {
"state": self.state,
"G_optimizer": self.G_optimizer,
"D_optimizer": self.D_optimizer,
"la_alpha": self.la_alpha,
"_la_step": self._la_step,
"_total_la_steps": self._la_steps,
"pullback_momentum": self.pullback_momentum,
}
def zero_grad(self):
self.G_optimizer.zero_grad()
def get_la_step(self):
return self._la_step
def state_dict(self):
return self.G_optimizer.state_dict()
def load_state_dict(self, G_state_dict, D_state_dict):
self.G_optimizer.load_state_dict(G_state_dict)
self.D_optimizer.load_state_dict(D_state_dict)
# Cache the current optimizer parameters
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_G_params"] = torch.zeros_like(p.data)
param_state["cached_G_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_G_mom"] = self.G_optimizer.state[p]["momentum_buffer"]
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_D_params"] = torch.zeros_like(p.data)
param_state["cached_D_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_D_mom"] = self.D_optimizer.state[p]["momentum_buffer"]
def _backup_and_load_cache(self):
"""
Useful for performing evaluation on the slow weights (which typically generalize better)
"""
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["backup_G_params"] = torch.zeros_like(p.data)
param_state["backup_G_params"].copy_(p.data)
p.data.copy_(param_state["cached_G_params"])
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["backup_D_params"] = torch.zeros_like(p.data)
param_state["backup_D_params"].copy_(p.data)
p.data.copy_(param_state["cached_D_params"])
def _clear_and_load_backup(self):
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.copy_(param_state["backup_G_params"])
del param_state["backup_G_params"]
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.copy_(param_state["backup_D_params"])
del param_state["backup_D_params"]
@property
def param_groups(self):
return self.G_optimizer.param_groups
def step(self, closure=None):
"""
Performs a single Lookahead optimization step on BOTH optimizers after the generator's optimizer step.
This allows the discriminator's optimizer to take more steps when using a higher step ratio and still have the
lookahead step being performed once after k generator steps. This also ensures the optimizers are updated with
the lookahead step simultaneously, rather than in alternating fashion.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = self.G_optimizer.step(closure)
self._la_step += 1
if self._la_step >= self._total_la_steps:
self._la_step = 0
# Lookahead and cache the current generator optimizer parameters
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state["cached_G_params"])
param_state["cached_G_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
internal_momentum = self.G_optimizer.state[p]["momentum_buffer"]
self.G_optimizer.state[p]["momentum_buffer"] = internal_momentum.mul_(self.la_alpha).add_(
1.0 - self.la_alpha, param_state["cached_G_mom"]
)
param_state["cached_G_mom"] = self.G_optimizer.state[p]["momentum_buffer"]
elif self.pullback_momentum == "reset":
self.G_optimizer.state[p]["momentum_buffer"] = torch.zeros_like(p.data)
# Lookahead and cache the current discriminator optimizer parameters
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state["cached_D_params"])
param_state["cached_D_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
internal_momentum = self.D_optimizer.state[p]["momentum_buffer"]
self.D_optimizer.state[p]["momentum_buffer"] = internal_momentum.mul_(self.la_alpha).add_(
1.0 - self.la_alpha, param_state["cached_D_mom"]
)
param_state["cached_D_mom"] = self.optimizer.state[p]["momentum_buffer"]
elif self.pullback_momentum == "reset":
self.D_optimizer.state[p]["momentum_buffer"] = torch.zeros_like(p.data)
return loss
| from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class LookaheadMinimax(Optimizer):
r"""
A PyTorch implementation of the lookahead wrapper for GANs.
This optimizer performs the lookahead step on both the discriminator and generator optimizers after the generator's
optimizer takes a step. This ensures that joint minimax lookahead is used rather than alternating minimax lookahead
(which would result from simply applying the original Lookahead Optimizer to both networks separately).
Lookahead Minimax Optimizer: https://arxiv.org/abs/2006.14567
Lookahead Optimizer: https://arxiv.org/abs/1907.08610
"""
def __init__(self, G_optimizer, D_optimizer, la_steps=5, la_alpha=0.5, pullback_momentum="none", accumulate=1):
"""
G_optimizer: generator optimizer
D_optimizer: discriminator optimizer
la_steps (int): number of lookahead steps
la_alpha (float): linear interpolation factor. 1.0 recovers the inner optimizer.
pullback_momentum (str): change to inner optimizer momentum on interpolation update
acumulate (int): number of gradient accumulation steps
"""
self.G_optimizer = G_optimizer
self.D_optimizer = D_optimizer
self._la_step = 0 # counter for inner optimizer
self.la_alpha = la_alpha
self._total_la_steps = la_steps * accumulate
self._la_steps = la_steps
pullback_momentum = pullback_momentum.lower()
assert pullback_momentum in ["reset", "pullback", "none"]
self.pullback_momentum = pullback_momentum
self.state = defaultdict(dict)
# Cache the current optimizer parameters
for group in G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_G_params"] = torch.zeros_like(p.data)
param_state["cached_G_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_G_mom"] = torch.zeros_like(p.data)
for group in D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_D_params"] = torch.zeros_like(p.data)
param_state["cached_D_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_D_mom"] = torch.zeros_like(p.data)
def __getstate__(self):
return {
"state": self.state,
"G_optimizer": self.G_optimizer,
"D_optimizer": self.D_optimizer,
"la_alpha": self.la_alpha,
"_la_step": self._la_step,
"_total_la_steps": self._la_steps,
"pullback_momentum": self.pullback_momentum,
}
def zero_grad(self):
self.G_optimizer.zero_grad()
def get_la_step(self):
return self._la_step
def state_dict(self):
return self.G_optimizer.state_dict()
def load_state_dict(self, G_state_dict, D_state_dict):
self.G_optimizer.load_state_dict(G_state_dict)
self.D_optimizer.load_state_dict(D_state_dict)
# Cache the current optimizer parameters
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_G_params"] = torch.zeros_like(p.data)
param_state["cached_G_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_G_mom"] = self.G_optimizer.state[p]["momentum_buffer"]
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["cached_D_params"] = torch.zeros_like(p.data)
param_state["cached_D_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
param_state["cached_D_mom"] = self.D_optimizer.state[p]["momentum_buffer"]
def _backup_and_load_cache(self):
"""
Useful for performing evaluation on the slow weights (which typically generalize better)
"""
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["backup_G_params"] = torch.zeros_like(p.data)
param_state["backup_G_params"].copy_(p.data)
p.data.copy_(param_state["cached_G_params"])
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
param_state["backup_D_params"] = torch.zeros_like(p.data)
param_state["backup_D_params"].copy_(p.data)
p.data.copy_(param_state["cached_D_params"])
def _clear_and_load_backup(self):
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.copy_(param_state["backup_G_params"])
del param_state["backup_G_params"]
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.copy_(param_state["backup_D_params"])
del param_state["backup_D_params"]
@property
def param_groups(self):
return self.G_optimizer.param_groups
def step(self, closure=None):
"""
Performs a single Lookahead optimization step on BOTH optimizers after the generator's optimizer step.
This allows the discriminator's optimizer to take more steps when using a higher step ratio and still have the
lookahead step being performed once after k generator steps. This also ensures the optimizers are updated with
the lookahead step simultaneously, rather than in alternating fashion.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = self.G_optimizer.step(closure)
self._la_step += 1
if self._la_step >= self._total_la_steps:
self._la_step = 0
# Lookahead and cache the current generator optimizer parameters
for group in self.G_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state["cached_G_params"])
param_state["cached_G_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
internal_momentum = self.G_optimizer.state[p]["momentum_buffer"]
self.G_optimizer.state[p]["momentum_buffer"] = internal_momentum.mul_(self.la_alpha).add_(
1.0 - self.la_alpha, param_state["cached_G_mom"]
)
param_state["cached_G_mom"] = self.G_optimizer.state[p]["momentum_buffer"]
elif self.pullback_momentum == "reset":
self.G_optimizer.state[p]["momentum_buffer"] = torch.zeros_like(p.data)
# Lookahead and cache the current discriminator optimizer parameters
for group in self.D_optimizer.param_groups:
for p in group["params"]:
param_state = self.state[p]
p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state["cached_D_params"])
param_state["cached_D_params"].copy_(p.data)
if self.pullback_momentum == "pullback":
internal_momentum = self.D_optimizer.state[p]["momentum_buffer"]
self.D_optimizer.state[p]["momentum_buffer"] = internal_momentum.mul_(self.la_alpha).add_(
1.0 - self.la_alpha, param_state["cached_D_mom"]
)
param_state["cached_D_mom"] = self.optimizer.state[p]["momentum_buffer"]
elif self.pullback_momentum == "reset":
self.D_optimizer.state[p]["momentum_buffer"] = torch.zeros_like(p.data)
return loss
| en | 0.737099 | A PyTorch implementation of the lookahead wrapper for GANs. This optimizer performs the lookahead step on both the discriminator and generator optimizers after the generator's optimizer takes a step. This ensures that joint minimax lookahead is used rather than alternating minimax lookahead (which would result from simply applying the original Lookahead Optimizer to both networks separately). Lookahead Minimax Optimizer: https://arxiv.org/abs/2006.14567 Lookahead Optimizer: https://arxiv.org/abs/1907.08610 G_optimizer: generator optimizer D_optimizer: discriminator optimizer la_steps (int): number of lookahead steps la_alpha (float): linear interpolation factor. 1.0 recovers the inner optimizer. pullback_momentum (str): change to inner optimizer momentum on interpolation update acumulate (int): number of gradient accumulation steps # counter for inner optimizer # Cache the current optimizer parameters # Cache the current optimizer parameters Useful for performing evaluation on the slow weights (which typically generalize better) Performs a single Lookahead optimization step on BOTH optimizers after the generator's optimizer step. This allows the discriminator's optimizer to take more steps when using a higher step ratio and still have the lookahead step being performed once after k generator steps. This also ensures the optimizers are updated with the lookahead step simultaneously, rather than in alternating fashion. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. # Lookahead and cache the current generator optimizer parameters # Lookahead and cache the current discriminator optimizer parameters | 2.926912 | 3 |
vwgec/features/base_feature.py | snukky/vwgec | 0 | 6618059 | <reponame>snukky/vwgec<filename>vwgec/features/base_feature.py
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from logger import log
class BaseFeature(object):
    """Base class for context-window feature extractors.

    Subclasses implement :meth:`extract` to add features for a confused
    word (``cword``) to a VW feature ``vector``.  Sentences are factored:
    ``sentence[factor]`` is the token list of one annotation layer.

    Args:
        window: number of tokens taken on each side of the target span.
        factor: index of the sentence factor (annotation layer) to read.
        weight: feature weight attached to this feature's values.
    """

    def __init__(self, window=3, factor=0, weight=1.0):
        self.window = window
        self.factor = factor
        self.weight = weight

    def extract(self, cword, csets, sentence, vector):
        """Add this feature's values for ``cword`` to ``vector`` (abstract)."""
        raise NotImplementedError()

    def left_context(self, pos, sentence, window=None, factor=None):
        """Return up to ``window`` tokens preceding span ``pos``, left-padded with <s>."""
        window = self.window if window is None else window
        factor = self.factor if factor is None else factor
        start = max(0, pos[0] - window)
        bos_size = max(0, window - pos[0])
        return (["<s>"] * bos_size) + sentence[factor][start:pos[0]]

    def right_context(self, pos, sentence, window=None, factor=None):
        """Return up to ``window`` tokens following span ``pos``, right-padded with </s>."""
        window = self.window if window is None else window
        factor = self.factor if factor is None else factor
        num_toks = len(sentence[factor])
        end = min(num_toks, pos[1] + window)
        eos_size = max(0, pos[1] + window - num_toks)
        return sentence[factor][pos[1]:end] + (["</s>"] * eos_size)

    def both_contexts(self, pos, sentence, window=None, factor=None):
        """Return ``(left, right)`` context token lists around ``pos``."""
        return (self.left_context(pos, sentence, window, factor),
                self.right_context(pos, sentence, window, factor))

    def bos_size(self, pos):
        """Number of <s> padding tokens needed on the left of ``pos``."""
        return max(0, self.window - pos[0])

    def eos_size(self, pos, num_toks):
        """Number of </s> padding tokens needed on the right of ``pos``."""
        return max(0, pos[1] + self.window - num_toks)

    def __str__(self):
        # Bug fix: __str__ must return a string; the original returned the
        # class object itself, so str(feature) raised TypeError.
        return self.__class__.__name__
| import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from logger import log
class BaseFeature(object):
    """Base class for context-window feature extractors.

    A feature reads tokens from one factor (annotation layer) of a
    factored sentence and produces values for a confused word.

    Args:
        window: tokens taken on each side of the target span.
        factor: sentence factor (annotation layer) to read from.
        weight: feature weight attached to extracted values.
    """

    def __init__(self, window=3, factor=0, weight=1.0):
        self.window = window
        self.factor = factor
        self.weight = weight

    def extract(self, cword, csets, sentence, vector):
        """Abstract hook: append feature values for ``cword`` to ``vector``."""
        raise NotImplementedError()

    def left_context(self, pos, sentence, window=None, factor=None):
        """Tokens preceding span ``pos``, padded on the left with <s>."""
        window = self.window if window is None else window
        factor = self.factor if factor is None else factor
        start = max(0, pos[0] - window)
        bos_size = max(0, window - pos[0])
        return (["<s>"] * bos_size) + sentence[factor][start:pos[0]]

    def right_context(self, pos, sentence, window=None, factor=None):
        """Tokens following span ``pos``, padded on the right with </s>."""
        window = self.window if window is None else window
        factor = self.factor if factor is None else factor
        num_toks = len(sentence[factor])
        end = min(num_toks, pos[1] + window)
        eos_size = max(0, pos[1] + window - num_toks)
        return sentence[factor][pos[1]:end] + (["</s>"] * eos_size)

    def both_contexts(self, pos, sentence, window=None, factor=None):
        """Pair of (left, right) context token lists around ``pos``."""
        return (self.left_context(pos, sentence, window, factor),
                self.right_context(pos, sentence, window, factor))

    def bos_size(self, pos):
        """How many <s> padding tokens the left context of ``pos`` needs."""
        return max(0, self.window - pos[0])

    def eos_size(self, pos, num_toks):
        """How many </s> padding tokens the right context of ``pos`` needs."""
        return max(0, pos[1] + self.window - num_toks)

    def __str__(self):
        # Bug fix: returning self.__class__ (a class object) made
        # str(feature) raise TypeError; return the class name instead.
        return self.__class__.__name__
server/app/WaterTankManager.py | mygs/babilonia | 0 | 6618060 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from threading import Timer
from time import sleep
import requests
import pandas
from sqlalchemy import create_engine, func, and_
from sqlalchemy.orm import sessionmaker
if os.uname()[4].startswith("arm"): # This module can only be run on a Raspberry Pi!
import RPi.GPIO as gpio
import time
import logging
# Page 102
# https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2835/BCM2835-ARM-Peripherals.pdf
###### Server GPIO setup
#
# o V G o X Y o o o o o o o o o o o o o o
# o o o o o A B o o o o o o o o o o o o o
PIN_WATER_TANK_IN = 10 # X
PIN_WATER_TANK_OUT = 12 # Y
PIN_WATER_LEVEL_SENSOR_A = 11 # A (XKC-Y25-V) => orange cable
PIN_WATER_LEVEL_SENSOR_B = 13 # B (XKC-Y25-V) => yellow cable
TIME_TO_DISABLE_SOLENOID_ON = 45 * 60 # secs
class WaterTankManager:
    """Drives the water-tank fill/drain solenoids and reads level sensors.

    When this host is the master AND runs on a Raspberry Pi (machine name
    starts with "arm"), GPIO pins are driven directly.  Otherwise the
    class either proxies status requests to the master over HTTP
    (non-master hosts) or keeps fake in-memory state (master off-device)
    so the rest of the application can run anywhere.
    """

    def __init__(self, logger, cfg):
        """Configure GPIO on a master Pi, or fall back to fake state.

        Args:
            logger: application logger.
            cfg: configuration mapping; this class reads
                cfg["WATER_TANK"]["MASTER"], cfg["WATER_TANK"]["MONITOR"],
                cfg["WATER_TANK"]["SERVER"] and
                cfg["SQLALCHEMY_DATABASE_URI"].
        """
        self.logger = logger
        self.cfg = cfg
        if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
            gpio.setmode(gpio.BOARD)
            gpio.setwarnings(False)
            gpio.setup(PIN_WATER_TANK_IN, gpio.OUT, initial=gpio.LOW)
            gpio.setup(PIN_WATER_TANK_OUT, gpio.OUT, initial=gpio.LOW)
            gpio.setup(PIN_WATER_LEVEL_SENSOR_A, gpio.IN, pull_up_down=gpio.PUD_DOWN)
            gpio.setup(PIN_WATER_LEVEL_SENSOR_B, gpio.IN, pull_up_down=gpio.PUD_DOWN)
            self.logger.info("[WaterTankManager] setup completed.")
        else:
            # Off-device (or non-master) fallback: solenoid state is kept
            # in plain attributes instead of GPIO pins.
            self.logger.info("[WaterTankManager] setup completed. Fake GPIO")
            self.FAKE_WATER_TANK_IN = 0
            self.FAKE_WATER_TANK_OUT = 0

    def get_current_sensor_level(self):
        """Return both level-sensor readings (zeros when not master-on-Pi)."""
        response = {}
        if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
            response["WATER_LEVEL_SENSOR_A"] = gpio.input(PIN_WATER_LEVEL_SENSOR_A)
            response["WATER_LEVEL_SENSOR_B"] = gpio.input(PIN_WATER_LEVEL_SENSOR_B)
        else:
            response["WATER_LEVEL_SENSOR_A"] = 0
            response["WATER_LEVEL_SENSOR_B"] = 0
        return response

    def get_level_description(self):
        """Translate the two level sensors into FILL / HALF / FULL.

        Must only be called on the master Pi (reads GPIO directly).
        Sensor A = mid level, sensor B = top level; A dry while B wet is
        inconsistent and yields "ERROR".
        """
        description = "ERROR"
        if gpio.input(PIN_WATER_LEVEL_SENSOR_A) == 0:
            if gpio.input(PIN_WATER_LEVEL_SENSOR_B) == 0:
                description = "FILL"
            # A low but B high: contradictory sensors -> stays "ERROR".
        else:
            if gpio.input(PIN_WATER_LEVEL_SENSOR_B) == 0:
                description = "HALF"
            else:
                description = "FULL"
        return description

    def fake_data(self):
        """Solenoid status built from the fake in-memory state."""
        response = {}
        response["WATER_TANK_IN"] = self.FAKE_WATER_TANK_IN
        response["WATER_TANK_OUT"] = self.FAKE_WATER_TANK_OUT
        response["WATER_TANK_IN_DISABLE"] = False
        response["GUI_DESCRIPTION"] = "FAKE"
        return response

    def get_current_solenoid_status(self):
        """Return solenoid states plus a GUI description.

        Master on a Pi reads the GPIO; master off-Pi returns fake data; a
        non-master host asks the master over HTTP and falls back to fake
        data when the request fails.
        """
        response = {}
        if self.cfg["WATER_TANK"]["MASTER"]:
            if os.uname()[4].startswith("arm"):
                response["GUI_DESCRIPTION"] = self.get_level_description()
                response["WATER_TANK_IN"] = gpio.input(PIN_WATER_TANK_IN)
                response["WATER_TANK_OUT"] = gpio.input(PIN_WATER_TANK_OUT)
                # A full tank (top sensor wet) disables the "fill" action.
                if gpio.input(PIN_WATER_LEVEL_SENSOR_B) == 1:
                    response["WATER_TANK_IN_DISABLE"] = True
                else:
                    response["WATER_TANK_IN_DISABLE"] = False
            else:
                response["WATER_TANK_IN"] = self.FAKE_WATER_TANK_IN
                response["WATER_TANK_OUT"] = self.FAKE_WATER_TANK_OUT
                response["WATER_TANK_IN_DISABLE"] = False
                response["GUI_DESCRIPTION"] = "FAKE"
        else:
            url = "http://%s/water-tank" % (self.cfg["WATER_TANK"]["SERVER"])
            try:
                resp = requests.get(url=url)
                response = resp.json()
                self.logger.info(
                    "[WaterTankManager] remote solenoid status response %s", response
                )
            except requests.exceptions.RequestException as e:
                self.logger.error("[WaterTankManager] Ignoring %s", e)
                response = self.fake_data()
        return response

    def get_current_solenoid_status_from_support(self):
        """Read the last reported solenoid switch states from the SUPPORT table."""
        engine = create_engine(self.cfg["SQLALCHEMY_DATABASE_URI"])
        switch_status = pandas.read_sql_query(
            """
            SELECT TIMESTAMP,
                   DATA->'$.SWITCH_A' AS SWITCH_A,
                   DATA->'$.SWITCH_B' AS SWITCH_B
            FROM farmland.SUPPORT
            WHERE TYPE='WATER_TANK'
            """,
            engine,
        )
        response = {}
        response["WATER_TANK_IN_DISABLE"] = False
        response["GUI_DESCRIPTION"] = "NODE"
        # Robustness fix: always provide the switch keys so callers cannot
        # hit a KeyError when no WATER_TANK rows exist yet.
        response["WATER_TANK_IN"] = 0
        response["WATER_TANK_OUT"] = 0
        if not switch_status.empty:
            response["WATER_TANK_IN"] = switch_status["SWITCH_A"].iloc[0]
            response["WATER_TANK_OUT"] = switch_status["SWITCH_B"].iloc[0]
        return response

    def monitorTankLevel(self):
        """Install GPIO edge callbacks that auto-fill / auto-stop the tank.

        Only active when MONITOR is enabled and this host is the master
        running on a Pi; a 15-minute debounce avoids solenoid chatter.
        """
        if self.cfg["WATER_TANK"]["MONITOR"]:
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                bouncetime = 15 * 60 * 1000  # 15 min debounce, milliseconds
                gpio.add_event_detect(
                    PIN_WATER_LEVEL_SENSOR_A,
                    gpio.BOTH,
                    callback=self.shouldStartFillingWaterTank,
                    bouncetime=bouncetime,
                )
                gpio.add_event_detect(
                    PIN_WATER_LEVEL_SENSOR_B,
                    gpio.BOTH,
                    callback=self.shouldStopFillingWaterTank,
                    bouncetime=bouncetime,
                )
                self.logger.info(
                    "[WaterTankManager] Water tank level monitor is ENABLED"
                )
        else:
            self.logger.info("[WaterTankManager] Water tank level monitor is DISABLED")

    def shouldStartFillingWaterTank(self, channel):
        """GPIO callback: open the inlet when both sensors read dry."""
        if not gpio.input(PIN_WATER_LEVEL_SENSOR_A) and not gpio.input(
            PIN_WATER_LEVEL_SENSOR_B
        ):
            self.logger.info("[WaterTankManager] auto START filling water tank")
            gpio.output(PIN_WATER_TANK_IN, gpio.HIGH)

    def shouldStopFillingWaterTank(self, channel):
        """GPIO callback: close the inlet once the top sensor reads wet."""
        if gpio.input(PIN_WATER_LEVEL_SENSOR_B):
            self.logger.info("[WaterTankManager] auto STOP filling water tank")
            gpio.output(PIN_WATER_TANK_IN, gpio.LOW)

    def changeStateWaterTankIn(self, state):
        """Open (True) or close (False) the tank inlet solenoid.

        Opening also arms a one-shot safety timer that closes the
        solenoid again after TIME_TO_DISABLE_SOLENOID_ON seconds.
        NOTE(review): Timer threads are non-daemon, so an armed timer
        keeps the process alive until it fires -- confirm intended.
        """
        if state:
            self.logger.info("[WaterTankManager] STARTED filling water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_IN, gpio.HIGH)
            else:
                self.FAKE_WATER_TANK_IN = 1
            timer_thread = Timer(
                TIME_TO_DISABLE_SOLENOID_ON, self.changeStateWaterTankIn, [False]
            )
            timer_thread.start()
        else:
            # Typo fix in the log message: "STOPED" -> "STOPPED".
            self.logger.info("[WaterTankManager] STOPPED filling water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_IN, gpio.LOW)
            else:
                self.FAKE_WATER_TANK_IN = 0

    def changeStateWaterTankOut(self, state):
        """Open (True) or close (False) the tank outlet solenoid.

        Opening arms the same one-shot safety timer as the inlet.
        """
        if state:
            self.logger.info("[WaterTankManager] STARTED using water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_OUT, gpio.HIGH)
            else:
                self.FAKE_WATER_TANK_OUT = 1
            timer_thread = Timer(
                TIME_TO_DISABLE_SOLENOID_ON, self.changeStateWaterTankOut, [False]
            )
            timer_thread.start()
        else:
            # Typo fix in the log message: "STOPED" -> "STOPPED".
            self.logger.info("[WaterTankManager] STOPPED using water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_OUT, gpio.LOW)
            else:
                self.FAKE_WATER_TANK_OUT = 0

    def disableWaterTankInAndOut(self):
        """Safety shutoff: force both solenoids closed immediately."""
        self.logger.info(
            "[WaterTankManager] Turn off water tank solenoids for security purpose!"
        )
        if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
            gpio.output(PIN_WATER_TANK_IN, gpio.LOW)
            gpio.output(PIN_WATER_TANK_OUT, gpio.LOW)
        else:
            self.FAKE_WATER_TANK_IN = 0
            self.FAKE_WATER_TANK_OUT = 0
if __name__ == "__main__":
    import logging
    import time

    # Bug fix: WaterTankManager() was called with no arguments although
    # __init__ requires (logger, cfg), which raised TypeError at startup.
    logging.basicConfig(level=logging.INFO)
    print("*** STARTING Water Tank Manager Test ***")
    cfg = {"WATER_TANK": {"MASTER": True, "MONITOR": True}}
    app = WaterTankManager(logging.getLogger("WaterTankManager"), cfg)
    app.monitorTankLevel()
    while True:
        time.sleep(3)
        print("*")
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from threading import Timer
from time import sleep
import requests
import pandas
from sqlalchemy import create_engine, func, and_
from sqlalchemy.orm import sessionmaker
if os.uname()[4].startswith("arm"): # This module can only be run on a Raspberry Pi!
import RPi.GPIO as gpio
import time
import logging
# Page 102
# https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2835/BCM2835-ARM-Peripherals.pdf
###### Server GPIO setup
#
# o V G o X Y o o o o o o o o o o o o o o
# o o o o o A B o o o o o o o o o o o o o
PIN_WATER_TANK_IN = 10 # X
PIN_WATER_TANK_OUT = 12 # Y
PIN_WATER_LEVEL_SENSOR_A = 11 # A (XKC-Y25-V) => orange cable
PIN_WATER_LEVEL_SENSOR_B = 13 # B (XKC-Y25-V) => yellow cable
TIME_TO_DISABLE_SOLENOID_ON = 45 * 60 # secs
class WaterTankManager:
    """Drives the water-tank fill/drain solenoids and reads level sensors.

    When this host is the master AND runs on a Raspberry Pi (machine name
    starts with "arm"), GPIO pins are driven directly.  Otherwise the
    class either proxies status requests to the master over HTTP
    (non-master hosts) or keeps fake in-memory state (master off-device)
    so the rest of the application can run anywhere.
    """

    def __init__(self, logger, cfg):
        """Configure GPIO on a master Pi, or fall back to fake state.

        Args:
            logger: application logger.
            cfg: configuration mapping; this class reads
                cfg["WATER_TANK"]["MASTER"], cfg["WATER_TANK"]["MONITOR"],
                cfg["WATER_TANK"]["SERVER"] and
                cfg["SQLALCHEMY_DATABASE_URI"].
        """
        self.logger = logger
        self.cfg = cfg
        if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
            gpio.setmode(gpio.BOARD)
            gpio.setwarnings(False)
            gpio.setup(PIN_WATER_TANK_IN, gpio.OUT, initial=gpio.LOW)
            gpio.setup(PIN_WATER_TANK_OUT, gpio.OUT, initial=gpio.LOW)
            gpio.setup(PIN_WATER_LEVEL_SENSOR_A, gpio.IN, pull_up_down=gpio.PUD_DOWN)
            gpio.setup(PIN_WATER_LEVEL_SENSOR_B, gpio.IN, pull_up_down=gpio.PUD_DOWN)
            self.logger.info("[WaterTankManager] setup completed.")
        else:
            # Off-device (or non-master) fallback: solenoid state is kept
            # in plain attributes instead of GPIO pins.
            self.logger.info("[WaterTankManager] setup completed. Fake GPIO")
            self.FAKE_WATER_TANK_IN = 0
            self.FAKE_WATER_TANK_OUT = 0

    def get_current_sensor_level(self):
        """Return both level-sensor readings (zeros when not master-on-Pi)."""
        response = {}
        if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
            response["WATER_LEVEL_SENSOR_A"] = gpio.input(PIN_WATER_LEVEL_SENSOR_A)
            response["WATER_LEVEL_SENSOR_B"] = gpio.input(PIN_WATER_LEVEL_SENSOR_B)
        else:
            response["WATER_LEVEL_SENSOR_A"] = 0
            response["WATER_LEVEL_SENSOR_B"] = 0
        return response

    def get_level_description(self):
        """Translate the two level sensors into FILL / HALF / FULL.

        Must only be called on the master Pi (reads GPIO directly).
        Sensor A = mid level, sensor B = top level; A dry while B wet is
        inconsistent and yields "ERROR".
        """
        description = "ERROR"
        if gpio.input(PIN_WATER_LEVEL_SENSOR_A) == 0:
            if gpio.input(PIN_WATER_LEVEL_SENSOR_B) == 0:
                description = "FILL"
            # A low but B high: contradictory sensors -> stays "ERROR".
        else:
            if gpio.input(PIN_WATER_LEVEL_SENSOR_B) == 0:
                description = "HALF"
            else:
                description = "FULL"
        return description

    def fake_data(self):
        """Solenoid status built from the fake in-memory state."""
        response = {}
        response["WATER_TANK_IN"] = self.FAKE_WATER_TANK_IN
        response["WATER_TANK_OUT"] = self.FAKE_WATER_TANK_OUT
        response["WATER_TANK_IN_DISABLE"] = False
        response["GUI_DESCRIPTION"] = "FAKE"
        return response

    def get_current_solenoid_status(self):
        """Return solenoid states plus a GUI description.

        Master on a Pi reads the GPIO; master off-Pi returns fake data; a
        non-master host asks the master over HTTP and falls back to fake
        data when the request fails.
        """
        response = {}
        if self.cfg["WATER_TANK"]["MASTER"]:
            if os.uname()[4].startswith("arm"):
                response["GUI_DESCRIPTION"] = self.get_level_description()
                response["WATER_TANK_IN"] = gpio.input(PIN_WATER_TANK_IN)
                response["WATER_TANK_OUT"] = gpio.input(PIN_WATER_TANK_OUT)
                # A full tank (top sensor wet) disables the "fill" action.
                if gpio.input(PIN_WATER_LEVEL_SENSOR_B) == 1:
                    response["WATER_TANK_IN_DISABLE"] = True
                else:
                    response["WATER_TANK_IN_DISABLE"] = False
            else:
                response["WATER_TANK_IN"] = self.FAKE_WATER_TANK_IN
                response["WATER_TANK_OUT"] = self.FAKE_WATER_TANK_OUT
                response["WATER_TANK_IN_DISABLE"] = False
                response["GUI_DESCRIPTION"] = "FAKE"
        else:
            url = "http://%s/water-tank" % (self.cfg["WATER_TANK"]["SERVER"])
            try:
                resp = requests.get(url=url)
                response = resp.json()
                self.logger.info(
                    "[WaterTankManager] remote solenoid status response %s", response
                )
            except requests.exceptions.RequestException as e:
                self.logger.error("[WaterTankManager] Ignoring %s", e)
                response = self.fake_data()
        return response

    def get_current_solenoid_status_from_support(self):
        """Read the last reported solenoid switch states from the SUPPORT table."""
        engine = create_engine(self.cfg["SQLALCHEMY_DATABASE_URI"])
        switch_status = pandas.read_sql_query(
            """
            SELECT TIMESTAMP,
                   DATA->'$.SWITCH_A' AS SWITCH_A,
                   DATA->'$.SWITCH_B' AS SWITCH_B
            FROM farmland.SUPPORT
            WHERE TYPE='WATER_TANK'
            """,
            engine,
        )
        response = {}
        response["WATER_TANK_IN_DISABLE"] = False
        response["GUI_DESCRIPTION"] = "NODE"
        # Robustness fix: always provide the switch keys so callers cannot
        # hit a KeyError when no WATER_TANK rows exist yet.
        response["WATER_TANK_IN"] = 0
        response["WATER_TANK_OUT"] = 0
        if not switch_status.empty:
            response["WATER_TANK_IN"] = switch_status["SWITCH_A"].iloc[0]
            response["WATER_TANK_OUT"] = switch_status["SWITCH_B"].iloc[0]
        return response

    def monitorTankLevel(self):
        """Install GPIO edge callbacks that auto-fill / auto-stop the tank.

        Only active when MONITOR is enabled and this host is the master
        running on a Pi; a 15-minute debounce avoids solenoid chatter.
        """
        if self.cfg["WATER_TANK"]["MONITOR"]:
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                bouncetime = 15 * 60 * 1000  # 15 min debounce, milliseconds
                gpio.add_event_detect(
                    PIN_WATER_LEVEL_SENSOR_A,
                    gpio.BOTH,
                    callback=self.shouldStartFillingWaterTank,
                    bouncetime=bouncetime,
                )
                gpio.add_event_detect(
                    PIN_WATER_LEVEL_SENSOR_B,
                    gpio.BOTH,
                    callback=self.shouldStopFillingWaterTank,
                    bouncetime=bouncetime,
                )
                self.logger.info(
                    "[WaterTankManager] Water tank level monitor is ENABLED"
                )
        else:
            self.logger.info("[WaterTankManager] Water tank level monitor is DISABLED")

    def shouldStartFillingWaterTank(self, channel):
        """GPIO callback: open the inlet when both sensors read dry."""
        if not gpio.input(PIN_WATER_LEVEL_SENSOR_A) and not gpio.input(
            PIN_WATER_LEVEL_SENSOR_B
        ):
            self.logger.info("[WaterTankManager] auto START filling water tank")
            gpio.output(PIN_WATER_TANK_IN, gpio.HIGH)

    def shouldStopFillingWaterTank(self, channel):
        """GPIO callback: close the inlet once the top sensor reads wet."""
        if gpio.input(PIN_WATER_LEVEL_SENSOR_B):
            self.logger.info("[WaterTankManager] auto STOP filling water tank")
            gpio.output(PIN_WATER_TANK_IN, gpio.LOW)

    def changeStateWaterTankIn(self, state):
        """Open (True) or close (False) the tank inlet solenoid.

        Opening also arms a one-shot safety timer that closes the
        solenoid again after TIME_TO_DISABLE_SOLENOID_ON seconds.
        NOTE(review): Timer threads are non-daemon, so an armed timer
        keeps the process alive until it fires -- confirm intended.
        """
        if state:
            self.logger.info("[WaterTankManager] STARTED filling water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_IN, gpio.HIGH)
            else:
                self.FAKE_WATER_TANK_IN = 1
            timer_thread = Timer(
                TIME_TO_DISABLE_SOLENOID_ON, self.changeStateWaterTankIn, [False]
            )
            timer_thread.start()
        else:
            # Typo fix in the log message: "STOPED" -> "STOPPED".
            self.logger.info("[WaterTankManager] STOPPED filling water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_IN, gpio.LOW)
            else:
                self.FAKE_WATER_TANK_IN = 0

    def changeStateWaterTankOut(self, state):
        """Open (True) or close (False) the tank outlet solenoid.

        Opening arms the same one-shot safety timer as the inlet.
        """
        if state:
            self.logger.info("[WaterTankManager] STARTED using water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_OUT, gpio.HIGH)
            else:
                self.FAKE_WATER_TANK_OUT = 1
            timer_thread = Timer(
                TIME_TO_DISABLE_SOLENOID_ON, self.changeStateWaterTankOut, [False]
            )
            timer_thread.start()
        else:
            # Typo fix in the log message: "STOPED" -> "STOPPED".
            self.logger.info("[WaterTankManager] STOPPED using water tank")
            if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
                gpio.output(PIN_WATER_TANK_OUT, gpio.LOW)
            else:
                self.FAKE_WATER_TANK_OUT = 0

    def disableWaterTankInAndOut(self):
        """Safety shutoff: force both solenoids closed immediately."""
        self.logger.info(
            "[WaterTankManager] Turn off water tank solenoids for security purpose!"
        )
        if self.cfg["WATER_TANK"]["MASTER"] and os.uname()[4].startswith("arm"):
            gpio.output(PIN_WATER_TANK_IN, gpio.LOW)
            gpio.output(PIN_WATER_TANK_OUT, gpio.LOW)
        else:
            self.FAKE_WATER_TANK_IN = 0
            self.FAKE_WATER_TANK_OUT = 0
if __name__ == "__main__":
    import logging
    import time

    # Bug fix: WaterTankManager() was called with no arguments although
    # __init__ requires (logger, cfg), which raised TypeError at startup.
    logging.basicConfig(level=logging.INFO)
    print("*** STARTING Water Tank Manager Test ***")
    cfg = {"WATER_TANK": {"MASTER": True, "MONITOR": True}}
    app = WaterTankManager(logging.getLogger("WaterTankManager"), cfg)
    app.monitorTankLevel()
    while True:
        time.sleep(3)
        print("*")
| en | 0.293521 | #!/usr/bin/python3 # -*- coding: utf-8 -*- # This module can only be run on a Raspberry Pi! # Page 102 # https://www.raspberrypi.org/documentation/hardware/raspberrypi/bcm2835/BCM2835-ARM-Peripherals.pdf ###### Server GPIO setup # # o V G o X Y o o o o o o o o o o o o o o # o o o o o A B o o o o o o o o o o o o o # X # Y # A (XKC-Y25-V) => orange cable # B (XKC-Y25-V) => yellow cable # secs SELECT TIMESTAMP, DATA->'$.SWITCH_A' AS SWITCH_A, DATA->'$.SWITCH_B' AS SWITCH_B FROM farmland.SUPPORT WHERE TYPE='WATER_TANK' # 15 min delay | 2.324691 | 2 |
staticpy/jit.py | SnowWalkerJ/StaticPy | 13 | 6618061 | <filename>staticpy/jit.py
import importlib
import inspect
import os
import sys
from .template import CppTemplate
from .bind import PyBindFunction, PyBindModule
from .common.options import get_option
from .common.phase import TwoPhaseFunction
from .compiler import Compiler
from .translator import BaseTranslator
from .session import new_session, get_session
from .common.string import get_target_filepath
from .lang.common import get_block_or_create
from .lang import (
statement as S,
expression as E,
macro as M,
block as B,
type as T,
variable as V,
)
class JitObject(TwoPhaseFunction):
    """Lazily-compiled wrapper around a Python function/class/module.

    On the first normal call the wrapped object is translated to C++,
    compiled into a pybind extension module and imported; later calls
    dispatch straight to the compiled object.  Recompilation happens only
    when the source file is newer than the build artifact (or when the
    "force_compile" option is set).
    """

    def __init__(self, name, obj, env=None):
        """Wrap *obj* for lazy compilation.

        Args:
            name: exported symbol name; falls back to ``obj.__name__``.
            obj: function/class/module, or a path to a source file.
            env: translation environment mapping (copied; never mutated
                in the caller).
        """
        self.name = name or obj.__name__
        self.obj = obj
        # Bug fix: the default used to be a shared mutable dict (env={});
        # use None as the sentinel and copy whatever the caller supplied.
        self.env = {} if env is None else dict(env)
        self.env[self.name] = V.Name(self.name)
        self._signatures = []
        self._compiled = False
        self._compiled_obj = None
        self._source_path, self._target_path = self._get_paths(obj)

    def compile(self):
        """Translate, bind and compile the wrapped object in a fresh session."""
        sess = new_session()
        self._add_definition(sess)
        sess.finalize()
        self._bind(sess)
        self._compile(sess)

    def load(self):
        """Import the compiled extension module and grab the exported symbol."""
        module_name = self.name
        if module_name in sys.modules:
            # Drop any stale module so a rebuilt extension is re-imported.
            del sys.modules[module_name]
        sys.path.insert(0, os.path.dirname(self._target_path))
        try:
            module = importlib.import_module(module_name)
            self._compiled_obj = getattr(module, self.name)
        finally:
            del sys.path[0]

    def building(self, *args):
        """Build-phase hook: register the definition and emit a call expression."""
        self._add_definition(get_session())
        return E.CallFunction(self.name, args)

    def normal(self, *args):
        """Run-phase hook: (re)compile if needed, then call the native object."""
        if not self._compiled:
            if get_option("force_compile", False) or self._need_update():
                self.compile()
            self.load()
            self._compiled = True
            self.__doc__ = self._compiled_obj.__doc__
        return self._compiled_obj(*args)

    def declare(self):
        """Return forward-declaration statements for the translated blocks."""
        declarations = []
        for stmt in self._block.statements:
            if isinstance(stmt, S.BlockStatement):
                block = stmt.block
                # Turn "prefix {" into "prefix;" to get a declaration.
                declarations.append(S.SimpleStatement(block.prefix()[:-2] + ";"))
        return declarations

    def _add_definition(self, sess):
        """Register this object's definition with *sess* and translate it."""
        sess.add_definition(self)
        self._translate(sess)

    def _translate(self, sess):
        """Translate the wrapped Python source into a code block."""
        translator = BaseTranslator(self.env, session=sess)
        source = self._get_source(self.obj)
        self._block = translator.translate(source)
        return self._block

    @staticmethod
    def _get_source(obj):
        """Return Python source: file contents for a path, else inspect it."""
        if isinstance(obj, str):
            with open(obj, "r") as f:
                return f.read()
        else:
            return inspect.getsource(obj)

    @staticmethod
    def _get_paths(obj):
        """Return absolute (source_path, target_path) for *obj*."""
        if inspect.ismodule(obj) or inspect.isfunction(obj) or inspect.isclass(obj):
            sourcepath = inspect.getsourcefile(obj)
            path = os.path.dirname(sourcepath)
            name = obj.__name__
        else:
            sourcepath = obj
            path = os.path.dirname(sourcepath)
            name = os.path.basename(obj).split(".")[0]
        sourcepath = os.path.abspath(sourcepath)
        targetpath = os.path.abspath(get_target_filepath(path, name))
        return sourcepath, targetpath

    def _bind(self, sess):
        """Add the pybind module boilerplate to the session's blocks."""
        with sess:
            with get_block_or_create("header"):
                M.defineM("PYBIND")
            block = get_block_or_create("main")
            PyBindModule(self.name, block).setup(sess)

    def _compile(self, sess):
        """Render the session through the C++ template and build the library."""
        compiler = Compiler()
        compiler.add_template(".cpp", CppTemplate())
        compiler.run(sess, os.path.dirname(self._target_path), libname=self.name)

    def _need_update(self):
        """True when no build exists or the source is newer than the build."""
        if not os.path.exists(self._target_path):
            return True
        target_mtime = os.path.getmtime(self._target_path)
        source_mtime = os.path.getmtime(self._source_path)
        return source_mtime > target_mtime
def jit(obj):
    """Decorator: wrap *obj* in a JitObject capturing the caller's scope.

    The environment seen by the translator is built from the builtins
    plus the caller's globals and locals (locals win on name clashes).
    """
    import builtins

    frame = inspect.currentframe().f_back
    # Bug fix: __builtins__ is a dict in imported modules but the
    # `builtins` module in __main__, so dict(__builtins__) could raise
    # TypeError; vars(builtins) works in both cases.
    env = dict(vars(builtins))
    env.update(frame.f_globals)
    env.update(frame.f_locals)
    return JitObject(obj.__name__, obj, env)
| <filename>staticpy/jit.py
import importlib
import inspect
import os
import sys
from .template import CppTemplate
from .bind import PyBindFunction, PyBindModule
from .common.options import get_option
from .common.phase import TwoPhaseFunction
from .compiler import Compiler
from .translator import BaseTranslator
from .session import new_session, get_session
from .common.string import get_target_filepath
from .lang.common import get_block_or_create
from .lang import (
statement as S,
expression as E,
macro as M,
block as B,
type as T,
variable as V,
)
class JitObject(TwoPhaseFunction):
    """Lazily-compiled wrapper around a Python function/class/module.

    First normal call translates the wrapped object to C++, compiles a
    pybind extension and imports it; later calls go straight to the
    compiled object.  Recompilation is triggered by a newer source file
    or the "force_compile" option.
    """

    def __init__(self, name, obj, env=None):
        """Wrap *obj*; ``env`` is a translation environment (copied)."""
        self.name = name or obj.__name__
        self.obj = obj
        # Bug fix: avoid the shared mutable default (env={}); copy the
        # caller's mapping so the next line never mutates it.
        self.env = {} if env is None else dict(env)
        self.env[self.name] = V.Name(self.name)
        self._signatures = []
        self._compiled = False
        self._compiled_obj = None
        self._source_path, self._target_path = self._get_paths(obj)

    def compile(self):
        """Translate, bind and compile in a fresh session."""
        sess = new_session()
        self._add_definition(sess)
        sess.finalize()
        self._bind(sess)
        self._compile(sess)

    def load(self):
        """Import the compiled extension and grab the exported symbol."""
        module_name = self.name
        if module_name in sys.modules:
            # Drop any stale module so a rebuilt extension is re-imported.
            del sys.modules[module_name]
        sys.path.insert(0, os.path.dirname(self._target_path))
        try:
            module = importlib.import_module(module_name)
            self._compiled_obj = getattr(module, self.name)
        finally:
            del sys.path[0]

    def building(self, *args):
        """Build-phase hook: register the definition, emit a call expression."""
        self._add_definition(get_session())
        return E.CallFunction(self.name, args)

    def normal(self, *args):
        """Run-phase hook: (re)compile if needed, then call the native object."""
        if not self._compiled:
            if get_option("force_compile", False) or self._need_update():
                self.compile()
            self.load()
            self._compiled = True
            self.__doc__ = self._compiled_obj.__doc__
        return self._compiled_obj(*args)

    def declare(self):
        """Return forward-declaration statements for the translated blocks."""
        declarations = []
        for stmt in self._block.statements:
            if isinstance(stmt, S.BlockStatement):
                block = stmt.block
                # Turn "prefix {" into "prefix;" to get a declaration.
                declarations.append(S.SimpleStatement(block.prefix()[:-2] + ";"))
        return declarations

    def _add_definition(self, sess):
        """Register this definition with *sess* and translate it."""
        sess.add_definition(self)
        self._translate(sess)

    def _translate(self, sess):
        """Translate the wrapped Python source into a code block."""
        translator = BaseTranslator(self.env, session=sess)
        source = self._get_source(self.obj)
        self._block = translator.translate(source)
        return self._block

    @staticmethod
    def _get_source(obj):
        """Return Python source: file contents for a path, else inspect it."""
        if isinstance(obj, str):
            with open(obj, "r") as f:
                return f.read()
        else:
            return inspect.getsource(obj)

    @staticmethod
    def _get_paths(obj):
        """Return absolute (source_path, target_path) for *obj*."""
        if inspect.ismodule(obj) or inspect.isfunction(obj) or inspect.isclass(obj):
            sourcepath = inspect.getsourcefile(obj)
            path = os.path.dirname(sourcepath)
            name = obj.__name__
        else:
            sourcepath = obj
            path = os.path.dirname(sourcepath)
            name = os.path.basename(obj).split(".")[0]
        sourcepath = os.path.abspath(sourcepath)
        targetpath = os.path.abspath(get_target_filepath(path, name))
        return sourcepath, targetpath

    def _bind(self, sess):
        """Add the pybind module boilerplate to the session's blocks."""
        with sess:
            with get_block_or_create("header"):
                M.defineM("PYBIND")
            block = get_block_or_create("main")
            PyBindModule(self.name, block).setup(sess)

    def _compile(self, sess):
        """Render the session through the C++ template and build the library."""
        compiler = Compiler()
        compiler.add_template(".cpp", CppTemplate())
        compiler.run(sess, os.path.dirname(self._target_path), libname=self.name)

    def _need_update(self):
        """True when no build exists or the source is newer than the build."""
        if not os.path.exists(self._target_path):
            return True
        target_mtime = os.path.getmtime(self._target_path)
        source_mtime = os.path.getmtime(self._source_path)
        return source_mtime > target_mtime
def jit(obj):
    """Decorator: wrap *obj* in a JitObject capturing the caller's scope."""
    import builtins

    frame = inspect.currentframe().f_back
    # Bug fix: dict(__builtins__) raised TypeError when __builtins__ was
    # the `builtins` module (as in __main__); vars(builtins) covers both
    # the module and the dict case.
    env = dict(vars(builtins))
    env.update(frame.f_globals)
    env.update(frame.f_locals)
    return JitObject(obj.__name__, obj, env)
| none | 1 | 1.963139 | 2 | |
Lib/site-packages/PySide/examples/widgets/tooltips/tooltips.py | heylenz/python27 | 32 | 6618062 | <reponame>heylenz/python27<gh_stars>10-100
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at <EMAIL>.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import random
from PySide import QtCore, QtGui
import tooltips_rc
class ShapeItem(object):
    """One draggable shape: painter path, position, fill color, tooltip."""

    def __init__(self):
        # Internal state; outside code goes through the accessor methods.
        self._path = QtGui.QPainterPath()
        self._position = QtCore.QPoint()
        self._color = QtGui.QColor()
        self._tool_tip = ''

    # --- read accessors -------------------------------------------------

    def path(self):
        """QPainterPath used for drawing and hit testing."""
        return self._path

    def position(self):
        """Top-left offset of the shape in widget coordinates."""
        return self._position

    def color(self):
        """Fill color of the shape."""
        return self._color

    def toolTip(self):
        """Tooltip text shown when hovering the shape."""
        return self._tool_tip

    # --- write accessors ------------------------------------------------

    def setPath(self, path):
        self._path = path

    def setToolTip(self, toolTip):
        self._tool_tip = toolTip

    def setPosition(self, position):
        self._position = position

    def setColor(self, color):
        self._color = color
class SortingBox(QtGui.QWidget):
    """Demo widget: draggable shapes with per-shape tooltips.

    Python port of the Qt "tooltips" example.  Shapes live back-to-front
    in ``self.shapeItems``; tooltip lookup and dragging both use
    :meth:`itemAt` hit testing against the shapes' painter paths.

    NOTE(review): this code targets Python 2 -- the ``/`` divisions in
    the geometry helpers rely on integer division; under Python 3 they
    yield floats and ``QPoint(float, float)`` may fail.  Confirm the
    target interpreter before reuse.
    """

    # Counters used to number shapes created via the tool buttons.
    circle_count = square_count = triangle_count = 1

    def __init__(self):
        super(SortingBox, self).__init__()

        # Prototype painter paths shared by all items of the same kind.
        self.circlePath = QtGui.QPainterPath()
        self.squarePath = QtGui.QPainterPath()
        self.trianglePath = QtGui.QPainterPath()

        self.shapeItems = []                      # back-to-front draw order
        self.previousPosition = QtCore.QPoint()   # last mouse pos while dragging

        # Mouse tracking lets tooltip events fire without a button press.
        self.setMouseTracking(True)
        self.setBackgroundRole(QtGui.QPalette.Base)

        self.itemInMotion = None                  # item currently being dragged

        self.newCircleButton = self.createToolButton("New Circle",
                QtGui.QIcon(':/images/circle.png'), self.createNewCircle)
        self.newSquareButton = self.createToolButton("New Square",
                QtGui.QIcon(':/images/square.png'), self.createNewSquare)
        self.newTriangleButton = self.createToolButton("New Triangle",
                QtGui.QIcon(':/images/triangle.png'), self.createNewTriangle)

        self.circlePath.addEllipse(0, 0, 100, 100)
        self.squarePath.addRect(0, 0, 100, 100)

        # Build the triangle path relative to the current pen position.
        x = self.trianglePath.currentPosition().x()
        y = self.trianglePath.currentPosition().y()
        self.trianglePath.moveTo(x + 120 / 2, y)
        self.trianglePath.lineTo(0, 100)
        self.trianglePath.lineTo(120, 100)
        self.trianglePath.lineTo(x + 120 / 2, y)

        self.setWindowTitle("Tooltips")
        self.resize(500, 300)

        # Seed the scene with one shape of each kind.
        self.createShapeItem(self.circlePath, "Circle",
                self.initialItemPosition(self.circlePath),
                self.initialItemColor())
        self.createShapeItem(self.squarePath, "Square",
                self.initialItemPosition(self.squarePath),
                self.initialItemColor())
        self.createShapeItem(self.trianglePath, "Triangle",
                self.initialItemPosition(self.trianglePath),
                self.initialItemColor())

    def event(self, event):
        """Intercept ToolTip events to show the hovered shape's tooltip."""
        if event.type() == QtCore.QEvent.ToolTip:
            helpEvent = event
            index = self.itemAt(helpEvent.pos())
            if index != -1:
                QtGui.QToolTip.showText(helpEvent.globalPos(),
                        self.shapeItems[index].toolTip())
            else:
                # Not over a shape: hide any visible tooltip.
                QtGui.QToolTip.hideText()
                event.ignore()

            return True

        return super(SortingBox, self).event(event)

    def resizeEvent(self, event):
        """Keep the tool buttons stacked in the bottom-right corner."""
        margin = self.style().pixelMetric(QtGui.QStyle.PM_DefaultTopLevelMargin)
        x = self.width() - margin
        y = self.height() - margin

        # Each call returns the y for the next button above.
        y = self.updateButtonGeometry(self.newCircleButton, x, y)
        y = self.updateButtonGeometry(self.newSquareButton, x, y)
        self.updateButtonGeometry(self.newTriangleButton, x, y)

    def paintEvent(self, event):
        """Paint every shape at its position, in list (back-to-front) order."""
        painter = QtGui.QPainter(self)
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        for shapeItem in self.shapeItems:
            painter.translate(shapeItem.position())
            painter.setBrush(shapeItem.color())
            painter.drawPath(shapeItem.path())
            painter.translate(-shapeItem.position())

    def mousePressEvent(self, event):
        """Start dragging the topmost shape under the cursor (left button)."""
        if event.button() == QtCore.Qt.LeftButton:
            index = self.itemAt(event.pos())
            if index != -1:
                self.itemInMotion = self.shapeItems[index]
                self.previousPosition = event.pos()
                # Raise the picked item in the draw order.
                # NOTE(review): insert at len-1 (after the del) leaves one
                # item above it; the C++ original moves it to the very end
                # of the list -- verify which behavior is intended.
                value = self.shapeItems[index]
                del self.shapeItems[index]
                self.shapeItems.insert(len(self.shapeItems) - 1, value)
                self.update()

    def mouseMoveEvent(self, event):
        """Drag the grabbed shape while the left button is held."""
        if (event.buttons() & QtCore.Qt.LeftButton) and self.itemInMotion:
            self.moveItemTo(event.pos())

    def mouseReleaseEvent(self, event):
        """Finish the drag and drop the shape at the release position."""
        if (event.button() == QtCore.Qt.LeftButton) and self.itemInMotion:
            self.moveItemTo(event.pos())
            self.itemInMotion = None

    def createNewCircle(self):
        """Tool-button slot: add a numbered circle at a random position."""
        SortingBox.circle_count += 1
        self.createShapeItem(self.circlePath,
                "Circle <%d>" % SortingBox.circle_count,
                self.randomItemPosition(), self.randomItemColor())

    def createNewSquare(self):
        """Tool-button slot: add a numbered square at a random position."""
        SortingBox.square_count += 1
        self.createShapeItem(self.squarePath,
                "Square <%d>" % SortingBox.square_count,
                self.randomItemPosition(), self.randomItemColor())

    def createNewTriangle(self):
        """Tool-button slot: add a numbered triangle at a random position."""
        SortingBox.triangle_count += 1
        self.createShapeItem(self.trianglePath,
                "Triangle <%d>" % SortingBox.triangle_count,
                self.randomItemPosition(), self.randomItemColor())

    def itemAt(self, pos):
        """Index of the topmost shape containing *pos*, or -1.

        Iterates front-to-back (reverse list order) so the visually
        topmost shape wins.
        """
        for i in range(len(self.shapeItems) - 1, -1, -1):
            item = self.shapeItems[i]
            # Hit test in the shape's local coordinates.
            if item.path().contains(QtCore.QPointF(pos - item.position())):
                return i
        return -1

    def moveItemTo(self, pos):
        """Shift the dragged item by the mouse delta since the last event."""
        offset = pos - self.previousPosition
        self.itemInMotion.setPosition(self.itemInMotion.position() + offset)
        self.previousPosition = QtCore.QPoint(pos)
        self.update()

    def updateButtonGeometry(self, button, x, y):
        """Place *button* with its bottom-right at (x, y); return next y."""
        size = button.sizeHint()
        button.setGeometry(x - size.width(), y - size.height(),
                size.width(), size.height())

        return y - size.height() - self.style().pixelMetric(QtGui.QStyle.PM_DefaultLayoutSpacing)

    def createShapeItem(self, path, toolTip, pos, color):
        """Append a new ShapeItem built from the given attributes."""
        shapeItem = ShapeItem()
        shapeItem.setPath(path)
        shapeItem.setToolTip(toolTip)
        shapeItem.setPosition(pos)
        shapeItem.setColor(color)
        self.shapeItems.append(shapeItem)
        self.update()

    def createToolButton(self, toolTip, icon, member):
        """Build a 32x32 icon button wired to slot *member*."""
        button = QtGui.QToolButton(self)
        button.setToolTip(toolTip)
        button.setIcon(icon)
        button.setIconSize(QtCore.QSize(32, 32))
        button.clicked.connect(member)

        return button

    def initialItemPosition(self, path):
        """Starting position for a seeded shape, spread across the width."""
        y = (self.height() - path.controlPointRect().height()) / 2
        if len(self.shapeItems) == 0:
            x = ((3 * self.width()) / 2 - path.controlPointRect().width()) / 2
        else:
            x = (self.width() / len(self.shapeItems) - path.controlPointRect().width()) / 2

        return QtCore.QPoint(x, y)

    def randomItemPosition(self):
        """Random position keeping a 120px margin from right/bottom edges."""
        x = random.randint(0, self.width() - 120)
        y = random.randint(0, self.height() - 120)

        return QtCore.QPoint(x, y)

    def initialItemColor(self):
        """Deterministic hue stepped by 85 per already-present shape."""
        hue = ((len(self.shapeItems) + 1) * 85) % 256
        return QtGui.QColor.fromHsv(hue, 255, 190)

    def randomItemColor(self):
        """Random hue at fixed saturation/value."""
        return QtGui.QColor.fromHsv(random.randint(0, 256), 255, 190)
if __name__ == "__main__":
    import sys

    # Standard Qt bootstrap: create the application, show the demo widget
    # and hand control to the event loop.
    application = QtGui.QApplication(sys.argv)
    demo = SortingBox()
    demo.show()
    sys.exit(application.exec_())
| #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at <EMAIL>.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import random
from PySide import QtCore, QtGui
import tooltips_rc
class ShapeItem(object):
    """One draggable shape: painter path, position, fill color, tooltip."""

    def __init__(self):
        self.myPath = QtGui.QPainterPath()    # outline for drawing / hit tests
        self.myPosition = QtCore.QPoint()     # top-left offset in widget coords
        self.myColor = QtGui.QColor()         # fill color
        self.myToolTip = ''                   # hover tooltip text

    def path(self):
        """Return the QPainterPath describing the shape's outline."""
        return self.myPath

    def position(self):
        """Return the shape's position in widget coordinates."""
        return self.myPosition

    def color(self):
        """Return the shape's fill color."""
        return self.myColor

    def toolTip(self):
        """Return the tooltip text shown when hovering the shape."""
        return self.myToolTip

    def setPath(self, path):
        """Set the painter path used for drawing and hit testing."""
        self.myPath = path

    def setToolTip(self, toolTip):
        """Set the hover tooltip text."""
        self.myToolTip = toolTip

    def setPosition(self, position):
        """Set the shape's position in widget coordinates."""
        self.myPosition = position

    def setColor(self, color):
        """Set the shape's fill color."""
        self.myColor = color
class SortingBox(QtGui.QWidget):
    """Main widget: a canvas of draggable, tooltip-bearing shapes plus three
    tool buttons (anchored bottom-right) that spawn new shapes."""

    # Class-wide counters used to number newly created shapes in their
    # tooltips ("Circle <2>", ...); the three seed shapes count as 1.
    circle_count = square_count = triangle_count = 1

    def __init__(self):
        super(SortingBox, self).__init__()

        # Template outlines shared by every shape of the same kind.
        self.circlePath = QtGui.QPainterPath()
        self.squarePath = QtGui.QPainterPath()
        self.trianglePath = QtGui.QPainterPath()

        self.shapeItems = []                     # all shapes, painted in list order
        self.previousPosition = QtCore.QPoint()  # last mouse pos while dragging
        self.setMouseTracking(True)              # required for hover tooltips
        self.setBackgroundRole(QtGui.QPalette.Base)
        self.itemInMotion = None                 # shape currently being dragged

        self.newCircleButton = self.createToolButton("New Circle",
                QtGui.QIcon(':/images/circle.png'), self.createNewCircle)
        self.newSquareButton = self.createToolButton("New Square",
                QtGui.QIcon(':/images/square.png'), self.createNewSquare)
        self.newTriangleButton = self.createToolButton("New Triangle",
                QtGui.QIcon(':/images/triangle.png'), self.createNewTriangle)

        self.circlePath.addEllipse(0, 0, 100, 100)
        self.squarePath.addRect(0, 0, 100, 100)

        # Build an isosceles triangle 120 wide and 100 tall, apex on top.
        x = self.trianglePath.currentPosition().x()
        y = self.trianglePath.currentPosition().y()
        self.trianglePath.moveTo(x + 120 / 2, y)
        self.trianglePath.lineTo(0, 100)
        self.trianglePath.lineTo(120, 100)
        self.trianglePath.lineTo(x + 120 / 2, y)

        self.setWindowTitle("Tooltips")
        self.resize(500, 300)

        # Seed the canvas with one shape of each kind.
        self.createShapeItem(self.circlePath, "Circle",
                self.initialItemPosition(self.circlePath),
                self.initialItemColor())
        self.createShapeItem(self.squarePath, "Square",
                self.initialItemPosition(self.squarePath),
                self.initialItemColor())
        self.createShapeItem(self.trianglePath, "Triangle",
                self.initialItemPosition(self.trianglePath),
                self.initialItemColor())

    def event(self, event):
        """Intercept ToolTip events so each shape shows its own text."""
        if event.type() == QtCore.QEvent.ToolTip:
            helpEvent = event
            index = self.itemAt(helpEvent.pos())
            if index != -1:
                QtGui.QToolTip.showText(helpEvent.globalPos(),
                        self.shapeItems[index].toolTip())
            else:
                # Cursor is over empty canvas: hide any visible tooltip.
                QtGui.QToolTip.hideText()
                event.ignore()
            return True
        return super(SortingBox, self).event(event)

    def resizeEvent(self, event):
        """Keep the three tool buttons anchored to the bottom-right corner."""
        margin = self.style().pixelMetric(QtGui.QStyle.PM_DefaultTopLevelMargin)
        x = self.width() - margin
        y = self.height() - margin
        # Stack the buttons upwards; each call returns the next free y.
        y = self.updateButtonGeometry(self.newCircleButton, x, y)
        y = self.updateButtonGeometry(self.newSquareButton, x, y)
        self.updateButtonGeometry(self.newTriangleButton, x, y)

    def paintEvent(self, event):
        """Paint every shape at its own position, in list order (later
        items are drawn on top of earlier ones)."""
        painter = QtGui.QPainter(self)
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        for shapeItem in self.shapeItems:
            painter.translate(shapeItem.position())
            painter.setBrush(shapeItem.color())
            painter.drawPath(shapeItem.path())
            painter.translate(-shapeItem.position())

    def mousePressEvent(self, event):
        """Start dragging the topmost shape under the cursor, if any."""
        if event.button() == QtCore.Qt.LeftButton:
            index = self.itemAt(event.pos())
            if index != -1:
                self.itemInMotion = self.shapeItems[index]
                self.previousPosition = event.pos()
                # Move the picked shape near the end of the list so it is
                # painted above (almost all of) its siblings.
                value = self.shapeItems[index]
                del self.shapeItems[index]
                self.shapeItems.insert(len(self.shapeItems) - 1, value)
                self.update()

    def mouseMoveEvent(self, event):
        """Drag the picked shape while the left button is held down."""
        if (event.buttons() & QtCore.Qt.LeftButton) and self.itemInMotion:
            self.moveItemTo(event.pos())

    def mouseReleaseEvent(self, event):
        """Drop the dragged shape at the release position and end the drag."""
        if (event.button() == QtCore.Qt.LeftButton) and self.itemInMotion:
            self.moveItemTo(event.pos())
            self.itemInMotion = None

    def createNewCircle(self):
        """Tool-button slot: add a circle at a random position and colour."""
        SortingBox.circle_count += 1
        self.createShapeItem(self.circlePath,
                "Circle <%d>" % SortingBox.circle_count,
                self.randomItemPosition(), self.randomItemColor())

    def createNewSquare(self):
        """Tool-button slot: add a square at a random position and colour."""
        SortingBox.square_count += 1
        self.createShapeItem(self.squarePath,
                "Square <%d>" % SortingBox.square_count,
                self.randomItemPosition(), self.randomItemColor())

    def createNewTriangle(self):
        """Tool-button slot: add a triangle at a random position and colour."""
        SortingBox.triangle_count += 1
        self.createShapeItem(self.trianglePath,
                "Triangle <%d>" % SortingBox.triangle_count,
                self.randomItemPosition(), self.randomItemColor())

    def itemAt(self, pos):
        """Return the index of the topmost shape containing *pos*, or -1.

        Iterates back to front because later list entries are painted on
        top of earlier ones.
        """
        for i in range(len(self.shapeItems) - 1, -1, -1):
            item = self.shapeItems[i]
            # Hit-test in the shape's own coordinates.
            if item.path().contains(QtCore.QPointF(pos - item.position())):
                return i
        return -1

    def moveItemTo(self, pos):
        """Translate the dragged shape by the mouse delta since last event."""
        offset = pos - self.previousPosition
        self.itemInMotion.setPosition(self.itemInMotion.position() + offset)
        self.previousPosition = QtCore.QPoint(pos)
        self.update()

    def updateButtonGeometry(self, button, x, y):
        """Place *button* with its bottom-right corner at (x, y); return the
        y coordinate available for the next button stacked above it."""
        size = button.sizeHint()
        button.setGeometry(x - size.width(), y - size.height(),
                size.width(), size.height())
        return y - size.height() - self.style().pixelMetric(QtGui.QStyle.PM_DefaultLayoutSpacing)

    def createShapeItem(self, path, toolTip, pos, color):
        """Append a new ShapeItem built from the given attributes and
        schedule a repaint."""
        shapeItem = ShapeItem()
        shapeItem.setPath(path)
        shapeItem.setToolTip(toolTip)
        shapeItem.setPosition(pos)
        shapeItem.setColor(color)
        self.shapeItems.append(shapeItem)
        self.update()

    def createToolButton(self, toolTip, icon, member):
        """Build a 32x32 icon button whose clicked signal calls *member*."""
        button = QtGui.QToolButton(self)
        button.setToolTip(toolTip)
        button.setIcon(icon)
        button.setIconSize(QtCore.QSize(32, 32))
        button.clicked.connect(member)
        return button

    def initialItemPosition(self, path):
        """Position for a seed shape: vertically centred, horizontal slot
        depending on how many shapes already exist."""
        y = (self.height() - path.controlPointRect().height()) / 2
        if len(self.shapeItems) == 0:
            x = ((3 * self.width()) / 2 - path.controlPointRect().width()) / 2
        else:
            x = (self.width() / len(self.shapeItems) - path.controlPointRect().width()) / 2
        return QtCore.QPoint(x, y)

    def randomItemPosition(self):
        """Random top-left point keeping a 120px margin so the shape stays
        fully inside the widget."""
        x = random.randint(0, self.width() - 120)
        y = random.randint(0, self.height() - 120)
        return QtCore.QPoint(x, y)

    def initialItemColor(self):
        """Deterministic hue for seed shapes, spaced 85/256 around the wheel."""
        hue = ((len(self.shapeItems) + 1) * 85) % 256
        return QtGui.QColor.fromHsv(hue, 255, 190)

    def randomItemColor(self):
        """Fully saturated colour with a random hue."""
        return QtGui.QColor.fromHsv(random.randint(0, 256), 255, 190)
if __name__ == "__main__":
    import sys

    # Standard Qt bootstrap: one application object, one top-level widget.
    app = QtGui.QApplication(sys.argv)
    window = SortingBox()
    window.show()
sys.exit(app.exec_()) | en | 0.577039 | #!/usr/bin/env python ############################################################################# ## ## Copyright (C) 2004-2005 Trolltech AS. All rights reserved. ## ## This file is part of the example classes of the Qt Toolkit. ## ## This file may be used under the terms of the GNU General Public ## License version 2.0 as published by the Free Software Foundation ## and appearing in the file LICENSE.GPL included in the packaging of ## this file. Please review the following information to ensure GNU ## General Public Licensing requirements will be met: ## http://www.trolltech.com/products/qt/opensource.html ## ## If you are unsure which license is appropriate for your use, please ## review the following information: ## http://www.trolltech.com/products/qt/licensing.html or contact the ## sales department at <EMAIL>. ## ## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ## ############################################################################# | 2.158648 | 2 |
cloud99/monitors/dummy.py | cisco-oss-eng/Cloud99 | 28 | 6618063 | <filename>cloud99/monitors/dummy.py
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from voluptuous import Schema
from cloud99.monitors import BaseMonitor
class DummyMonitor(BaseMonitor):
    """Placeholder monitor: performs no real health check, it simply
    re-queues a "monitor" message for its actor after a fixed cool-down."""

    # No configuration is required; accept (and ignore) any extra keys.
    schema = Schema({}, extra=True)
    # Seconds to sleep before notifying the actor again.
    COOL_DOWN = 10

    def monitor(self):
        """Sleep for the cool-down period, then ask the actor to run
        another monitor cycle by posting to its inbox."""
        time.sleep(DummyMonitor.COOL_DOWN)
        self.actor_inbox.put({"msg": "monitor"})
| <filename>cloud99/monitors/dummy.py
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from voluptuous import Schema
from cloud99.monitors import BaseMonitor
class DummyMonitor(BaseMonitor):
schema = Schema({}, extra=True)
COOL_DOWN = 10
def monitor(self):
time.sleep(DummyMonitor.COOL_DOWN)
self.actor_inbox.put({"msg": "monitor"})
| en | 0.851539 | # Copyright 2016 Cisco Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 2.12826 | 2 |
caerus/__init__.py | erdogant/caerus | 5 | 6618064 | <gh_stars>1-10
from caerus.caerus import caerus
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.5'
# module level doc-string
__doc__ = """
caerus
=====================================================================
Description
-----------
caerus is a powerful detection analysis library for local minima and maxima.
This package determines the local-minima with the corresponding local-maxima
across time series data without the need of normalization procedures.
See README.md file for more information.
Here are just a few of the things that caerus does well:
- Input data is a simple vector of values for which the order matters.
- Ouput contains detected start-stop regions of local minima and maxima.
- Output figures are created.
- Gridsearch is possible
Examples
--------
>>> from caerus import caerus
>>> cs = caerus()
>>> X = cs.download_example()
>>> results = cs.fit(X)
>>> fig = cs.plot()
>>> # Results in the dataframe
>>> results['df']
References
----------
* https://github.com/erdogant/caerus
* https://github.com/erdogant/findpeaks
"""
| from caerus.caerus import caerus
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.5'
# module level doc-string
__doc__ = """
caerus
=====================================================================
Description
-----------
caerus is a powerful detection analysis library for local minima and maxima.
This package determines the local-minima with the corresponding local-maxima
across time series data without the need of normalization procedures.
See README.md file for more information.
Here are just a few of the things that caerus does well:
- Input data is a simple vector of values for which the order matters.
- Ouput contains detected start-stop regions of local minima and maxima.
- Output figures are created.
- Gridsearch is possible
Examples
--------
>>> from caerus import caerus
>>> cs = caerus()
>>> X = cs.download_example()
>>> results = cs.fit(X)
>>> fig = cs.plot()
>>> # Results in the dataframe
>>> results['df']
References
----------
* https://github.com/erdogant/caerus
* https://github.com/erdogant/findpeaks
""" | en | 0.689785 | # module level doc-string caerus ===================================================================== Description ----------- caerus is a powerful detection analysis library for local minima and maxima. This package determines the local-minima with the corresponding local-maxima across time series data without the need of normalization procedures. See README.md file for more information. Here are just a few of the things that caerus does well: - Input data is a simple vector of values for which the order matters. - Ouput contains detected start-stop regions of local minima and maxima. - Output figures are created. - Gridsearch is possible Examples -------- >>> from caerus import caerus >>> cs = caerus() >>> X = cs.download_example() >>> results = cs.fit(X) >>> fig = cs.plot() >>> # Results in the dataframe >>> results['df'] References ---------- * https://github.com/erdogant/caerus * https://github.com/erdogant/findpeaks | 2.267271 | 2 |
tests/v2/test_backup.py | luxiaba/pyvultr | 4 | 6618065 | from pyvultr.base_api import SupportHttpMethod
from pyvultr.v2 import Backup
from tests.v2 import BaseTestV2
class TestBackup(BaseTestV2):
    def test_list(self):
        """Test list backups."""
        with self._get("response/backup_list") as mock:
            raw_backup = mock.python_body["backups"][0]
            expected_result = Backup.from_dict(raw_backup)
            paged_backups = self.api_v2.backup.list(capacity=1)
            actual_result: Backup = paged_backups.first()
            self.assertEqual(mock.url, "https://api.vultr.com/v2/backups")
            self.assertEqual(mock.method, SupportHttpMethod.GET.value)
            self.assertEqual(actual_result, expected_result)

    def test_get(self):
        """Test get backup."""
        with self._get("response/backup", expected_returned=Backup) as mock:
            expected_result: Backup = mock.python_body
            actual_result: Backup = self.api_v2.backup.get("test_back_ip_1987573")
            self.assertEqual(mock.url, "https://api.vultr.com/v2/backups/test_back_ip_1987573")
            self.assertEqual(mock.method, SupportHttpMethod.GET.value)
            self.assertEqual(actual_result, expected_result)
| from pyvultr.base_api import SupportHttpMethod
from pyvultr.v2 import Backup
from tests.v2 import BaseTestV2
class TestBackup(BaseTestV2):
def test_list(self):
"""Test list backups."""
with self._get("response/backup_list") as mock:
_excepted_result = mock.python_body["backups"][0]
excepted_result = Backup.from_dict(_excepted_result)
_real_result = self.api_v2.backup.list(capacity=1)
real_result: Backup = _real_result.first()
self.assertEqual(mock.url, "https://api.vultr.com/v2/backups")
self.assertEqual(mock.method, SupportHttpMethod.GET.value)
self.assertEqual(real_result, excepted_result)
def test_get(self):
"""Test get backup."""
with self._get("response/backup", expected_returned=Backup) as mock:
excepted_result: Backup = mock.python_body
real_result: Backup = self.api_v2.backup.get("test_back_ip_1987573")
self.assertEqual(mock.url, "https://api.vultr.com/v2/backups/test_back_ip_1987573")
self.assertEqual(mock.method, SupportHttpMethod.GET.value)
self.assertEqual(real_result, excepted_result)
| en | 0.756906 | Test list backups. Test get backup. | 2.70425 | 3 |
leanservice/crud.py | mishioo/leanservice | 0 | 6618066 | <reponame>mishioo/leanservice
"""Database operatons.
This application does not need many database interactions, so all are defined in the
single module. As in standard CRUD module, functions defied here are responsible for
reading and modifying database state for persistent storage. Two typer of operations are
supported: adding a record to history and retriving whole history.
"""
from typing import List
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from . import models, schemas
async def add_to_history(db: Session, post: schemas.RedditPost) -> models.RedditPicture:
    """Adds given picture post to history of picks."""
    # Equivalent to:
    #   INSERT INTO history (id, url, post_url, created_at) VALUES (?, ?, ?, ?)
    # "url" and "post_url" come from the RedditPost schema; "id" and
    # "created_at" are auto-generated by the model.
    record = models.RedditPicture(url=post.url, post_url=post.post_url)
    db.add(record)
    await db.commit()
    return record
async def get_history(db: Session) -> List[models.RedditPicture]:
    """Retrieves a whole history of picks."""
    # Equivalent to:
    #   SELECT history.url, history.post_url, history.created_at FROM history
    columns = (
        models.RedditPicture.url,
        models.RedditPicture.post_url,
        models.RedditPicture.created_at,
    )
    rows = await db.execute(select(*columns))
    return rows.all()
| """Database operatons.
This application does not need many database interactions, so all are defined in the
single module. As in standard CRUD module, functions defied here are responsible for
reading and modifying database state for persistent storage. Two typer of operations are
supported: adding a record to history and retriving whole history.
"""
from typing import List
from sqlalchemy.future import select
from sqlalchemy.orm import Session
from . import models, schemas
async def add_to_history(db: Session, post: schemas.RedditPost) -> models.RedditPicture:
"""Adds given picture post to history of picks."""
db_entry = models.RedditPicture(url=post.url, post_url=post.post_url)
db.add(db_entry)
# above is an equivalent of
# 'INSERT INTO history (id, url, post_url, created_at) VALUES (?, ?, ?, ?)'
# where "url" and "post_url" values are provided by `RedditPost` schema
# and "id" and "created_at" are auto-generated by model
await db.commit()
return db_entry
async def get_history(db: Session) -> List[models.RedditPicture]:
"""Retrieves a whole history of picks."""
query = select(
models.RedditPicture.url,
models.RedditPicture.post_url,
models.RedditPicture.created_at,
) # equivalent of:
# 'SELECT history.url, history.post_url, history.created_at FROM history'
result = await db.execute(query)
return result.all() | en | 0.847846 | Database operatons. This application does not need many database interactions, so all are defined in the single module. As in standard CRUD module, functions defied here are responsible for reading and modifying database state for persistent storage. Two typer of operations are supported: adding a record to history and retriving whole history. Adds given picture post to history of picks. # above is an equivalent of # 'INSERT INTO history (id, url, post_url, created_at) VALUES (?, ?, ?, ?)' # where "url" and "post_url" values are provided by `RedditPost` schema # and "id" and "created_at" are auto-generated by model Retrieves a whole history of picks. # equivalent of: # 'SELECT history.url, history.post_url, history.created_at FROM history' | 3.400426 | 3 |
Term 3/18-22/trace.py | theseana/apondaone | 0 | 6618067 | from tkinter import *
def validation(var, indx, mode):
    """StringVar write-trace callback: colour the entry green when it
    holds exactly 16 digits (e.g. a card number), red otherwise."""
    text = my_var.get()
    is_valid = len(text) == 16 and text.isdigit()
    e1.config(bg="green" if is_valid else "red")


root = Tk()

my_var = StringVar()
my_var.trace_add('write', validation)

Label(root, textvariable=my_var).grid(row=0, column=0)

e1 = Entry(root, textvariable=my_var)
e1.grid(row=1, column=0)
root.mainloop() | from tkinter import *
def validation(var, indx, mode):
c1 = len(my_var.get()) == 16
c2 = my_var.get().isdigit()
if c1 and c2:
e1.config(bg="green")
else:
e1.config(bg="red")
root = Tk()
my_var = StringVar()
my_var.trace_add('write', validation)
Label(root, textvariable = my_var).grid(row=0, column=0)
e1 = Entry(root, textvariable = my_var)
e1.grid(row=1, column=0)
root.mainloop() | none | 1 | 3.380249 | 3 | |
algorithms/dynamic_programming/coin_change.py | avenet/hackerrank | 0 | 6618068 | #!/bin/python3
n, m = input().strip().split(' ')
n, m = [int(n), int(m)]
coins = list(map(int, input().strip().split(' ')))
total_coins = len(coins)
coins.sort()
def memoize(f):
    """Cache f's results keyed by its (n, i) argument pair.

    The cache hit test uses key membership ("in") rather than truthiness
    of the cached value: get_ways legitimately returns 0, and the previous
    ``if cache.get((n, i)):`` treated every cached 0 as a miss, recomputing
    those subtrees on every call and defeating the memoization.
    """
    cache = {}

    def decorated(n, i):
        key = (n, i)
        if key not in cache:
            cache[key] = f(n, i)
        return cache[key]

    return decorated
@memoize
def get_ways(n, start_index):
    """Number of ways to make amount *n* using coins[start_index:].

    Coins may be reused; restricting recursion to indices >= start_index
    counts combinations rather than permutations.
    """
    if n == 0:
        return 1
    return sum(
        get_ways(n - coins[i], i)
        for i in range(start_index, total_coins)
        if coins[i] <= n
    )


print(get_ways(n, 0))
| #!/bin/python3
n, m = input().strip().split(' ')
n, m = [int(n), int(m)]
coins = list(map(int, input().strip().split(' ')))
total_coins = len(coins)
coins.sort()
def memoize(f):
cache = {}
def decorated(n, i):
if cache.get((n, i)):
return cache[(n, i)]
result = f(n, i)
cache[(n, i)] = result
return result
return decorated
@memoize
def get_ways(n, start_index):
if n == 0:
return 1
result = 0
for i in range(start_index, total_coins):
if n >= coins[i]:
result += get_ways(n - coins[i], i)
return result
print(get_ways(n, 0))
| ru | 0.16812 | #!/bin/python3 | 3.092133 | 3 |
rdsslib/kinesis/factory.py | JiscSD/rdss-shared-libraries | 0 | 6618069 | """ Factory for creating Kinesis Clients"""
import boto3
from .client import KinesisClient, EnhancedKinesisClient
from .decorators import RouterHistoryDecorator
from .handlers import MessageErrorHandler
from .reader import StreamReader
from .writer import StreamWriter
def kinesis_client_factory(
        client_type,
        invalid_stream_name='invalid_stream',
        error_stream_name='error_stream',
        read_interval=0.2):
    """ Create customised instances of KinesisClient or its subclasses

    :param client_type: Specifies the type of client that the factory
        should construct: 'basic' or 'enhanced'
    :param invalid_stream_name: Stream receiving messages rejected as
        invalid (enhanced client only)
    :param error_stream_name: Stream receiving messages that failed
        processing (enhanced client only)
    :param read_interval: Seconds between successive stream reads
    :return: An instance of Kinesis client
    :rtype: client.KinesisClient or client.EnhancedKinesisClient
    :raises ValueError: If client_type is neither 'basic' nor 'enhanced'.
        (Previously an unknown type silently returned None, which only
        surfaced later at the first attribute access on the "client".)
    """
    boto_client = boto3.client('kinesis')
    writer = StreamWriter(client=boto_client)
    reader = StreamReader(client=boto_client, read_interval=read_interval)
    if client_type == 'basic':
        return KinesisClient(writer=writer,
                             reader=reader)
    elif client_type == 'enhanced':
        decorators = [RouterHistoryDecorator()]
        handler = MessageErrorHandler(invalid_stream_name=invalid_stream_name,
                                      error_stream_name=error_stream_name,
                                      writer=writer)
        return EnhancedKinesisClient(writer=writer,
                                     reader=reader,
                                     error_handler=handler,
                                     decorators=decorators)
    raise ValueError(
        "Unknown client_type: {!r} (expected 'basic' or 'enhanced')".format(
            client_type))
| """ Factory for creating Kinesis Clients"""
import boto3
from .client import KinesisClient, EnhancedKinesisClient
from .decorators import RouterHistoryDecorator
from .handlers import MessageErrorHandler
from .reader import StreamReader
from .writer import StreamWriter
def kinesis_client_factory(
client_type,
invalid_stream_name='invalid_stream',
error_stream_name='error_stream',
read_interval=0.2):
""" Create customised instances of KinesisClient or its subclasses
:param client_type: Specifies the type of client that the factory
should construct
:return: An instance of Kinesis client
:rtype: client.KinesisClient or client.EnhancedKinesisClient
"""
boto_client = boto3.client('kinesis')
writer = StreamWriter(client=boto_client)
reader = StreamReader(client=boto_client, read_interval=read_interval)
if client_type == 'basic':
return KinesisClient(writer=writer,
reader=reader)
elif client_type == 'enhanced':
decorators = [RouterHistoryDecorator()]
handler = MessageErrorHandler(invalid_stream_name=invalid_stream_name,
error_stream_name=error_stream_name,
writer=writer)
return EnhancedKinesisClient(writer=writer,
reader=reader,
error_handler=handler,
decorators=decorators)
| en | 0.619224 | Factory for creating Kinesis Clients Create customised instances of KinesisClient or its subclasses :param client_type: Specifies the type of client that the factory should construct :return: An instance of Kinesis client :rtype: client.KinesisClient or client.EnhancedKinesisClient | 2.931176 | 3 |
makinkinz/base/context_processor.py | makinkinz/makinkinz-website | 0 | 6618070 | <reponame>makinkinz/makinkinz-website
from .models import Tags
def add_variable_to_context(request):
tags = Tags.objects.all()
return {
'tags': tags
} | from .models import Tags
def add_variable_to_context(request):
tags = Tags.objects.all()
return {
'tags': tags
} | none | 1 | 1.519543 | 2 | |
proj/pro_settings.py | kant/predictprotein-webserver-proq3 | 0 | 6618071 | """
Django settings for proj project in production
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append("%s/pred/app/"%(BASE_DIR))
import myfunc
try:
from shared_settings import *
except ImportError:
pass
with open('/etc/django_pro_secret_key.txt') as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [u'localhost', u'dev.proq3.bioinfo.se', u'proq3.bioinfo.se']
# Best effort: extend ALLOWED_HOSTS with the compute nodes listed (one per
# line, first column) in pred/static/computenode.txt, if that file exists.
computenodefile = "%s/pred/static/computenode.txt"%(BASE_DIR)
if os.path.exists(computenodefile):
    nodelist = []
    try:
        nodelist = myfunc.ReadIDList2(computenodefile,col=0)
    except Exception:
        # A bare "except:" would also swallow SystemExit/KeyboardInterrupt;
        # catching Exception keeps the best-effort behaviour without that.
        pass
    ALLOWED_HOSTS += nodelist
| """
Django settings for proj project in production
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append("%s/pred/app/"%(BASE_DIR))
import myfunc
try:
from shared_settings import *
except ImportError:
pass
with open('/etc/django_pro_secret_key.txt') as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [u'localhost', u'dev.proq3.bioinfo.se', u'proq3.bioinfo.se']
computenodefile = "%s/pred/static/computenode.txt"%(BASE_DIR)
if os.path.exists(computenodefile):
nodelist = []
try:
nodelist = myfunc.ReadIDList2(computenodefile,col=0)
except:
pass
ALLOWED_HOSTS += nodelist
| en | 0.75732 | Django settings for proj project in production For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # SECURITY WARNING: don't run with debug turned on in production! | 2.060565 | 2 |
basics/10.py | RupakMukherjee/comp-py-2021 | 1 | 6618072 | stuff = dict()
print stuff.get('candy',-1)
x = ('Glenn', 'Sally', 'Joseph')
print x[2]
y = (1,9,2)
print y
print max(y)
for iter in y:
print iter
x = [9,8,7] # -- this is a list
x[2] = 6
print x # -- hence it is mutable..
# x = (9,8,7) -- this is a touple
#x[2] = 6
#print x -- it is NOT mutable.
# y = 'ABC'
# y[2] = "D"
# it will give a traceback...
x = (3,2,1)
# x.sort()
# x.append(5)
# x.reverse()
x.index(3)
# x.pop()
l = list()
dir(l)
t = tuple()
dir(t)
(x,y) = (4, 'fred')
print y
(a,b) = (99,98)
print a
a,b = (99,98)
print a
d = dict()
d['csev'] = 2
d['cwen'] = 4
for (k,v) in d.items():
print k,v
tups = d.items()
print tups
print (0, 1, 2) < (5, 1, 2)
print (0, 1, 2000000) < (0, 3, 4)
print ('Jones', 'Sally') < ("Jones", 'Fred')
print ('Jones', "Sally") < ("Jones", "Sam")
d = {'a':10, 'b':1, 'c':22}
t = d.items()
print t
t.sort()
print t
d = {'a':10, 'b':1, 'c':22}
print d.items()
t = sorted(d.items())
print t
for k,v in sorted(d.items()):
print k,v
c = {'a':10, 'b':1, 'c':22}
tmp = list()
for k,v in c.items():
tmp.append( (v,k) )
print tmp
tmp.sort(reverse = True)
print tmp
tmp.sort(reverse = False)
print tmp
fhand = open('A-8-2-data')
counts = dict()
for line in fhand:
words = line.split()
for word in words:
counts[word] = counts.get(word,0) + 1
lst = list()
for key, val in counts.items():
lst.append ( (val, key) )
lst.sort(reverse = True)
for val, key in lst[:10]:
print key, val
c = {'a':10, 'b':1, 'c':22}
print sorted ( [ (v,k) for k,v in c.items() ] )
| stuff = dict()
print stuff.get('candy',-1)
x = ('Glenn', 'Sally', 'Joseph')
print x[2]
y = (1,9,2)
print y
print max(y)
for iter in y:
print iter
x = [9,8,7] # -- this is a list
x[2] = 6
print x # -- hence it is mutable..
# x = (9,8,7) -- this is a touple
#x[2] = 6
#print x -- it is NOT mutable.
# y = 'ABC'
# y[2] = "D"
# it will give a traceback...
x = (3,2,1)
# x.sort()
# x.append(5)
# x.reverse()
x.index(3)
# x.pop()
l = list()
dir(l)
t = tuple()
dir(t)
(x,y) = (4, 'fred')
print y
(a,b) = (99,98)
print a
a,b = (99,98)
print a
d = dict()
d['csev'] = 2
d['cwen'] = 4
for (k,v) in d.items():
print k,v
tups = d.items()
print tups
print (0, 1, 2) < (5, 1, 2)
print (0, 1, 2000000) < (0, 3, 4)
print ('Jones', 'Sally') < ("Jones", 'Fred')
print ('Jones', "Sally") < ("Jones", "Sam")
d = {'a':10, 'b':1, 'c':22}
t = d.items()
print t
t.sort()
print t
d = {'a':10, 'b':1, 'c':22}
print d.items()
t = sorted(d.items())
print t
for k,v in sorted(d.items()):
print k,v
c = {'a':10, 'b':1, 'c':22}
tmp = list()
for k,v in c.items():
tmp.append( (v,k) )
print tmp
tmp.sort(reverse = True)
print tmp
tmp.sort(reverse = False)
print tmp
fhand = open('A-8-2-data')
counts = dict()
for line in fhand:
words = line.split()
for word in words:
counts[word] = counts.get(word,0) + 1
lst = list()
for key, val in counts.items():
lst.append ( (val, key) )
lst.sort(reverse = True)
for val, key in lst[:10]:
print key, val
c = {'a':10, 'b':1, 'c':22}
print sorted ( [ (v,k) for k,v in c.items() ] )
| en | 0.631258 | # -- this is a list # -- hence it is mutable.. # x = (9,8,7) -- this is a touple #x[2] = 6 #print x -- it is NOT mutable. # y = 'ABC' # y[2] = "D" # it will give a traceback... # x.sort() # x.append(5) # x.reverse() # x.pop() | 3.931975 | 4 |
src/aecg/io.py | FDA/aecg-python | 2 | 6618073 | """ I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
                      zip_filename: str,
                      aecg_doc: etree._ElementTree,
                      aecgannset: AecgAnnotationSet,
                      path_prefix: str,
                      annsset_xmlnode_path: str,
                      valgroup: str = "RHYTHM",
                      log_validation: bool = False) -> Tuple[
                          AecgAnnotationSet, pd.DataFrame]:
    """Parses `aecg_doc` XML document and extracts annotations

    Args:
        xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of zip file containint the aECG XML file.
            If '', then xml file is not stored in a zip file.
        aecg_doc (etree._ElementTree): XML document of the aECG XML file.
        aecgannset (AecgAnnotationSet): Annotation set to which append found
            annotations.
        path_prefix (str): Prefix of xml path from which start searching for
            annotations.
        annsset_xmlnode_path (str): Path to xml node of the annotation set
            containing the annotations.
        valgroup (str, optional): Indicates whether to search annotations in
            rhythm or derived waveform. Defaults to "RHYTHM".
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
        found annotations and dataframe with results of validation.
    """
    # anngrpid is a running identifier shared by all annotation rows that
    # belong to the same group; it spans both the beat pass and the
    # no-beat pass below.
    anngrpid = 0
    # Annotations stored within a beat
    beatnodes = aecg_doc.xpath((
        path_prefix +
        "/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
            '/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
    # beatnum indexes the beat currently being processed (0-based).
    beatnum = 0
    # valpd accumulates one validation row per xpath probe when
    # log_validation is True; it is returned alongside the annotation set.
    valpd = pd.DataFrame()
    if len(beatnodes) > 0:
        logger.info(
            f'{xml_filename},{zip_filename},'
            f'{valgroup} {len(beatnodes)} annotated beats found')
        for beatnode in beatnodes:
            for rel_path in ["../component/annotation/"
                             "code[contains(@code, \"MDC_ECG_\")]"]:
                annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
                                           namespaces={'ns': 'urn:hl7-org:v3'})
                rel_path2 = "../value"
                for annsnode in annsnodes:
                    # Template dict collecting one annotation's attributes;
                    # a deep copy is appended per (sub)annotation found.
                    ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
                           "codetype": "",
                           "wavecomponent": "", "wavecomponent2": "",
                           "timecode": "",
                           "value": "", "value_unit": "",
                           "low": "", "low_unit": "",
                           "high": "", "high_unit": "",
                           "lead": ""}
                    # Annotation code
                    valrow2 = validate_xpath(
                        annsnode,
                        ".",
                        "urn:hl7-org:v3",
                        "code",
                        new_validation_row(xml_filename,
                                           valgroup,
                                           "ANNSET_BEAT_ANNS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        ann["code"] = valrow2["VALUE"]
                        # Annotation type from top level value
                        valrow2 = validate_xpath(annsnode,
                                                 "../value",
                                                 "urn:hl7-org:v3",
                                                 "code",
                                                 new_validation_row(
                                                     xml_filename, valgroup,
                                                     "ANNSET_BEAT_ANNS"),
                                                 failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/value"
                        if log_validation:
                            valpd = valpd.append(pd.DataFrame(
                                [valrow2], columns=VALICOLS), ignore_index=True)
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["codetype"] = valrow2["VALUE"]
                        # Annotations type
                        valrow2 = validate_xpath(
                            annsnode,
                            rel_path2,
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_BEAT_ANNS"),
                            failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
                            "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["beatnum"] = beatnum
                            ann["codetype"] = valrow2["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow2], columns=VALICOLS),
                                    ignore_index=True)

                            subannsnodes = annsnode.xpath(
                                rel_path.replace('/', '/ns:'),
                                namespaces={'ns': 'urn:hl7-org:v3'})
                            if len(subannsnodes) == 0:
                                subannsnodes = [annsnode]
                            else:
                                subannsnodes += [annsnode]
                            # Exclude annotations reporting interval values only
                            subannsnodes = [
                                sa for sa in subannsnodes
                                if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
                            # Emit one annotation row per nested annotation
                            # node (plus the node itself).
                            for subannsnode in subannsnodes:
                                # Annotations type
                                valrow2 = validate_xpath(subannsnode,
                                                         rel_path2,
                                                         "urn:hl7-org:v3",
                                                         "code",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_BEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                                    rel_path + "/" + rel_path2
                                if valrow2["VALIOUT"] == "PASSED":
                                    ann["wavecomponent"] = valrow2["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)
                                # Annotations value
                                valrow2 = validate_xpath(subannsnode,
                                                         rel_path2,
                                                         "urn:hl7-org:v3",
                                                         "value",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_BEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                                    rel_path + "/" + rel_path2
                                if valrow2["VALIOUT"] == "PASSED":
                                    ann["value"] = valrow2["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)
                                # Annotations value units
                                valrow2 = validate_xpath(subannsnode,
                                                         rel_path2,
                                                         "urn:hl7-org:v3",
                                                         "unit",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_BEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                                    rel_path + "/" + rel_path2
                                if valrow2["VALIOUT"] == "PASSED":
                                    ann["value_unit"] = valrow2["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)
                                # annotations info from supporting ROI
                                rel_path3 = "../support/supportingROI/component/"\
                                    "boundary/value"
                                # n == "" probes the scalar value; "low"/"high"
                                # probe the interval boundaries.
                                for n in ["", "low", "high"]:
                                    if n != "":
                                        rp = rel_path3 + "/" + n
                                    else:
                                        rp = rel_path3
                                    valrow3 = validate_xpath(
                                        subannsnode,
                                        rp,
                                        "urn:hl7-org:v3",
                                        "value",
                                        new_validation_row(xml_filename,
                                                           valgroup,
                                                           "ANNSET_BEAT_ANNS"),
                                        failcat="WARNING")
                                    valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
                                        rel_path + "/" + rp
                                    if valrow3["VALIOUT"] == "PASSED":
                                        if n != "":
                                            ann[n] = valrow3["VALUE"]
                                        else:
                                            ann["value"] = valrow3["VALUE"]
                                    if log_validation:
                                        valpd = valpd.append(
                                            pd.DataFrame([valrow3], columns=VALICOLS),
                                            ignore_index=True)
                                    valrow3 = validate_xpath(
                                        subannsnode,
                                        rp,
                                        "urn:hl7-org:v3",
                                        "unit",
                                        new_validation_row(xml_filename,
                                                           valgroup,
                                                           "ANNSET_BEAT_ANNS"),
                                        failcat="WARNING")
                                    valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
                                        rel_path + "/" + rp
                                    if valrow3["VALIOUT"] == "PASSED":
                                        if n != "":
                                            ann[n + "_unit"] = valrow3["VALUE"]
                                        else:
                                            ann["value_unit"] = valrow3["VALUE"]
                                    if log_validation:
                                        valpd = valpd.append(
                                            pd.DataFrame([valrow3], columns=VALICOLS),
                                            ignore_index=True)
                                # annotations time encoding, lead and other info used
                                # by value and supporting ROI
                                rel_path4 = "../support/supportingROI/component/"\
                                    "boundary/code"
                                roinodes = subannsnode.xpath(
                                    rel_path4.replace('/', '/ns:'),
                                    namespaces={'ns': 'urn:hl7-org:v3'})
                                for roinode in roinodes:
                                    valrow4 = validate_xpath(
                                        roinode,
                                        ".",
                                        "urn:hl7-org:v3",
                                        "code",
                                        new_validation_row(xml_filename,
                                                           valgroup,
                                                           "ANNSET_BEAT_ANNS"),
                                        failcat="WARNING")
                                    valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
                                        rel_path + "/" + rel_path4
                                    if valrow4["VALIOUT"] == "PASSED":
                                        if valrow4["VALUE"] in ["TIME_ABSOLUTE",
                                                                "TIME_RELATIVE"]:
                                            ann["timecode"] = valrow4["VALUE"]
                                        else:
                                            ann["lead"] = valrow4["VALUE"]
                                    if log_validation:
                                        valpd = valpd.append(
                                            pd.DataFrame([valrow4], columns=VALICOLS),
                                            ignore_index=True)
                                aecgannset.anns.append(copy.deepcopy(ann))
                        else:
                            # No per-annotation value node: capture the
                            # annotation directly from the code element.
                            # Annotations type
                            valrow2 = validate_xpath(annsnode,
                                                     ".",
                                                     "urn:hl7-org:v3",
                                                     "code",
                                                     new_validation_row(xml_filename,
                                                                        valgroup,
                                                                        "ANNSET_BEAT_"
                                                                        "ANNS"),
                                                     failcat="WARNING")
                            valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
                                "/" + rel_path2
                            if valrow2["VALIOUT"] == "PASSED":
                                ann["beatnum"] = beatnum
                                ann["codetype"] = valrow2["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)
                                # Annotations value
                                valrow2 = validate_xpath(annsnode,
                                                         rel_path2,
                                                         "urn:hl7-org:v3",
                                                         "value",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_BEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                                    rel_path + "/" + rel_path2
                                if valrow2["VALIOUT"] == "PASSED":
                                    ann["value"] = valrow2["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)
                                # Annotations value units
                                valrow2 = validate_xpath(annsnode,
                                                         rel_path2,
                                                         "urn:hl7-org:v3",
                                                         "unit",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_BEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                                    rel_path + "/" + rel_path2
                                if valrow2["VALIOUT"] == "PASSED":
                                    ann["value_unit"] = valrow2["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)
                                # annotations time encoding, lead and other info used
                                # by value and supporting ROI
                                rel_path4 = "../support/supportingROI/component/" \
                                    "boundary/code"
                                roinodes = annsnode.xpath(
                                    rel_path4.replace('/', '/ns:'),
                                    namespaces={'ns': 'urn:hl7-org:v3'})
                                for roinode in roinodes:
                                    valrow4 = validate_xpath(roinode,
                                                             ".",
                                                             "urn:hl7-org:v3",
                                                             "code",
                                                             new_validation_row(
                                                                 xml_filename,
                                                                 valgroup,
                                                                 "ANNSET_BEAT_ANNS"),
                                                             failcat="WARNING")
                                    valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
                                        rel_path + "/" + rel_path4
                                    if valrow4["VALIOUT"] == "PASSED":
                                        if valrow4["VALUE"] in ["TIME_ABSOLUTE",
                                                                "TIME_RELATIVE"]:
                                            ann["timecode"] = valrow4["VALUE"]
                                        else:
                                            ann["lead"] = valrow4["VALUE"]
                                    if log_validation:
                                        valpd = valpd.append(
                                            pd.DataFrame([valrow4],
                                                         columns=VALICOLS),
                                            ignore_index=True)
                                aecgannset.anns.append(copy.deepcopy(ann))
                            else:
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)

                anngrpid = anngrpid + 1
            beatnum = beatnum + 1
    if len(beatnodes) > 0:
        logger.info(
            f'{xml_filename},{zip_filename},'
            f'{valgroup} {beatnum} annotated beats and {anngrpid} '
            f'annotations groups found')
    # Remember how many groups came from beats (used only for logging below).
    anngrpid_from_beats = anngrpid
    # Annotations stored without an associated beat
    for codetype_path in ["/component/annotation/code["
                          "(contains(@code, \"MDC_ECG_\") and"
                          " not (@code=\'MDC_ECG_BEAT\'))]"]:
        annsnodes = aecg_doc.xpath(
            (path_prefix + codetype_path).replace('/', '/ns:'),
            namespaces={'ns': 'urn:hl7-org:v3'})
        rel_path2 = "../value"
        for annsnode in annsnodes:
            ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
                   "codetype": "",
                   "wavecomponent": "", "wavecomponent2": "",
                   "timecode": "",
                   "value": "", "value_unit": "",
                   "low": "", "low_unit": "",
                   "high": "", "high_unit": "",
                   "lead": ""}
            # Annotations code
            valrow2 = validate_xpath(annsnode,
                                     ".",
                                     "urn:hl7-org:v3",
                                     "code",
                                     new_validation_row(xml_filename, valgroup,
                                                        "ANNSET_NOBEAT_ANNS"),
                                     failcat="WARNING")
            valrow2["XPATH"] = annsset_xmlnode_path
            if log_validation:
                valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
                                     ignore_index=True)
            if valrow2["VALIOUT"] == "PASSED":
                ann["code"] = valrow2["VALUE"]
            # Annotation type from top level value
            valrow2 = validate_xpath(annsnode,
                                     "../value",
                                     "urn:hl7-org:v3",
                                     "code",
                                     new_validation_row(xml_filename, valgroup,
                                                        "ANNSET_NOBEAT_ANNS"),
                                     failcat="WARNING")
            valrow2["XPATH"] = annsset_xmlnode_path + "/value"
            if log_validation:
                valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
                                     ignore_index=True)
            if valrow2["VALIOUT"] == "PASSED":
                ann["codetype"] = valrow2["VALUE"]
                subannsnodes = annsnode.xpath(
                    (".." + codetype_path).replace('/', '/ns:'),
                    namespaces={'ns': 'urn:hl7-org:v3'})
                if len(subannsnodes) == 0:
                    subannsnodes = [annsnode]
                for subannsnode in subannsnodes:
                    subsubannsnodes = subannsnode.xpath(
                        (".." + codetype_path).replace('/', '/ns:'),
                        namespaces={'ns': 'urn:hl7-org:v3'})
                    tmpnodes = [subannsnode]
                    if len(subsubannsnodes) > 0:
                        tmpnodes = tmpnodes + subsubannsnodes
                    for subsubannsnode in tmpnodes:
                        # Reset per-annotation fields; ann is reused across
                        # the tmpnodes loop.
                        ann["wavecomponent"] = ""
                        ann["wavecomponent2"] = ""
                        ann["timecode"] = ""
                        ann["value"] = ""
                        ann["value_unit"] = ""
                        ann["low"] = ""
                        ann["low_unit"] = ""
                        ann["high"] = ""
                        ann["high_unit"] = ""
                        roi_base = "../support/supportingROI/component/boundary"
                        rel_path3 = roi_base + "/value"

                        valrow2 = validate_xpath(
                            subsubannsnode,
                            ".",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_NOBEAT_"
                                               "ANNS"),
                            failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                            codetype_path + "/code"
                        if valrow2["VALIOUT"] == "PASSED":
                            if not ann["codetype"].endswith("WAVE"):
                                ann["codetype"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)

                        # Annotations type
                        valrow2 = validate_xpath(
                            subsubannsnode,
                            rel_path2,
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_NOBEAT_"
                                               "ANNS"),
                            failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                            codetype_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["wavecomponent"] = valrow2["VALUE"]
                            # if ann["wavecomponent"] == "":
                            #     ann["wavecomponent"] = valrow2["VALUE"]
                            # else:
                            #     ann["wavecomponent2"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # Annotations value
                        valrow2 = validate_xpath(
                            subsubannsnode,
                            rel_path2,
                            "urn:hl7-org:v3",
                            "",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_NOBEAT_"
                                               "ANNS"),
                            failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                            codetype_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["value"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # Annotations value as attribute
                        valrow2 = validate_xpath(
                            subsubannsnode,
                            rel_path2,
                            "urn:hl7-org:v3",
                            "value",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_NOBEAT_"
                                               "ANNS"),
                            failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                            codetype_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["value"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # Annotations value units
                        valrow2 = validate_xpath(
                            subsubannsnode,
                            rel_path2,
                            "urn:hl7-org:v3",
                            "unit",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_NOBEAT_"
                                               "ANNS"),
                            failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                            codetype_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["value_unit"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # annotations info from supporting ROI
                        for n in ["", "low", "high"]:
                            if n != "":
                                rp = rel_path3 + "/" + n
                            else:
                                rp = rel_path3
                            valrow3 = validate_xpath(
                                subsubannsnode,
                                rp,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(xml_filename,
                                                   valgroup,
                                                   "ANNSET_NOBEAT_"
                                                   "ANNS"),
                                failcat="WARNING")
                            valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
                                codetype_path + "/" + rp
                            if valrow3["VALIOUT"] == "PASSED":
                                if n != "":
                                    ann[n] = valrow3["VALUE"]
                                else:
                                    ann["value"] = valrow3["VALUE"]
                            else:
                                # Fall back to the ROI nested under a child
                                # annotation node.
                                roi_base = "../component/annotation/support/"\
                                    "supportingROI/component/boundary"
                                # Annotations type
                                valrow2 = validate_xpath(subsubannsnode,
                                                         "../component/annotation/"
                                                         "value",
                                                         "urn:hl7-org:v3",
                                                         "code",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_NOBEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                                    codetype_path + "/" + \
                                    "../component/annotation/value"
                                if valrow2["VALIOUT"] == "PASSED":
                                    ann["wavecomponent2"] = valrow2["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow2], columns=VALICOLS),
                                        ignore_index=True)
                                # annotation values
                                if n != "":
                                    rp = roi_base + "/value/" + n
                                else:
                                    rp = roi_base + "/value"
                                valrow3 = validate_xpath(subsubannsnode,
                                                         rp,
                                                         "urn:hl7-org:v3",
                                                         "value",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_NOBEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
                                    codetype_path + "/" + rp
                                if valrow3["VALIOUT"] == "PASSED":
                                    if n != "":
                                        ann[n] = valrow3["VALUE"]
                                    else:
                                        ann["value"] = valrow3["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow3], columns=VALICOLS),
                                    ignore_index=True)
                            valrow3 = validate_xpath(
                                subsubannsnode,
                                rp,
                                "urn:hl7-org:v3",
                                "unit",
                                new_validation_row(xml_filename,
                                                   valgroup,
                                                   "ANNSET_NOBEAT"
                                                   "_ANNS"),
                                failcat="WARNING")
                            valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
                                codetype_path + "/" + rp
                            if valrow3["VALIOUT"] == "PASSED":
                                if n != "":
                                    ann[n + "_unit"] = valrow3["VALUE"]
                                else:
                                    ann["value_unit"] = valrow3["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow3], columns=VALICOLS),
                                    ignore_index=True)
                        # annotations time encoding, lead and other info used by
                        # value and supporting ROI
                        for rel_path4 in ["../support/supportingROI/component/"
                                          "boundary",
                                          "../component/annotation/support/"
                                          "supportingROI/component/boundary"]:
                            roinodes = subsubannsnode.xpath(
                                rel_path4.replace('/', '/ns:'),
                                namespaces={'ns': 'urn:hl7-org:v3'})
                            for roinode in roinodes:
                                valrow4 = validate_xpath(roinode,
                                                         "./code",
                                                         "urn:hl7-org:v3",
                                                         "code",
                                                         new_validation_row(
                                                             xml_filename,
                                                             valgroup,
                                                             "ANNSET_NOBEAT_ANNS"),
                                                         failcat="WARNING")
                                valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
                                    codetype_path + "/" + rel_path4
                                if valrow4["VALIOUT"] == "PASSED":
                                    if valrow4["VALUE"] in ["TIME_ABSOLUTE",
                                                            "TIME_RELATIVE"]:
                                        ann["timecode"] = valrow4["VALUE"]
                                    else:
                                        ann["lead"] = valrow4["VALUE"]
                                if log_validation:
                                    valpd = valpd.append(
                                        pd.DataFrame([valrow4], columns=VALICOLS),
                                        ignore_index=True)
                        aecgannset.anns.append(copy.deepcopy(ann))
            anngrpid = anngrpid + 1
    logger.info(
        f'{xml_filename},{zip_filename},'
        f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
        f' without an associated beat found')

    return aecgannset, valpd
def parse_generalinfo(aecg_doc: etree._ElementTree,
                      aecg: Aecg,
                      log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts general information

    This function parses the `aecg_doc` xml document searching for general
    information that includes in the returned `Aecg`: unique identifier (UUID),
    ECG date and time of collection (EGDTC), and device information.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # UUID
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./*[local-name() = \"id\"]",
                            "",
                            "root",
                            new_validation_row(aecg.filename,
                                               "GENERAL",
                                               "UUID"))
    if log_validation:
        # pandas.DataFrame.append was removed in pandas 2.0; grow the
        # validation log with pd.concat instead.
        aecg.validatorResults = pd.concat(
            [aecg.validatorResults,
             pd.DataFrame([valrow], columns=VALICOLS)],
            ignore_index=True)
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID found: {valrow["VALUE"]}')
        aecg.UUID = valrow["VALUE"]
    else:
        # A missing UUID makes the file untraceable, hence critical.
        logger.critical(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID not found')

    valrow = validate_xpath(aecg_doc,
                            "./*[local-name() = \"id\"]",
                            "",
                            "extension",
                            new_validation_row(aecg.filename,
                                               "GENERAL",
                                               "UUID"))
    if log_validation:
        aecg.validatorResults = pd.concat(
            [aecg.validatorResults,
             pd.DataFrame([valrow], columns=VALICOLS)],
            ignore_index=True)
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID extension found: {valrow["VALUE"]}')
        # The optional extension attribute is appended to the root id.
        aecg.UUID += valrow["VALUE"]
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID updated to: {aecg.UUID}')
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID extension not found')

    # =======================================
    # EGDTC
    # =======================================
    valpd = pd.DataFrame()
    egdtc_found = False
    # effectiveTime may be reported as an interval (low/high) and/or a
    # center value; any one of them is enough to consider EGDTC found.
    for n in ["low", "center", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./*[local-name() = \"effectiveTime\"]/"
                                "*[local-name() = \"" + n + "\"]",
                                "",
                                "value",
                                new_validation_row(aecg.filename, "GENERAL",
                                                   "EGDTC_" + n),
                                "WARNING")
        if valrow["VALIOUT"] == "PASSED":
            egdtc_found = True
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'EGDTC {n} found: {valrow["VALUE"]}')
            aecg.EGDTC[n] = valrow["VALUE"]
        if log_validation:
            valpd = pd.concat(
                [valpd, pd.DataFrame([valrow], columns=VALICOLS)],
                ignore_index=True)
    if not egdtc_found:
        logger.critical(
            f'{aecg.filename},{aecg.zipContainer},'
            f'EGDTC not found')
    if log_validation:
        aecg.validatorResults = pd.concat([aecg.validatorResults, valpd],
                                          ignore_index=True)

    # =======================================
    # DEVICE
    # =======================================
    # The three device attributes follow the identical extraction pattern,
    # so they are processed in a data-driven loop (the key doubles as the
    # text used in the log messages).
    device_fields = [
        ("manufacturer", "./component/series/author/"
                         "seriesAuthor/manufacturerOrganization/name"),
        ("model", "./component/series/author/"
                  "seriesAuthor/manufacturedSeriesDevice/"
                  "manufacturerModelName"),
        ("software", "./component/series/author/"
                     "seriesAuthor/manufacturedSeriesDevice/"
                     "softwareName")]
    for key, xpath_expr in device_fields:
        valrow = validate_xpath(aecg_doc,
                                xpath_expr,
                                "urn:hl7-org:v3",
                                "",
                                new_validation_row(aecg.filename, "GENERAL",
                                                   "DEVICE_" + key),
                                "WARNING")
        if valrow["VALIOUT"] == "PASSED":
            # Newlines are replaced so the value stays on one log line.
            tmp = valrow["VALUE"].replace("\n", "|")
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DEVICE {key} found: {tmp}')
            aecg.DEVICE[key] = valrow["VALUE"]
        else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DEVICE {key} not found')
        if log_validation:
            aecg.validatorResults = pd.concat(
                [aecg.validatorResults,
                 pd.DataFrame([valrow], columns=VALICOLS)],
                ignore_index=True)

    return aecg
def parse_subjectinfo(aecg_doc: etree._ElementTree,
                      aecg: Aecg,
                      log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts subject information

    This function parses the `aecg_doc` xml document searching for subject
    information that includes in the returned `Aecg`: subject unique identifier
    (USUBJID), gender, birthtime, and race.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # USUBJID
    # =======================================
    valpd = pd.DataFrame()
    for n in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                "./componentOf/timepointEvent/componentOf/"
                                "subjectAssignment/subject/trialSubject/id",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "SUBJECTINFO",
                                                   "USUBJID_" + n))
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
            aecg.USUBJID[n] = valrow["VALUE"]
        else:
            # Same warning regardless of which id part is missing (the
            # original duplicated this branch for root and extension).
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} not found')
        if log_validation:
            # pandas.DataFrame.append was removed in pandas 2.0; use
            # pd.concat to grow the validation log.
            valpd = pd.concat(
                [valpd, pd.DataFrame([valrow], columns=VALICOLS)],
                ignore_index=True)
    if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
        logger.error(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.USUBJID cannot be established.')
    if log_validation:
        aecg.validatorResults = pd.concat([aecg.validatorResults, valpd],
                                          ignore_index=True)

    # =======================================
    # SEX / GENDER
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/subject/trialSubject/"
                            "subjectDemographicPerson/"
                            "administrativeGenderCode",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "SUBJECTINFO",
                                               "SEX"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.SEX found: {valrow["VALUE"]}')
        aecg.SEX = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.SEX not found')
    if log_validation:
        aecg.validatorResults = pd.concat(
            [aecg.validatorResults,
             pd.DataFrame([valrow], columns=VALICOLS)],
            ignore_index=True)

    # =======================================
    # BIRTHTIME
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/subject/trialSubject/"
                            "subjectDemographicPerson/birthTime",
                            "urn:hl7-org:v3",
                            "value",
                            new_validation_row(aecg.filename, "SUBJECTINFO",
                                               "BIRTHTIME"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        # The value itself is not logged (potentially identifying data).
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.BIRTHTIME found.')
        aecg.BIRTHTIME = valrow["VALUE"]
        # age_in_years = aecg.subject_age_in_years()
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.BIRTHTIME not found')
    if log_validation:
        aecg.validatorResults = pd.concat(
            [aecg.validatorResults,
             pd.DataFrame([valrow], columns=VALICOLS)],
            ignore_index=True)

    # =======================================
    # RACE
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/subject/trialSubject/"
                            "subjectDemographicPerson/raceCode",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "SUBJECTINFO",
                                               "RACE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.RACE found: {valrow["VALUE"]}')
        # Bug fix: assign RACE only when the node was actually found.  The
        # previous code assigned unconditionally after the if/else,
        # overwriting aecg.RACE with the validator's placeholder value when
        # raceCode was absent (inconsistent with SEX/BIRTHTIME handling).
        aecg.RACE = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.RACE not found')
    if log_validation:
        aecg.validatorResults = pd.concat(
            [aecg.validatorResults,
             pd.DataFrame([valrow], columns=VALICOLS)],
            ignore_index=True)
    return aecg
def parse_trtainfo(aecg_doc: etree._ElementTree,
                   aecg: Aecg,
                   log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts treatment information

    This function parses the `aecg_doc` xml document searching for the
    treatment group assignment (TRTA) and includes it in the returned `Aecg`.
    (The previous docstring said "subject information" — a copy-paste error.)

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/definition/"
                            "treatmentGroupAssignment/code",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "TRTA"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TRTA information found: {valrow["VALUE"]}')
        aecg.TRTA = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TRTA information not found')
    if log_validation:
        # pandas.DataFrame.append was removed in pandas 2.0; use pd.concat.
        aecg.validatorResults = pd.concat(
            [aecg.validatorResults,
             pd.DataFrame([valrow], columns=VALICOLS)],
            ignore_index=True)
    return aecg
def parse_studyinfo(aecg_doc: etree._ElementTree,
                    aecg: Aecg,
                    log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts study information

    This function parses the `aecg_doc` xml document searching for study
    information that includes in the returned `Aecg`: study unique identifier
    (STUDYID), and study title.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # STUDYID (root and extension id parts)
    # =======================================
    valpd = pd.DataFrame()
    for n in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                "./componentOf/timepointEvent/componentOf/"
                                "subjectAssignment/componentOf/"
                                "clinicalTrial/id",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "STUDYID_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'STUDYID {n} found: {valrow["VALUE"]}')
            aecg.STUDYID[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'STUDYID {n} not found')
        if log_validation:
            # pandas.DataFrame.append was removed in pandas 2.0; use
            # pd.concat to grow the validation log.
            valpd = pd.concat(
                [valpd, pd.DataFrame([valrow], columns=VALICOLS)],
                ignore_index=True)
    if log_validation:
        aecg.validatorResults = pd.concat([aecg.validatorResults, valpd],
                                          ignore_index=True)

    # =======================================
    # STUDYTITLE
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/componentOf/"
                            "clinicalTrial/title",
                            "urn:hl7-org:v3",
                            "",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "STUDYTITLE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        # Strip newlines so the title is logged on a single line.
        tmp = valrow["VALUE"].replace("\n", "")
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'STUDYTITLE found: {tmp}')
        aecg.STUDYTITLE = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'STUDYTITLE not found')
    if log_validation:
        aecg.validatorResults = pd.concat(
            [aecg.validatorResults,
             pd.DataFrame([valrow], columns=VALICOLS)],
            ignore_index=True)
    return aecg
def parse_timepoints(aecg_doc: etree._ElementTree,
                     aecg: Aecg,
                     log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts timepoints information

    This function parses the `aecg_doc` xml document searching for timepoints
    information that includes in the returned `Aecg`: absolute timepoint or
    study event information (TPT), relative timepoint or study event relative
    to a reference event (RTPT), and protocol timepoint information (PTPT).

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # TPT
    # =======================================
    valpd = pd.DataFrame()
    for n in ["code", "displayName"]:
        valrow = validate_xpath(aecg_doc,
                                "./componentOf/timepointEvent/code",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "TPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} found: {valrow["VALUE"]}')
            aecg.TPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/reasonCode",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "TPT_reasonCode"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TPT reasonCode found: {valrow["VALUE"]}')
        aecg.TPT["reasonCode"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TPT reasonCode not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)

    valpd = pd.DataFrame()
    for n in ["low", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./componentOf/timepointEvent/"
                                "effectiveTime/" + n,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "TPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} found: {valrow["VALUE"]}')
            aecg.TPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)

    # =======================================
    # RTPT
    # =======================================
    valpd = pd.DataFrame()
    for n in ["code", "displayName"]:
        # BUG FIX: the attribute to extract was hard-coded to "code", so the
        # "displayName" iteration re-read the code attribute and
        # aecg.RTPT["displayName"] never held the actual display name.
        valrow = validate_xpath(aecg_doc,
                                "./definition/relativeTimepoint/code",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "RTPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RTPT {n} found: {valrow["VALUE"]}')
            aecg.RTPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RTPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "pauseQuantity",
                            "urn:hl7-org:v3",
                            "value",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "RTPT_pauseQuantity"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity value found: {valrow["VALUE"]}')
        aecg.RTPT["pauseQuantity"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity value not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "pauseQuantity",
                            "urn:hl7-org:v3",
                            "unit",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "RTPT_pauseQuantity_unit"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity unit found: {valrow["VALUE"]}')
        aecg.RTPT["pauseQuantity_unit"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity unit not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)

    # =======================================
    # PTPT
    # =======================================
    valpd = pd.DataFrame()
    for n in ["code", "displayName"]:
        valrow = validate_xpath(aecg_doc,
                                "./definition/relativeTimepoint/"
                                "componentOf/protocolTimepointEvent/code",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "PTPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'PTPT {n} found: {valrow["VALUE"]}')
            aecg.PTPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'PTPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "protocolTimepointEvent/component/"
                            "referenceEvent/code",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "PTPT_referenceEvent"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent code found: {valrow["VALUE"]}')
        aecg.PTPT["referenceEvent"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent code not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "protocolTimepointEvent/component/"
                            "referenceEvent/code",
                            "urn:hl7-org:v3",
                            "displayName",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "PTPT_referenceEvent_"
                                               "displayName"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent displayName found: '
            f'{valrow["VALUE"]}')
        aecg.PTPT["referenceEvent_displayName"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent displayName not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)
    return aecg
def parse_rhythm_waveform_info(aecg_doc: etree._ElementTree,
                               aecg: Aecg,
                               log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts rhythm waveform information

    This function parses the `aecg_doc` xml document searching for rhythm
    waveform information that includes in the returned `Aecg`: waveform
    identifier, code, display name, and date and time of collection.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    valpd = pd.DataFrame()
    for n in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/id",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename, "RHYTHM",
                                                   "ID_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHM ID {n} found: {valrow["VALUE"]}')
            aecg.RHYTHMID[n] = valrow["VALUE"]
        else:
            # The original if/else on n == "root" had two byte-identical
            # branches; collapsed into a single warning.
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHM ID {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./component/series/code",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "RHYTHM",
                                               "CODE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM code found: {valrow["VALUE"]}')
        aecg.RHYTHMCODE["code"] = valrow["VALUE"]
        # The aECG HL7 standard expects the series code to be "RHYTHM";
        # anything else is recorded as a validation warning.
        if aecg.RHYTHMCODE["code"] != "RHYTHM":
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHM unexpected code found: {valrow["VALUE"]}')
            valrow["VALIOUT"] = "WARNING"
            valrow["VALIMSG"] = "Unexpected value found"
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM code not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./component/series/code",
                            "urn:hl7-org:v3",
                            "displayName",
                            new_validation_row(aecg.filename, "RHYTHM",
                                               "CODE_displayName"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM displayName found: {valrow["VALUE"]}')
        aecg.RHYTHMCODE["displayName"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM displayName not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)

    valpd = pd.DataFrame()
    for n in ["low", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/effectiveTime/" + n,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(aecg.filename, "RHYTHM",
                                                   "EGDTC_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHMEGDTC {n} found: {valrow["VALUE"]}')
            aecg.RHYTHMEGDTC[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHMEGDTC {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_derived_waveform_info(aecg_doc: etree._ElementTree,
                                aecg: Aecg,
                                log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts derived waveform information

    This function parses the `aecg_doc` xml document searching for derived
    waveform information that includes in the returned `Aecg`: waveform
    identifier, code, display name, and date and time of collection.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    valpd = pd.DataFrame()
    for n in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/derivation/"
                                "derivedSeries/id",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename, "DERIVED",
                                                   "ID_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED ID {n} found: {valrow["VALUE"]}')
            aecg.DERIVEDID[n] = valrow["VALUE"]
        else:
            # The original if/else on n == "root" had two byte-identical
            # branches; collapsed into a single warning.
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED ID {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./component/series/derivation/"
                            "derivedSeries/code",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "DERIVED",
                                               "CODE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED code found: {valrow["VALUE"]}')
        aecg.DERIVEDCODE["code"] = valrow["VALUE"]
        # Derived series are expected to carry the REPRESENTATIVE_BEAT code;
        # anything else is recorded as a validation warning.
        if aecg.DERIVEDCODE["code"] != "REPRESENTATIVE_BEAT":
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED unexpected code found: {valrow["VALUE"]}')
            valrow["VALIOUT"] = "WARNING"
            valrow["VALIMSG"] = "Unexpected value found"
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED code not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)

    valrow = validate_xpath(aecg_doc,
                            "./component/series/derivation/"
                            "derivedSeries/code",
                            "urn:hl7-org:v3",
                            "displayName",
                            new_validation_row(aecg.filename, "DERIVED",
                                               "CODE_displayName"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED displayName found: {valrow["VALUE"]}')
        aecg.DERIVEDCODE["displayName"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED displayName not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)

    valpd = pd.DataFrame()
    for n in ["low", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/derivation/"
                                "derivedSeries/effectiveTime/" + n,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(aecg.filename, "DERIVED",
                                                   "EGDTC_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVEDEGDTC {n} found: {valrow["VALUE"]}')
            aecg.DERIVEDEGDTC[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVEDEGDTC {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_rhythm_waveform_timeseries(aecg_doc: etree._ElementTree,
                                     aecg: Aecg,
                                     include_digits: bool = False,
                                     log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts rhythm's timeseries

    This function parses the `aecg_doc` xml document searching for rhythm
    waveform timeseries (sequences) information that includes in the returned
    :any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
    :any:`Aecg.RHYTHMLEADS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        include_digits (bool, optional): Indicates whether to include the
            digits information in the returned `Aecg`.
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    path_prefix = './component/series/component/sequenceSet/' \
                  'component/sequence'
    seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
                              namespaces={'ns': 'urn:hl7-org:v3'})
    if len(seqnodes) > 0:
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM sequenceSet(s) found: '
            f'{len(seqnodes)} sequenceSet nodes')
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM sequenceSet not found')
    for xmlnode in seqnodes:
        xmlnode_path = aecg_doc.getpath(xmlnode)
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path,
                                "urn:hl7-org:v3",
                                "code",
                                new_validation_row(aecg.filename, "RHYTHM",
                                                   "SEQUENCE_CODE"),
                                failcat="WARNING")
        valpd = pd.DataFrame()
        if valrow["VALIOUT"] == "PASSED":
            if not valrow["VALUE"] in SEQUENCE_CODES:
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM unexpected sequenceSet code '
                    f'found: {valrow["VALUE"]}')
                valrow["VALIOUT"] = "WARNING"
                valrow["VALIMSG"] = "Unexpected sequence code found"
            if valrow["VALUE"] in TIME_CODES:
                # Sequence is the shared time axis for the leads that follow.
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM sequenceSet code found: {valrow["VALUE"]}')
                aecg.RHYTHMTIME["code"] = valrow["VALUE"]
                # Retrieve time head info from value node
                rel_path = "../value/head"
                valrow2 = validate_xpath(
                    xmlnode,
                    rel_path,
                    "urn:hl7-org:v3",
                    "value",
                    new_validation_row(
                        aecg.filename, "RHYTHM", "SEQUENCE_TIME_HEAD"),
                    failcat="WARNING")
                valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                if valrow2["VALIOUT"] == "PASSED":
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'RHYTHM SEQUENCE_TIME_HEAD found: {valrow2["VALUE"]}')
                    aecg.RHYTHMTIME["head"] = valrow2["VALUE"]
                else:
                    logger.debug(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'RHYTHM SEQUENCE_TIME_HEAD not found')
                if log_validation:
                    valpd = valpd.append(
                        pd.DataFrame([valrow2], columns=VALICOLS),
                        ignore_index=True)
                # Retrieve time increment info from value node
                rel_path = "../value/increment"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(
                            aecg.filename, "RHYTHM", "SEQUENCE_TIME_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_TIME_{n} found: '
                            f'{valrow2["VALUE"]}')
                        if n == "value":
                            aecg.RHYTHMTIME["increment"] = float(
                                valrow2["VALUE"])
                        else:
                            aecg.RHYTHMTIME[n] = valrow2["VALUE"]
                    if log_validation:
                        valpd = \
                            valpd.append(pd.DataFrame([valrow2],
                                                      columns=VALICOLS),
                                         ignore_index=True)
            else:
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM sequenceSet code found: '
                    f'{valrow["VALUE"]}')
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'LEADNAME from RHYTHM sequenceSet code: '
                    f'{valrow["VALUE"]}')
                # Assume is a lead
                aecglead = AecgLead()
                aecglead.leadname = valrow["VALUE"]
                # Inherit last parsed RHYTHMTIME
                aecglead.LEADTIME = copy.deepcopy(aecg.RHYTHMTIME)
                # Retrieve lead origin info
                rel_path = "../value/origin"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(
                            aecg.filename, "RHYTHM",
                            "SEQUENCE_LEAD_ORIGIN_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.origin = float(valrow2["VALUE"])
                            except Exception:
                                # BUG FIX: was `==` (a no-op comparison), so
                                # the error status was never recorded.
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
                                                     "ORIGIN is not a "\
                                                     "number"
                        else:
                            aecglead.origin_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Retrieve lead scale info
                rel_path = "../value/scale"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(
                            aecg.filename, "RHYTHM",
                            "SEQUENCE_LEAD_SCALE_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_SCALE_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.scale = float(valrow2["VALUE"])
                            except Exception as ex:
                                logger.error(
                                    f'{aecg.filename},{aecg.zipContainer},'
                                    f'RHYTHM SEQUENCE_LEAD_SCALE '
                                    f'value is not a valid number: \"{ex}\"')
                                # BUG FIX: was `==` (a no-op comparison), so
                                # the error status was never recorded.
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
                                                     "SCALE is not a "\
                                                     "number"
                        else:
                            aecglead.scale_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_SCALE_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Include digits if requested
                if include_digits:
                    rel_path = "../value/digits"
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        "",
                        new_validation_row(
                            aecg.filename, "RHYTHM", "SEQUENCE_LEAD_DIGITS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        try:
                            # Convert string of digits to list of integers:
                            # normalize all whitespace (newlines, carriage
                            # returns, tabs, runs of spaces) to single spaces
                            # before splitting.
                            sdigits = valrow2["VALUE"].replace("\n", " ")
                            sdigits = sdigits.replace("\r", " ")
                            sdigits = sdigits.replace("\t", " ")
                            sdigits = re.sub("\\s+", " ", sdigits).strip()
                            aecglead.digits = [int(s) for s in
                                               sdigits.split(' ')]
                            logger.info(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'DIGITS added to lead'
                                f' {aecglead.leadname} (n: '
                                f'{len(aecglead.digits)})')
                        except Exception as ex:
                            logger.error(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'Error parsing DIGITS from '
                                f'string to list of integers: \"{ex}\"')
                            # BUG FIX: was `==` (a no-op comparison), so the
                            # error status was never recorded.
                            valrow2["VALIOUT"] = "ERROR"
                            valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
                                                 "LEAD_DIGITS from string"\
                                                 " to list of integers"
                    else:
                        logger.error(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DIGITS not found for lead {aecglead.leadname}')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                else:
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DIGITS were not requested by the user')
                aecg.RHYTHMLEADS.append(copy.deepcopy(aecglead))
        else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHM sequenceSet code not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
            if valpd.shape[0] > 0:
                aecg.validatorResults = \
                    aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_derived_waveform_timeseries(aecg_doc: etree._ElementTree,
                                      aecg: Aecg,
                                      include_digits: bool = False,
                                      log_validation: bool = False):
    """Parses `aecg_doc` XML document and extracts derived's timeseries

    This function parses the `aecg_doc` xml document searching for derived
    waveform timeseries (sequences) information that includes in the returned
    :any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
    :any:`Aecg.DERIVEDLEADS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        include_digits (bool, optional): Indicates whether to include the
            digits information in the returned `Aecg`.
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    path_prefix = './component/series/derivation/derivedSeries/component'\
                  '/sequenceSet/component/sequence'
    seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
                              namespaces={'ns': 'urn:hl7-org:v3'})
    if len(seqnodes) > 0:
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED sequenceSet(s) found: '
            f'{len(seqnodes)} sequenceSet nodes')
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED sequenceSet not found')
    for xmlnode in seqnodes:
        xmlnode_path = aecg_doc.getpath(xmlnode)
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path,
                                "urn:hl7-org:v3",
                                "code",
                                new_validation_row(aecg.filename, "DERIVED",
                                                   "SEQUENCE_CODE"),
                                failcat="WARNING")
        valpd = pd.DataFrame()
        if valrow["VALIOUT"] == "PASSED":
            if not valrow["VALUE"] in SEQUENCE_CODES:
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED unexpected sequenceSet code '
                    f'found: {valrow["VALUE"]}')
                valrow["VALIOUT"] = "WARNING"
                valrow["VALIMSG"] = "Unexpected sequence code found"
            if valrow["VALUE"] in TIME_CODES:
                # Sequence is the shared time axis for the leads that follow.
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
                aecg.DERIVEDTIME["code"] = valrow["VALUE"]
                # Retrieve time head info from value node
                rel_path = "../value/head"
                valrow2 = validate_xpath(
                    xmlnode,
                    rel_path,
                    "urn:hl7-org:v3",
                    "value",
                    new_validation_row(aecg.filename, "DERIVED",
                                       "SEQUENCE_TIME_HEAD"),
                    failcat="WARNING")
                valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                if valrow2["VALIOUT"] == "PASSED":
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DERIVED SEQUENCE_TIME_HEAD found: '
                        f'{valrow2["VALUE"]}')
                    aecg.DERIVEDTIME["head"] = valrow2["VALUE"]
                else:
                    logger.debug(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DERIVED SEQUENCE_TIME_HEAD not found')
                if log_validation:
                    valpd = valpd.append(
                        pd.DataFrame([valrow2], columns=VALICOLS),
                        ignore_index=True)
                # Retrieve time increment info from value node
                rel_path = "../value/increment"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_TIME_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_TIME_{n} found: '
                            f'{valrow2["VALUE"]}')
                        if n == "value":
                            aecg.DERIVEDTIME["increment"] =\
                                float(valrow2["VALUE"])
                        else:
                            aecg.DERIVEDTIME[n] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
            else:
                # CONSISTENCY FIX: the rhythm twin logs this at info level;
                # this was logger.debug here.
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'LEADNAME from DERIVED sequenceSet code: '
                    f'{valrow["VALUE"]}')
                # Assume is a lead
                aecglead = AecgLead()
                aecglead.leadname = valrow["VALUE"]
                # Inherit last parsed DERIVEDTIME
                aecglead.LEADTIME = copy.deepcopy(aecg.DERIVEDTIME)
                # Retrieve lead origin info
                rel_path = "../value/origin"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_LEAD_ORIGIN_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.origin = float(valrow2["VALUE"])
                            except Exception:
                                # BUG FIX: was `==` (a no-op comparison), so
                                # the error status was never recorded.
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = \
                                    "SEQUENCE_LEAD_ORIGIN is not a number"
                        else:
                            aecglead.origin_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Retrieve lead scale info
                rel_path = "../value/scale"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_LEAD_SCALE_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_SCALE_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.scale = float(valrow2["VALUE"])
                            except Exception as ex:
                                logger.error(
                                    f'{aecg.filename},{aecg.zipContainer},'
                                    f'DERIVED SEQUENCE_LEAD_SCALE'
                                    f' value is not a valid number: \"{ex}\"')
                                # BUG FIX: was `==` (a no-op comparison), so
                                # the error status was never recorded.
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = "SEQUENCE_LEAD_SCALE"\
                                                     " is not a number"
                        else:
                            aecglead.scale_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_SCALE_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Include digits if requested
                if include_digits:
                    rel_path = "../value/digits"
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        "",
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_LEAD_DIGITS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        try:
                            # Convert string of digits to list of integers:
                            # normalize all whitespace (newlines, carriage
                            # returns, tabs, runs of spaces) to single spaces
                            # before splitting.
                            sdigits = valrow2["VALUE"].replace("\n", " ")
                            sdigits = sdigits.replace("\r", " ")
                            sdigits = sdigits.replace("\t", " ")
                            sdigits = re.sub("\\s+", " ", sdigits).strip()
                            aecglead.digits = [int(s) for s in
                                               sdigits.split(' ')]
                            logger.info(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'DIGITS added to lead'
                                f' {aecglead.leadname} (n: '
                                f'{len(aecglead.digits)})')
                        except Exception as ex:
                            logger.error(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'Error parsing DIGITS from '
                                f'string to list of integers: \"{ex}\"')
                            # BUG FIX: was `==` (a no-op comparison), so the
                            # error status was never recorded.
                            valrow2["VALIOUT"] = "ERROR"
                            valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
                                                 "LEAD_DIGITS from string"\
                                                 " to list of integers"
                    else:
                        logger.error(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DIGITS not found for lead {aecglead.leadname}')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                else:
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DIGITS were not requested by the user')
                aecg.DERIVEDLEADS.append(copy.deepcopy(aecglead))
        else:
            # BUG FIX: message said "RHYTHM" (copy-paste from the rhythm
            # twin); this function parses the DERIVED series.
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED sequenceSet code not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
            if valpd.shape[0] > 0:
                aecg.validatorResults = \
                    aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_waveform_annotations(aecg_doc: etree._ElementTree,
                               aecg: Aecg,
                               anngrp: Dict,
                               log_validation: bool = False):
    """Parses `aecg_doc` XML document and extracts waveform annotations

    This function parses the `aecg_doc` xml document searching for
    waveform annotation sets that includes in the returned
    :any:`Aecg`. As indicated in the `anngrp` parameter, each annotation set
    is stored as an :any:`AecgAnnotationSet` in the :any:`Aecg.RHYTHMANNS`
    or :any:`Aecg.DERIVEDANNS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        anngrp (Dict): includes a `valgroup` key indicating whether the
            rhythm or derived waveform annotations should be located, and a
            `path_prefix` with the xml path prefix for which start searching
            for annotation sets in the `aecg_doc` xml document.
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    val_grp = anngrp["valgroup"]
    logger.debug(
        f'{aecg.filename},{aecg.zipContainer},'
        f'{val_grp}: searching annotations started')
    path_prefix = anngrp["path_prefix"]
    anns_setnodes = aecg_doc.xpath(path_prefix.replace('/', '/ns:'),
                                   namespaces={'ns': 'urn:hl7-org:v3'})
    if len(anns_setnodes) == 0:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'{anngrp["valgroup"]}: no annotation nodes found')
    for xmlnode in anns_setnodes:
        aecgannset = AecgAnnotationSet()
        xmlnode_path = aecg_doc.getpath(xmlnode)
        # Annotation set: human author information
        # BUG FIX: the validation group was hard-coded to "RHYTHM" in the
        # three new_validation_row calls below, so DERIVED annotation sets
        # were logged under the wrong group; use val_grp instead.
        valrow = validate_xpath(
            aecg_doc,
            xmlnode_path + "/author/assignedEntity/assignedAuthorType/"
                           "assignedPerson/name",
            "urn:hl7-org:v3",
            "",
            new_validation_row(aecg.filename, val_grp, "ANNSET_AUTHOR_NAME"),
            failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations author: {valrow["VALUE"]}')
            aecgannset.person = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations author not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
        # Annotation set: device author information
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path + "/author/assignedEntity"
                                               "/assignedAuthorType/"
                                               "assignedDevice/"
                                               "manufacturerModelName",
                                "urn:hl7-org:v3",
                                "",
                                new_validation_row(
                                    aecg.filename,
                                    val_grp,
                                    "ANNSET_AUTHOR_DEVICE_MODEL"),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            tmp = valrow["VALUE"].replace("\n", "")
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device model: {tmp}')
            aecgannset.device["model"] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device model not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path +
                                "/author/assignedEntity/"
                                "assignedAuthorType/assignedDevice/"
                                "playedManufacturedDevice/"
                                "manufacturerOrganization/name",
                                "urn:hl7-org:v3",
                                "",
                                new_validation_row(
                                    aecg.filename,
                                    val_grp,
                                    "ANNSET_AUTHOR_DEVICE_NAME"),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            tmp = valrow["VALUE"].replace("\n", "")
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device name: {tmp}')
            aecgannset.device["name"] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device name not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
        # Delegate the per-annotation parsing to the shared helper.
        aecgannset, valpd = parse_annotations(aecg.filename, aecg.zipContainer,
                                              aecg_doc,
                                              aecgannset,
                                              path_prefix,
                                              xmlnode_path,
                                              anngrp["valgroup"],
                                              log_validation)
        if len(aecgannset.anns) == 0:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} no annotations set found')
        if log_validation:
            aecg.validatorResults = \
                aecg.validatorResults.append(valpd, ignore_index=True)
        if anngrp["valgroup"] == "RHYTHM":
            aecg.RHYTHMANNS.append(copy.deepcopy(aecgannset))
        else:
            aecg.DERIVEDANNS.append(copy.deepcopy(aecgannset))
    logger.debug(
        f'{aecg.filename},{aecg.zipContainer},'
        f'{val_grp}: searching annotations finished')
    return aecg
def parse_rhythm_waveform_annotations(aecg_doc: etree._ElementTree,
                                      aecg: Aecg,
                                      log_validation: bool = False) -> Aecg:
    """Extracts rhythm waveform annotation sets from `aecg_doc`.
    Delegates to :func:`parse_waveform_annotations` configured for the
    rhythm series: each annotation set found under the rhythm xpath is
    appended as an :any:`AecgAnnotationSet` to :any:`Aecg.RHYTHMANNS`.
    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.
    Returns:
        Aecg: `aecg` updated with the rhythm annotations found.
    """
    rhythm_cfg = {
        "valgroup": "RHYTHM",
        "path_prefix": "./component/series/subjectOf/annotationSet",
    }
    return parse_waveform_annotations(aecg_doc, aecg, rhythm_cfg,
                                      log_validation)
def parse_derived_waveform_annotations(aecg_doc: etree._ElementTree,
                                       aecg: Aecg,
                                       log_validation: bool = False) -> Aecg:
    """Extracts derived waveform annotation sets from `aecg_doc`.
    Delegates to :func:`parse_waveform_annotations` configured for the
    derived series: each annotation set found under the derived-series
    xpath is appended as an :any:`AecgAnnotationSet` to
    :any:`Aecg.DERIVEDANNS`.
    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.
    Returns:
        Aecg: `aecg` updated with the derived annotations found.
    """
    derived_cfg = {
        "valgroup": "DERIVED",
        "path_prefix": "./component/series/derivation/"
                       "derivedSeries/subjectOf/annotationSet",
    }
    return parse_waveform_annotations(aecg_doc, aecg, derived_cfg,
                                      log_validation)
def read_aecg(xml_filename: str, zip_container: str = "",
              include_digits: bool = False,
              aecg_schema_filename: str = "",
              ns_clean: bool = True, remove_blank_text: bool = True,
              in_memory_xml: bool = False,
              log_validation: bool = False) -> Aecg:
    """Reads an aECG HL7 XML file and returns an `Aecg` object.
    Args:
        xml_filename (str): Path to the aECG xml file.
        zip_container (str, optional): Zipfile containing the aECG xml. Empty
            string if path points to an xml file in the system. Defaults to "".
        include_digits (bool, optional): Waveform values are not read nor
            parsed if False. Defaults to False.
        aecg_schema_filename (str, optional): xsd file to instantiate the
            lxml.etree.XMLSchema object for validating the aECG xml document.
            Schema validation is not performed if empty string is provided.
            Defaults to "".
        ns_clean (bool, optional): Indicates whether to clean up namespaces
            during XML parsing. Defaults to True.
        remove_blank_text (bool, optional): Indicates whether to clean up blank
            text during parsing. Defaults to True.
        in_memory_xml (bool, optional): If True, keeps a copy of the parsed XML
            in :attr:`xmldoc`.
        log_validation (bool, optional): If True, populates
            :attr:`validatorResults` with parsing information retrieved while
            reading and parsing the aecg xml file.
    Returns:
        Aecg: An aECG object instantiated with the information read from
            the `xml_filename` file.
    """
    # NOTE(review): this function (like the rest of the module) relies on
    # pandas.DataFrame.append, which was removed in pandas 2.0 -- verify the
    # project's pandas pin, or migrate to pd.concat.
    # =======================================
    # Initialize Aecg object
    # =======================================
    aecg = Aecg()
    aecg.filename = xml_filename
    aecg.zipContainer = zip_container
    # =======================================
    # Read XML document
    # =======================================
    aecg_doc = None
    parser = etree.XMLParser(ns_clean=ns_clean,
                             remove_blank_text=remove_blank_text)
    if zip_container == "":
        # Plain file on disk: parse directly and record a FILENAME
        # validation row; the ZIPCONTAINER rule is auto-passed below.
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'Reading aecg from {xml_filename} [no zip container]')
        valrow = new_validation_row(xml_filename, "READFILE", "FILENAME")
        valrow["VALUE"] = xml_filename
        try:
            aecg_doc = etree.parse(xml_filename, parser)
            valrow["VALIOUT"] = "PASSED"
            valrow["VALIMSG"] = ""
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'XML file loaded and parsed')
            if log_validation:
                aecg.validatorResults = aecg.validatorResults.append(
                    pd.DataFrame([valrow], columns=VALICOLS),
                    ignore_index=True)
        except Exception as ex:
            msg = f'Could not open or parse XML file: \"{ex}\"'
            logger.error(
                f'{aecg.filename},{aecg.zipContainer},{msg}')
            valrow["VALIOUT"] = "ERROR"
            valrow["VALIMSG"] = msg
            if log_validation:
                aecg.validatorResults = aecg.validatorResults.append(
                    pd.DataFrame([valrow], columns=VALICOLS),
                    ignore_index=True)
        # Add row with zipcontainer rule as PASSED because there is no zip
        # container to test
        valrow = new_validation_row(xml_filename, "READFILE", "ZIPCONTAINER")
        valrow["VALIOUT"] = "PASSED"
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    else:
        # File inside a zip archive: open the archive, read the member,
        # then parse its bytes. Two validation rows are recorded:
        # ZIPCONTAINER (archive could be opened) and FILENAME (member
        # could be read and parsed).
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'Reading aecg from {xml_filename} '
            f'[zip container: {zip_container}]')
        valrow = new_validation_row(xml_filename, "READFILE", "ZIPCONTAINER")
        valrow["VALUE"] = zip_container
        try:
            with zipfile.ZipFile(zip_container, "r") as zf:
                logger.debug(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'Zip file opened')
                valrow2 = new_validation_row(xml_filename, "READFILE",
                                             "FILENAME")
                valrow2["VALUE"] = xml_filename
                try:
                    aecg0 = zf.read(xml_filename)
                    logger.debug(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'XML file read from zip file')
                    try:
                        aecg_doc = etree.fromstring(aecg0, parser)
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'XML file loaded and parsed')
                    except Exception as ex:
                        msg = f'Could not parse XML file: \"{ex}\"'
                        logger.error(
                            f'{aecg.filename},{aecg.zipContainer},{msg}')
                        valrow2["VALIOUT"] = "ERROR"
                        valrow2["VALIMSG"] = msg
                        if log_validation:
                            aecg.validatorResults = \
                                aecg.validatorResults.append(
                                    pd.DataFrame([valrow2], columns=VALICOLS),
                                    ignore_index=True)
                    # NOTE(review): the next two lines run even when the
                    # parse above failed and an ERROR row was already
                    # appended, so a PASSED FILENAME row follows the ERROR
                    # row -- confirm this is intended (the no-zip branch
                    # only marks PASSED on success).
                    valrow2["VALIOUT"] = "PASSED"
                    valrow2["VALIMSG"] = ""
                    if log_validation:
                        aecg.validatorResults = aecg.validatorResults.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                except Exception as ex:
                    msg = f'Could not find or read XML file in the zip file: '\
                        f'\"{ex}\"'
                    logger.error(
                        f'{aecg.filename},{aecg.zipContainer},{msg}')
                    valrow2["VALIOUT"] = "ERROR"
                    valrow2["VALIMSG"] = msg
                    if log_validation:
                        aecg.validatorResults = aecg.validatorResults.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
            valrow["VALIOUT"] = "PASSED"
            valrow["VALIMSG"] = ""
            if log_validation:
                aecg.validatorResults = aecg.validatorResults.append(
                    pd.DataFrame([valrow], columns=VALICOLS),
                    ignore_index=True)
        except Exception as ex:
            msg = f'Could not open zip file container: \"{ex}\"'
            logger.error(
                f'{aecg.filename},{aecg.zipContainer},{msg}')
            valrow["VALIOUT"] = "ERROR"
            valrow["VALIMSG"] = msg
            if log_validation:
                aecg.validatorResults = aecg.validatorResults.append(
                    pd.DataFrame([valrow], columns=VALICOLS),
                    ignore_index=True)
    if aecg_doc is not None:
        aecg.xmlfound = True
        # etree.fromstring returns an _Element; normalize to an
        # _ElementTree so downstream parsers can use document-rooted xpaths.
        if not isinstance(aecg_doc, etree._ElementTree):
            aecg_doc = etree.ElementTree(aecg_doc)
    # Proceed only if the document was loaded and (when logging validation)
    # the one or two READFILE rows recorded above all PASSED.
    if (aecg.xmlfound and
        (not log_validation or
         ((aecg.validatorResults.shape[0] == 1 and
           aecg.validatorResults["VALIOUT"][0] == "PASSED") or
          (aecg.validatorResults.shape[0] == 2 and
           aecg.validatorResults["VALIOUT"][0] == "PASSED" and
           aecg.validatorResults["VALIOUT"][1] == "PASSED")))):
        # =======================================
        # ECG file loaded and parsed to XML doc successfully
        # =======================================
        aecg.xmlfound = True
        # =======================================
        # Keep parsed XML if requested
        # =======================================
        if in_memory_xml:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'XML document cached in memory')
            aecg.xmldoc = aecg_doc
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'XML document not cached in memory')
        # =======================================
        # Validate XML doc if schema was provided
        # =======================================
        # Default SCHEMA row is a WARNING; overwritten below when a schema
        # filename is provided and validation runs.
        valrow = new_validation_row(xml_filename, "SCHEMA", "VALIDATION")
        valrow["VALIOUT"] = "WARNING"
        valrow["VALIMSG"] = "Schema not provided for validation"
        if aecg_schema_filename is not None and aecg_schema_filename != "":
            valrow["VALUE"] = aecg_schema_filename
            try:
                aecg_schema_doc = etree.parse(aecg_schema_filename)
                try:
                    aecg_schema = etree.XMLSchema(aecg_schema_doc)
                    if aecg_schema.validate(aecg_doc):
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'XML file passed Schema validation')
                        aecg.isValid = "Y"
                        valrow["VALIOUT"] = "PASSED"
                        valrow["VALIMSG"] = ""
                    else:
                        msg = f'XML file did not pass Schema validation'
                        logger.warning(
                            f'{aecg.filename},{aecg.zipContainer},{msg}')
                        aecg.isValid = "N"
                        valrow["VALIOUT"] = "ERROR"
                        valrow["VALIMSG"] = msg
                except Exception as ex:
                    msg = f'XML Schema is not valid: \"{ex}\"'
                    logger.error(
                        f'{aecg.filename},{aecg.zipContainer},{msg}')
                    valrow["VALIOUT"] = "ERROR"
                    valrow["VALIMSG"] = msg
            except Exception as ex:
                msg = f'Schema file not found or parsing of schema failed: '\
                    f'\"{ex}\"'
                logger.error(
                    f'{aecg.filename},{aecg.zipContainer},{msg}')
                valrow["VALIOUT"] = "ERROR"
                valrow["VALIMSG"] = msg
        else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'Schema not provided for XML validation')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
        # Extraction pipeline: each parse_* helper reads one section of the
        # document and returns the updated aecg object.
        # =======================================
        # UUID and EGDTC and DEVICE
        # =======================================
        aecg = parse_generalinfo(aecg_doc, aecg, log_validation)
        # =======================================
        # USUBJID, SEX/GENDER, BIRTHTIME, RACE
        # =======================================
        aecg = parse_subjectinfo(aecg_doc, aecg, log_validation)
        # =======================================
        # TRTA
        # =======================================
        aecg = parse_trtainfo(aecg_doc, aecg, log_validation)
        # =======================================
        # CLINICAL TRIAL
        # =======================================
        aecg = parse_studyinfo(aecg_doc, aecg, log_validation)
        # =======================================
        # Timepoints
        # =======================================
        aecg = parse_timepoints(aecg_doc, aecg, log_validation)
        # =======================================
        # Rhythm Waveforms information
        # =======================================
        aecg = parse_rhythm_waveform_info(aecg_doc, aecg, log_validation)
        # =======================================
        # Derived Waveforms information
        # =======================================
        aecg = parse_derived_waveform_info(aecg_doc, aecg, log_validation)
        # =======================================
        # Rhythm Waveforms timeseries
        # =======================================
        aecg = parse_rhythm_waveform_timeseries(aecg_doc, aecg, include_digits,
                                                log_validation)
        # =======================================
        # Derived Waveforms timeseries
        # =======================================
        aecg = parse_derived_waveform_timeseries(aecg_doc, aecg,
                                                 include_digits,
                                                 log_validation)
        # =======================================
        # Rhythm and Derived Waveforms annotations
        # =======================================
        aecg = parse_rhythm_waveform_annotations(
            aecg_doc, aecg, log_validation)
        aecg = parse_derived_waveform_annotations(
            aecg_doc, aecg, log_validation)
    return aecg
| """ I/O functions of the aecg package: tools for annotated ECG HL7 XML files
This module implements helper functions to parse and read annotated
electrocardiogram (ECG) stored in XML files following HL7
specification.
See authors, license and disclaimer at the top level directory of this project.
"""
# Imports =====================================================================
from typing import Dict, Tuple
from lxml import etree
from aecg import validate_xpath, new_validation_row, VALICOLS, \
TIME_CODES, SEQUENCE_CODES, \
Aecg, AecgLead, AecgAnnotationSet
import copy
import logging
import pandas as pd
import re
import zipfile
# Python logging ==============================================================
# Module-level logger. Messages in this module follow a CSV-like convention:
# "<xml filename>,<zip container>,<message>".
logger = logging.getLogger(__name__)
def parse_annotations(xml_filename: str,
                      zip_filename: str,
                      aecg_doc: etree._ElementTree,
                      aecgannset: AecgAnnotationSet,
                      path_prefix: str,
                      annsset_xmlnode_path: str,
                      valgroup: str = "RHYTHM",
                      log_validation: bool = False) -> Tuple[
                          AecgAnnotationSet, pd.DataFrame]:
    """Parses `aecg_doc` XML document and extracts annotations
    Args:
        xml_filename (str): Filename of the aECG XML file.
        zip_filename (str): Filename of zip file containing the aECG XML file.
            If '', then xml file is not stored in a zip file.
        aecg_doc (etree._ElementTree): XML document of the aECG XML file.
        aecgannset (AecgAnnotationSet): Annotation set to which append found
            annotations.
        path_prefix (str): Prefix of xml path from which start searching for
            annotations.
        annsset_xmlnode_path (str): Path to xml node of the annotation set
            containing the annotations.
        valgroup (str, optional): Indicates whether to search annotations in
            rhythm or derived waveform. Defaults to "RHYTHM".
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.
    Returns:
        Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with
            found annotations and dataframe with results of validation.
    """
    # Each annotation "group" gets a sequential id; groups found inside a
    # beat are counted first, then groups with no associated beat.
    anngrpid = 0
    # Annotations stored within a beat
    # The .replace('/', '/ns:') trick prefixes every xpath step with the
    # HL7 v3 namespace prefix declared in the namespaces map.
    beatnodes = aecg_doc.xpath((
        path_prefix +
        "/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace(
            '/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'})
    beatnum = 0
    valpd = pd.DataFrame()
    if len(beatnodes) > 0:
        logger.info(
            f'{xml_filename},{zip_filename},'
            f'{valgroup} {len(beatnodes)} annotated beats found')
    for beatnode in beatnodes:
        for rel_path in ["../component/annotation/"
                         "code[contains(@code, \"MDC_ECG_\")]"]:
            annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'),
                                       namespaces={'ns': 'urn:hl7-org:v3'})
            rel_path2 = "../value"
            for annsnode in annsnodes:
                # Working record for one annotation; re-appended (deep copy)
                # once per sub-annotation found below.
                ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
                       "codetype": "",
                       "wavecomponent": "", "wavecomponent2": "",
                       "timecode": "",
                       "value": "", "value_unit": "",
                       "low": "", "low_unit": "",
                       "high": "", "high_unit": "",
                       "lead": ""}
                # Annotation code
                valrow2 = validate_xpath(
                    annsnode,
                    ".",
                    "urn:hl7-org:v3",
                    "code",
                    new_validation_row(xml_filename,
                                       valgroup,
                                       "ANNSET_BEAT_ANNS"),
                    failcat="WARNING")
                valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path
                if valrow2["VALIOUT"] == "PASSED":
                    ann["code"] = valrow2["VALUE"]
                # Annotation type from top level value
                valrow2 = validate_xpath(annsnode,
                                         "../value",
                                         "urn:hl7-org:v3",
                                         "code",
                                         new_validation_row(
                                             xml_filename, valgroup,
                                             "ANNSET_BEAT_ANNS"),
                                         failcat="WARNING")
                valrow2["XPATH"] = annsset_xmlnode_path + "/value"
                if log_validation:
                    valpd = valpd.append(pd.DataFrame(
                        [valrow2], columns=VALICOLS), ignore_index=True)
                if valrow2["VALIOUT"] == "PASSED":
                    ann["codetype"] = valrow2["VALUE"]
                # Annotations type
                valrow2 = validate_xpath(
                    annsnode,
                    rel_path2,
                    "urn:hl7-org:v3",
                    "code",
                    new_validation_row(xml_filename,
                                       valgroup,
                                       "ANNSET_BEAT_ANNS"),
                    failcat="WARNING")
                valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \
                    "/" + rel_path2
                if valrow2["VALIOUT"] == "PASSED":
                    ann["beatnum"] = beatnum
                    ann["codetype"] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                    subannsnodes = annsnode.xpath(
                        rel_path.replace('/', '/ns:'),
                        namespaces={'ns': 'urn:hl7-org:v3'})
                    if len(subannsnodes) == 0:
                        subannsnodes = [annsnode]
                    else:
                        subannsnodes += [annsnode]
                    # Exclude annotations reporting interval values only
                    subannsnodes = [
                        sa for sa in subannsnodes
                        if not sa.get("code").startswith("MDC_ECG_TIME_PD_")]
                    for subannsnode in subannsnodes:
                        # Annotations type
                        valrow2 = validate_xpath(subannsnode,
                                                 rel_path2,
                                                 "urn:hl7-org:v3",
                                                 "code",
                                                 new_validation_row(
                                                     xml_filename,
                                                     valgroup,
                                                     "ANNSET_BEAT_ANNS"),
                                                 failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                            rel_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["wavecomponent"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # Annotations value
                        valrow2 = validate_xpath(subannsnode,
                                                 rel_path2,
                                                 "urn:hl7-org:v3",
                                                 "value",
                                                 new_validation_row(
                                                     xml_filename,
                                                     valgroup,
                                                     "ANNSET_BEAT_ANNS"),
                                                 failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                            rel_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["value"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # Annotations value units
                        valrow2 = validate_xpath(subannsnode,
                                                 rel_path2,
                                                 "urn:hl7-org:v3",
                                                 "unit",
                                                 new_validation_row(
                                                     xml_filename,
                                                     valgroup,
                                                     "ANNSET_BEAT_ANNS"),
                                                 failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                            rel_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["value_unit"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # annotations info from supporting ROI
                        # n == "" reads the scalar value; "low"/"high" read
                        # interval boundaries of the ROI.
                        rel_path3 = "../support/supportingROI/component/"\
                            "boundary/value"
                        for n in ["", "low", "high"]:
                            if n != "":
                                rp = rel_path3 + "/" + n
                            else:
                                rp = rel_path3
                            valrow3 = validate_xpath(
                                subannsnode,
                                rp,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(xml_filename,
                                                   valgroup,
                                                   "ANNSET_BEAT_ANNS"),
                                failcat="WARNING")
                            valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
                                rel_path + "/" + rp
                            if valrow3["VALIOUT"] == "PASSED":
                                if n != "":
                                    ann[n] = valrow3["VALUE"]
                                else:
                                    ann["value"] = valrow3["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow3], columns=VALICOLS),
                                    ignore_index=True)
                            valrow3 = validate_xpath(
                                subannsnode,
                                rp,
                                "urn:hl7-org:v3",
                                "unit",
                                new_validation_row(xml_filename,
                                                   valgroup,
                                                   "ANNSET_BEAT_ANNS"),
                                failcat="WARNING")
                            valrow3["XPATH"] = annsset_xmlnode_path + "/" + \
                                rel_path + "/" + rp
                            if valrow3["VALIOUT"] == "PASSED":
                                if n != "":
                                    ann[n + "_unit"] = valrow3["VALUE"]
                                else:
                                    ann["value_unit"] = valrow3["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow3], columns=VALICOLS),
                                    ignore_index=True)
                        # annotations time encoding, lead and other info used
                        # by value and supporting ROI
                        rel_path4 = "../support/supportingROI/component/"\
                            "boundary/code"
                        roinodes = subannsnode.xpath(
                            rel_path4.replace('/', '/ns:'),
                            namespaces={'ns': 'urn:hl7-org:v3'})
                        for roinode in roinodes:
                            valrow4 = validate_xpath(
                                roinode,
                                ".",
                                "urn:hl7-org:v3",
                                "code",
                                new_validation_row(xml_filename,
                                                   valgroup,
                                                   "ANNSET_BEAT_ANNS"),
                                failcat="WARNING")
                            valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
                                rel_path + "/" + rel_path4
                            if valrow4["VALIOUT"] == "PASSED":
                                # Boundary codes encode either the time
                                # reference or the ECG lead.
                                if valrow4["VALUE"] in ["TIME_ABSOLUTE",
                                                        "TIME_RELATIVE"]:
                                    ann["timecode"] = valrow4["VALUE"]
                                else:
                                    ann["lead"] = valrow4["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow4], columns=VALICOLS),
                                    ignore_index=True)
                        aecgannset.anns.append(copy.deepcopy(ann))
                else:
                    # Annotations type
                    valrow2 = validate_xpath(annsnode,
                                             ".",
                                             "urn:hl7-org:v3",
                                             "code",
                                             new_validation_row(xml_filename,
                                                                valgroup,
                                                                "ANNSET_BEAT_"
                                                                "ANNS"),
                                             failcat="WARNING")
                    valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\
                        "/" + rel_path2
                    if valrow2["VALIOUT"] == "PASSED":
                        ann["beatnum"] = beatnum
                        ann["codetype"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # Annotations value
                        valrow2 = validate_xpath(annsnode,
                                                 rel_path2,
                                                 "urn:hl7-org:v3",
                                                 "value",
                                                 new_validation_row(
                                                     xml_filename,
                                                     valgroup,
                                                     "ANNSET_BEAT_ANNS"),
                                                 failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                            rel_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["value"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # Annotations value units
                        valrow2 = validate_xpath(annsnode,
                                                 rel_path2,
                                                 "urn:hl7-org:v3",
                                                 "unit",
                                                 new_validation_row(
                                                     xml_filename,
                                                     valgroup,
                                                     "ANNSET_BEAT_ANNS"),
                                                 failcat="WARNING")
                        valrow2["XPATH"] = annsset_xmlnode_path + "/" + \
                            rel_path + "/" + rel_path2
                        if valrow2["VALIOUT"] == "PASSED":
                            ann["value_unit"] = valrow2["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                        # annotations time encoding, lead and other info used
                        # by value and supporting ROI
                        rel_path4 = "../support/supportingROI/component/" \
                            "boundary/code"
                        roinodes = annsnode.xpath(
                            rel_path4.replace('/', '/ns:'),
                            namespaces={'ns': 'urn:hl7-org:v3'})
                        for roinode in roinodes:
                            valrow4 = validate_xpath(roinode,
                                                     ".",
                                                     "urn:hl7-org:v3",
                                                     "code",
                                                     new_validation_row(
                                                         xml_filename,
                                                         valgroup,
                                                         "ANNSET_BEAT_ANNS"),
                                                     failcat="WARNING")
                            valrow4["XPATH"] = annsset_xmlnode_path + "/" + \
                                rel_path + "/" + rel_path4
                            if valrow4["VALIOUT"] == "PASSED":
                                if valrow4["VALUE"] in ["TIME_ABSOLUTE",
                                                        "TIME_RELATIVE"]:
                                    ann["timecode"] = valrow4["VALUE"]
                                else:
                                    ann["lead"] = valrow4["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow4],
                                                 columns=VALICOLS),
                                    ignore_index=True)
                        aecgannset.anns.append(copy.deepcopy(ann))
                    else:
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow2], columns=VALICOLS),
                                ignore_index=True)
                anngrpid = anngrpid + 1
        beatnum = beatnum + 1
    if len(beatnodes) > 0:
        logger.info(
            f'{xml_filename},{zip_filename},'
            f'{valgroup} {beatnum} annotated beats and {anngrpid} '
            f'annotations groups found')
    anngrpid_from_beats = anngrpid
    # Annotations stored without an associated beat
    for codetype_path in ["/component/annotation/code["
                          "(contains(@code, \"MDC_ECG_\") and"
                          " not (@code=\'MDC_ECG_BEAT\'))]"]:
        annsnodes = aecg_doc.xpath(
            (path_prefix + codetype_path).replace('/', '/ns:'),
            namespaces={'ns': 'urn:hl7-org:v3'})
        rel_path2 = "../value"
        for annsnode in annsnodes:
            ann = {"anngrpid": anngrpid, "beatnum": "", "code": "",
                   "codetype": "",
                   "wavecomponent": "", "wavecomponent2": "",
                   "timecode": "",
                   "value": "", "value_unit": "",
                   "low": "", "low_unit": "",
                   "high": "", "high_unit": "",
                   "lead": ""}
            # Annotations code
            valrow2 = validate_xpath(annsnode,
                                     ".",
                                     "urn:hl7-org:v3",
                                     "code",
                                     new_validation_row(xml_filename, valgroup,
                                                        "ANNSET_NOBEAT_ANNS"),
                                     failcat="WARNING")
            valrow2["XPATH"] = annsset_xmlnode_path
            if log_validation:
                valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
                                     ignore_index=True)
            if valrow2["VALIOUT"] == "PASSED":
                ann["code"] = valrow2["VALUE"]
            # Annotation type from top level value
            valrow2 = validate_xpath(annsnode,
                                     "../value",
                                     "urn:hl7-org:v3",
                                     "code",
                                     new_validation_row(xml_filename, valgroup,
                                                        "ANNSET_NOBEAT_ANNS"),
                                     failcat="WARNING")
            valrow2["XPATH"] = annsset_xmlnode_path + "/value"
            if log_validation:
                valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS),
                                     ignore_index=True)
            if valrow2["VALIOUT"] == "PASSED":
                ann["codetype"] = valrow2["VALUE"]
            subannsnodes = annsnode.xpath(
                (".." + codetype_path).replace('/', '/ns:'),
                namespaces={'ns': 'urn:hl7-org:v3'})
            if len(subannsnodes) == 0:
                subannsnodes = [annsnode]
            for subannsnode in subannsnodes:
                # One more level of nesting may exist; visit the node itself
                # plus any sub-sub annotations below it.
                subsubannsnodes = subannsnode.xpath(
                    (".." + codetype_path).replace('/', '/ns:'),
                    namespaces={'ns': 'urn:hl7-org:v3'})
                tmpnodes = [subannsnode]
                if len(subsubannsnodes) > 0:
                    tmpnodes = tmpnodes + subsubannsnodes
                for subsubannsnode in tmpnodes:
                    # Reset per-annotation fields; the same `ann` dict is
                    # reused and deep-copied on append.
                    ann["wavecomponent"] = ""
                    ann["wavecomponent2"] = ""
                    ann["timecode"] = ""
                    ann["value"] = ""
                    ann["value_unit"] = ""
                    ann["low"] = ""
                    ann["low_unit"] = ""
                    ann["high"] = ""
                    ann["high_unit"] = ""
                    roi_base = "../support/supportingROI/component/boundary"
                    rel_path3 = roi_base + "/value"
                    valrow2 = validate_xpath(
                        subsubannsnode,
                        ".",
                        "urn:hl7-org:v3",
                        "code",
                        new_validation_row(xml_filename,
                                           valgroup,
                                           "ANNSET_NOBEAT_"
                                           "ANNS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                        codetype_path + "/code"
                    if valrow2["VALIOUT"] == "PASSED":
                        if not ann["codetype"].endswith("WAVE"):
                            ann["codetype"] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                    # Annotations type
                    valrow2 = validate_xpath(
                        subsubannsnode,
                        rel_path2,
                        "urn:hl7-org:v3",
                        "code",
                        new_validation_row(xml_filename,
                                           valgroup,
                                           "ANNSET_NOBEAT_"
                                           "ANNS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                        codetype_path + "/" + rel_path2
                    if valrow2["VALIOUT"] == "PASSED":
                        ann["wavecomponent"] = valrow2["VALUE"]
                        # if ann["wavecomponent"] == "":
                        #     ann["wavecomponent"] = valrow2["VALUE"]
                        # else:
                        #     ann["wavecomponent2"] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                    # Annotations value
                    valrow2 = validate_xpath(
                        subsubannsnode,
                        rel_path2,
                        "urn:hl7-org:v3",
                        "",
                        new_validation_row(xml_filename,
                                           valgroup,
                                           "ANNSET_NOBEAT_"
                                           "ANNS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                        codetype_path + "/" + rel_path2
                    if valrow2["VALIOUT"] == "PASSED":
                        ann["value"] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                    # Annotations value as attribute
                    valrow2 = validate_xpath(
                        subsubannsnode,
                        rel_path2,
                        "urn:hl7-org:v3",
                        "value",
                        new_validation_row(xml_filename,
                                           valgroup,
                                           "ANNSET_NOBEAT_"
                                           "ANNS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                        codetype_path + "/" + rel_path2
                    if valrow2["VALIOUT"] == "PASSED":
                        ann["value"] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                    # Annotations value units
                    valrow2 = validate_xpath(
                        subsubannsnode,
                        rel_path2,
                        "urn:hl7-org:v3",
                        "unit",
                        new_validation_row(xml_filename,
                                           valgroup,
                                           "ANNSET_NOBEAT_"
                                           "ANNS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                        codetype_path + "/" + rel_path2
                    if valrow2["VALIOUT"] == "PASSED":
                        ann["value_unit"] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                    # annotations info from supporting ROI
                    for n in ["", "low", "high"]:
                        if n != "":
                            rp = rel_path3 + "/" + n
                        else:
                            rp = rel_path3
                        valrow3 = validate_xpath(
                            subsubannsnode,
                            rp,
                            "urn:hl7-org:v3",
                            "value",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_NOBEAT_"
                                               "ANNS"),
                            failcat="WARNING")
                        valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
                            codetype_path + "/" + rp
                        if valrow3["VALIOUT"] == "PASSED":
                            if n != "":
                                ann[n] = valrow3["VALUE"]
                            else:
                                ann["value"] = valrow3["VALUE"]
                        else:
                            # Fall back to the ROI attached to the nested
                            # component/annotation when the direct ROI is
                            # missing.
                            roi_base = "../component/annotation/support/"\
                                "supportingROI/component/boundary"
                            # Annotations type
                            valrow2 = validate_xpath(subsubannsnode,
                                                     "../component/annotation/"
                                                     "value",
                                                     "urn:hl7-org:v3",
                                                     "code",
                                                     new_validation_row(
                                                         xml_filename,
                                                         valgroup,
                                                         "ANNSET_NOBEAT_ANNS"),
                                                     failcat="WARNING")
                            valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \
                                codetype_path + "/" + \
                                "../component/annotation/value"
                            if valrow2["VALIOUT"] == "PASSED":
                                ann["wavecomponent2"] = valrow2["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow2], columns=VALICOLS),
                                    ignore_index=True)
                            # annotation values
                            if n != "":
                                rp = roi_base + "/value/" + n
                            else:
                                rp = roi_base + "/value"
                            valrow3 = validate_xpath(subsubannsnode,
                                                     rp,
                                                     "urn:hl7-org:v3",
                                                     "value",
                                                     new_validation_row(
                                                         xml_filename,
                                                         valgroup,
                                                         "ANNSET_NOBEAT_ANNS"),
                                                     failcat="WARNING")
                            valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
                                codetype_path + "/" + rp
                            if valrow3["VALIOUT"] == "PASSED":
                                if n != "":
                                    ann[n] = valrow3["VALUE"]
                                else:
                                    ann["value"] = valrow3["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow3], columns=VALICOLS),
                                ignore_index=True)
                        valrow3 = validate_xpath(
                            subsubannsnode,
                            rp,
                            "urn:hl7-org:v3",
                            "unit",
                            new_validation_row(xml_filename,
                                               valgroup,
                                               "ANNSET_NOBEAT"
                                               "_ANNS"),
                            failcat="WARNING")
                        valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \
                            codetype_path + "/" + rp
                        if valrow3["VALIOUT"] == "PASSED":
                            if n != "":
                                ann[n + "_unit"] = valrow3["VALUE"]
                            else:
                                ann["value_unit"] = valrow3["VALUE"]
                        if log_validation:
                            valpd = valpd.append(
                                pd.DataFrame([valrow3], columns=VALICOLS),
                                ignore_index=True)
                    # annotations time encoding, lead and other info used by
                    # value and supporting ROI
                    for rel_path4 in ["../support/supportingROI/component/"
                                      "boundary",
                                      "../component/annotation/support/"
                                      "supportingROI/component/boundary"]:
                        roinodes = subsubannsnode.xpath(
                            rel_path4.replace('/', '/ns:'),
                            namespaces={'ns': 'urn:hl7-org:v3'})
                        for roinode in roinodes:
                            valrow4 = validate_xpath(roinode,
                                                     "./code",
                                                     "urn:hl7-org:v3",
                                                     "code",
                                                     new_validation_row(
                                                         xml_filename,
                                                         valgroup,
                                                         "ANNSET_NOBEAT_ANNS"),
                                                     failcat="WARNING")
                            valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \
                                codetype_path + "/" + rel_path4
                            if valrow4["VALIOUT"] == "PASSED":
                                if valrow4["VALUE"] in ["TIME_ABSOLUTE",
                                                        "TIME_RELATIVE"]:
                                    ann["timecode"] = valrow4["VALUE"]
                                else:
                                    ann["lead"] = valrow4["VALUE"]
                            if log_validation:
                                valpd = valpd.append(
                                    pd.DataFrame([valrow4], columns=VALICOLS),
                                    ignore_index=True)
                    aecgannset.anns.append(copy.deepcopy(ann))
            anngrpid = anngrpid + 1
    logger.info(
        f'{xml_filename},{zip_filename},'
        f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups'
        f' without an associated beat found')
    return aecgannset, valpd
def parse_generalinfo(aecg_doc: etree._ElementTree,
                      aecg: Aecg,
                      log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts general information
    This function parses the `aecg_doc` xml document searching for general
    information that includes in the returned `Aecg`: unique identifier (UUID),
    ECG date and time of collection (EGDTC), and device information.
    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.
    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # UUID
    # =======================================
    # local-name() is used so the id element matches regardless of namespace.
    valrow = validate_xpath(aecg_doc,
                            "./*[local-name() = \"id\"]",
                            "",
                            "root",
                            new_validation_row(aecg.filename,
                                               "GENERAL",
                                               "UUID"))
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID found: {valrow["VALUE"]}')
        aecg.UUID = valrow["VALUE"]
    else:
        logger.critical(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID not found')
    # The optional id/@extension is concatenated to the root to form the
    # final UUID.
    valrow = validate_xpath(aecg_doc,
                            "./*[local-name() = \"id\"]",
                            "",
                            "extension",
                            new_validation_row(aecg.filename,
                                               "GENERAL",
                                               "UUID"))
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID extension found: {valrow["VALUE"]}')
        aecg.UUID += valrow["VALUE"]
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID updated to: {aecg.UUID}')
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'UUID extension not found')
    # =======================================
    # EGDTC
    # =======================================
    # Date/time of collection may be given as low/center/high bounds of
    # effectiveTime; finding any one of them counts as found.
    valpd = pd.DataFrame()
    egdtc_found = False
    for n in ["low", "center", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./*[local-name() = \"effectiveTime\"]/"
                                "*[local-name() = \"" + n + "\"]",
                                "",
                                "value",
                                new_validation_row(aecg.filename, "GENERAL",
                                                   "EGDTC_" + n),
                                "WARNING")
        if valrow["VALIOUT"] == "PASSED":
            egdtc_found = True
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'EGDTC {n} found: {valrow["VALUE"]}')
            aecg.EGDTC[n] = valrow["VALUE"]
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if not egdtc_found:
        logger.critical(
            f'{aecg.filename},{aecg.zipContainer},'
            f'EGDTC not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(valpd,
                                                             ignore_index=True)
    # =======================================
    # DEVICE
    # =======================================
    # DEVICE = {"manufacturer": "", "model": "", "software": ""}
    valrow = validate_xpath(aecg_doc,
                            "./component/series/author/"
                            "seriesAuthor/manufacturerOrganization/name",
                            "urn:hl7-org:v3",
                            "",
                            new_validation_row(aecg.filename, "GENERAL",
                                               "DEVICE_manufacturer"),
                            "WARNING")
    if valrow["VALIOUT"] == "PASSED":
        # Newlines are flattened for logging only; the stored value keeps
        # the original text.
        tmp = valrow["VALUE"].replace("\n", "|")
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DEVICE manufacturer found: {tmp}')
        aecg.DEVICE["manufacturer"] = valrow["VALUE"]
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DEVICE manufacturer not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    valrow = validate_xpath(aecg_doc,
                            "./component/series/author/"
                            "seriesAuthor/manufacturedSeriesDevice/"
                            "manufacturerModelName",
                            "urn:hl7-org:v3",
                            "",
                            new_validation_row(aecg.filename, "GENERAL",
                                               "DEVICE_model"),
                            "WARNING")
    if valrow["VALIOUT"] == "PASSED":
        tmp = valrow["VALUE"].replace("\n", "|")
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DEVICE model found: {tmp}')
        aecg.DEVICE["model"] = valrow["VALUE"]
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DEVICE model not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    valrow = validate_xpath(aecg_doc,
                            "./component/series/author/"
                            "seriesAuthor/manufacturedSeriesDevice/"
                            "softwareName",
                            "urn:hl7-org:v3",
                            "",
                            new_validation_row(aecg.filename, "GENERAL",
                                               "DEVICE_software"),
                            "WARNING")
    if valrow["VALIOUT"] == "PASSED":
        tmp = valrow["VALUE"].replace("\n", "|")
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DEVICE software found: {tmp}')
        aecg.DEVICE["software"] = valrow["VALUE"]
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DEVICE software not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    return aecg
def parse_subjectinfo(aecg_doc: etree._ElementTree,
                      aecg: Aecg,
                      log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts subject information

    This function parses the `aecg_doc` xml document searching for subject
    information that includes in the returned `Aecg`: subject unique
    identifier (USUBJID), gender, birthtime, and race.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # USUBJID (root and extension parts of the trialSubject <id> element)
    # =======================================
    valpd = pd.DataFrame()
    for n in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                "./componentOf/timepointEvent/componentOf/"
                                "subjectAssignment/subject/trialSubject/id",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "SUBJECTINFO",
                                                   "USUBJID_" + n))
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} found: {valrow["VALUE"]}')
            aecg.USUBJID[n] = valrow["VALUE"]
        else:
            # The original code branched on n == "root" vs "extension" but
            # logged the identical message in both branches; collapsed here.
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DM.USUBJID ID {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    # Neither part of the identifier was found: the subject cannot be
    # identified at all, which is an error (not just a warning).
    if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""):
        logger.error(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.USUBJID cannot be established.')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            valpd, ignore_index=True)
    # =======================================
    # SEX / GENDER
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/subject/trialSubject/"
                            "subjectDemographicPerson/"
                            "administrativeGenderCode",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "SUBJECTINFO",
                                               "SEX"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.SEX found: {valrow["VALUE"]}')
        aecg.SEX = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.SEX not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    # =======================================
    # BIRTHTIME
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/subject/trialSubject/"
                            "subjectDemographicPerson/birthTime",
                            "urn:hl7-org:v3",
                            "value",
                            new_validation_row(aecg.filename, "SUBJECTINFO",
                                               "BIRTHTIME"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.BIRTHTIME found.')
        aecg.BIRTHTIME = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.BIRTHTIME not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    # =======================================
    # RACE
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/componentOf/"
                            "subjectAssignment/subject/trialSubject/"
                            "subjectDemographicPerson/raceCode",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "SUBJECTINFO",
                                               "RACE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.RACE found: {valrow["VALUE"]}')
        # Fix: assign aecg.RACE only when the value was actually found,
        # mirroring SEX and BIRTHTIME above. Previously the assignment
        # happened unconditionally after the if/else, overwriting the
        # default with the (empty) failed-lookup value.
        aecg.RACE = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DM.RACE not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    return aecg
def parse_trtainfo(aecg_doc: etree._ElementTree,
                   aecg: Aecg,
                   log_validation: bool = False) -> Aecg:
    """Extracts treatment information from `aecg_doc` into `aecg`

    Looks up the treatment group assignment code in the xml document and,
    when found, stores it in `aecg.TRTA`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    trta_xpath = ("./componentOf/timepointEvent/componentOf/"
                  "subjectAssignment/definition/"
                  "treatmentGroupAssignment/code")
    valrow = validate_xpath(aecg_doc,
                            trta_xpath,
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "TRTA"),
                            failcat="WARNING")
    if valrow["VALIOUT"] != "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TRTA information not found')
    else:
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TRTA information found: {valrow["VALUE"]}')
        aecg.TRTA = valrow["VALUE"]
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    return aecg
def parse_studyinfo(aecg_doc: etree._ElementTree,
                    aecg: Aecg,
                    log_validation: bool = False) -> Aecg:
    """Extracts study information from `aecg_doc` into `aecg`

    Searches the xml document for the study unique identifier (STUDYID,
    split into root and extension parts) and the study title, storing
    whatever is found in `aecg.STUDYID` and `aecg.STUDYTITLE`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    trial_id_xpath = ("./componentOf/timepointEvent/componentOf/"
                      "subjectAssignment/componentOf/"
                      "clinicalTrial/id")
    valpd = pd.DataFrame()
    for part in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                trial_id_xpath,
                                "urn:hl7-org:v3",
                                part,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "STUDYID_" + part),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'STUDYID {part} found: {valrow["VALUE"]}')
            aecg.STUDYID[part] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'STUDYID {part} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            valpd, ignore_index=True)
    trial_title_xpath = ("./componentOf/timepointEvent/componentOf/"
                         "subjectAssignment/componentOf/"
                         "clinicalTrial/title")
    valrow = validate_xpath(aecg_doc,
                            trial_title_xpath,
                            "urn:hl7-org:v3",
                            "",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "STUDYTITLE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        # Newlines are removed for the log line only; the stored title
        # keeps the original formatting.
        title_one_line = valrow["VALUE"].replace("\n", "")
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'STUDYTITLE found: {title_one_line}')
        aecg.STUDYTITLE = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'STUDYTITLE not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    return aecg
def parse_timepoints(aecg_doc: etree._ElementTree,
                     aecg: Aecg,
                     log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts timepoints information

    This function parses the `aecg_doc` xml document searching for timepoints
    information that includes in the returned `Aecg`: absolute timepoint or
    study event information (TPT), relative timepoint or study event relative
    to a reference event (RTPT), and protocol timepoint information (PTPT).

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # TPT
    # =======================================
    valpd = pd.DataFrame()
    for n in ["code", "displayName"]:
        valrow = validate_xpath(aecg_doc,
                                "./componentOf/timepointEvent/code",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "TPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} found: {valrow["VALUE"]}')
            aecg.TPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    valrow = validate_xpath(aecg_doc,
                            "./componentOf/timepointEvent/reasonCode",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "TPT_reasonCode"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TPT reasonCode found: {valrow["VALUE"]}')
        aecg.TPT["reasonCode"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'TPT reasonCode not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    # Collection window of the timepoint event (low/high bounds).
    valpd = pd.DataFrame()
    for n in ["low", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./componentOf/timepointEvent/"
                                "effectiveTime/" + n,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "TPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} found: {valrow["VALUE"]}')
            aecg.TPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'TPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    # =======================================
    # RTPT
    # =======================================
    valpd = pd.DataFrame()
    for n in ["code", "displayName"]:
        # Fix: pass the loop variable `n` as the attribute to extract.
        # Previously the literal "code" was passed here (copy-paste bug),
        # so RTPT["displayName"] was filled with the code attribute's
        # value. The TPT and PTPT loops already use `n` correctly.
        valrow = validate_xpath(aecg_doc,
                                "./definition/relativeTimepoint/code",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "RTPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RTPT {n} found: {valrow["VALUE"]}')
            aecg.RTPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RTPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "pauseQuantity",
                            "urn:hl7-org:v3",
                            "value",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "RTPT_pauseQuantity"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity value found: {valrow["VALUE"]}')
        aecg.RTPT["pauseQuantity"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity value not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)
    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "pauseQuantity",
                            "urn:hl7-org:v3",
                            "unit",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "RTPT_pauseQuantity_unit"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity unit found: {valrow["VALUE"]}')
        aecg.RTPT["pauseQuantity_unit"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RTPT pauseQuantity unit not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)
    # =======================================
    # PTPT
    # =======================================
    valpd = pd.DataFrame()
    for n in ["code", "displayName"]:
        valrow = validate_xpath(aecg_doc,
                                "./definition/relativeTimepoint/"
                                "componentOf/protocolTimepointEvent/code",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename,
                                                   "STUDYINFO",
                                                   "PTPT_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'PTPT {n} found: {valrow["VALUE"]}')
            aecg.PTPT[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'PTPT {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "protocolTimepointEvent/component/"
                            "referenceEvent/code",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "PTPT_referenceEvent"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent code found: {valrow["VALUE"]}')
        aecg.PTPT["referenceEvent"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent code not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)
    valrow = validate_xpath(aecg_doc,
                            "./definition/relativeTimepoint/componentOf/"
                            "protocolTimepointEvent/component/"
                            "referenceEvent/code",
                            "urn:hl7-org:v3",
                            "displayName",
                            new_validation_row(aecg.filename, "STUDYINFO",
                                               "PTPT_referenceEvent_"
                                               "displayName"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent displayName found: '
            f'{valrow["VALUE"]}')
        aecg.PTPT["referenceEvent_displayName"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'PTPT referenceEvent displayName not found')
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(pd.DataFrame([valrow],
                                                      columns=VALICOLS),
                                         ignore_index=True)
    return aecg
def parse_rhythm_waveform_info(aecg_doc: etree._ElementTree,
                               aecg: Aecg,
                               log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts rhythm waveform information

    This function parses the `aecg_doc` xml document searching for rhythm
    waveform information that includes in the returned `Aecg`: waveform
    identifier, code, display name, and date and time of collection.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # RHYTHM ID: root and extension parts of the series <id> element
    # =======================================
    valpd = pd.DataFrame()
    for n in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/id",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename, "RHYTHM",
                                                   "ID_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHM ID {n} found: {valrow["VALUE"]}')
            aecg.RHYTHMID[n] = valrow["VALUE"]
        else:
            # NOTE(review): both branches below emit the exact same warning;
            # the root/extension split looks redundant -- confirm whether
            # different severities or messages were originally intended.
            if n == "root":
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM ID {n} not found')
            else:
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM ID {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    # =======================================
    # RHYTHM code: expected to be the literal "RHYTHM"; any other value is
    # kept but downgrades the validation row to a WARNING.
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./component/series/code",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "RHYTHM",
                                               "CODE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM code found: {valrow["VALUE"]}')
        aecg.RHYTHMCODE["code"] = valrow["VALUE"]
        if aecg.RHYTHMCODE["code"] != "RHYTHM":
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHM unexpected code found: {valrow["VALUE"]}')
            valrow["VALIOUT"] = "WARNING"
            valrow["VALIMSG"] = "Unexpected value found"
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM code not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    # Human-readable display name for the series code (optional).
    valrow = validate_xpath(aecg_doc,
                            "./component/series/code",
                            "urn:hl7-org:v3",
                            "displayName",
                            new_validation_row(aecg.filename, "RHYTHM",
                                               "CODE_displayName"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM displayName found: {valrow["VALUE"]}')
        aecg.RHYTHMCODE["displayName"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM displayName not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    # =======================================
    # RHYTHM date/time of collection: effectiveTime low/high bounds
    # =======================================
    valpd = pd.DataFrame()
    for n in ["low", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/effectiveTime/" + n,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(aecg.filename, "RHYTHM",
                                                   "EGDTC_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHMEGDTC {n} found: {valrow["VALUE"]}')
            aecg.RHYTHMEGDTC[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHMEGDTC {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_derived_waveform_info(aecg_doc: etree._ElementTree,
                                aecg: Aecg,
                                log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts derived waveform information

    This function parses the `aecg_doc` xml document searching for derived
    waveform information that includes in the returned `Aecg`: waveform
    identifier, code, display name, and date and time of collection.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # =======================================
    # DERIVED ID: root and extension parts of the derivedSeries <id> element
    # =======================================
    valpd = pd.DataFrame()
    for n in ["root", "extension"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/derivation/"
                                "derivedSeries/id",
                                "urn:hl7-org:v3",
                                n,
                                new_validation_row(aecg.filename, "DERIVED",
                                                   "ID_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED ID {n} found: {valrow["VALUE"]}')
            aecg.DERIVEDID[n] = valrow["VALUE"]
        else:
            # NOTE(review): both branches below emit the exact same warning;
            # the root/extension split looks redundant -- confirm whether
            # different severities or messages were originally intended.
            if n == "root":
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED ID {n} not found')
            else:
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED ID {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    # =======================================
    # DERIVED code: expected to be "REPRESENTATIVE_BEAT"; any other value is
    # kept but downgrades the validation row to a WARNING.
    # =======================================
    valrow = validate_xpath(aecg_doc,
                            "./component/series/derivation/"
                            "derivedSeries/code",
                            "urn:hl7-org:v3",
                            "code",
                            new_validation_row(aecg.filename, "DERIVED",
                                               "CODE"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED code found: {valrow["VALUE"]}')
        aecg.DERIVEDCODE["code"] = valrow["VALUE"]
        if aecg.DERIVEDCODE["code"] != "REPRESENTATIVE_BEAT":
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED unexpected code found: {valrow["VALUE"]}')
            valrow["VALIOUT"] = "WARNING"
            valrow["VALIMSG"] = "Unexpected value found"
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED code not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    # Human-readable display name for the derived series code (optional).
    valrow = validate_xpath(aecg_doc,
                            "./component/series/derivation/"
                            "derivedSeries/code",
                            "urn:hl7-org:v3",
                            "displayName",
                            new_validation_row(aecg.filename, "DERIVED",
                                               "CODE_displayName"),
                            failcat="WARNING")
    if valrow["VALIOUT"] == "PASSED":
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED displayName found: {valrow["VALUE"]}')
        aecg.DERIVEDCODE["displayName"] = valrow["VALUE"]
    else:
        logger.debug(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED displayName not found')
    if log_validation:
        aecg.validatorResults = aecg.validatorResults.append(
            pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    # =======================================
    # DERIVED date/time of collection: effectiveTime low/high bounds
    # =======================================
    valpd = pd.DataFrame()
    for n in ["low", "high"]:
        valrow = validate_xpath(aecg_doc,
                                "./component/series/derivation/"
                                "derivedSeries/effectiveTime/" + n,
                                "urn:hl7-org:v3",
                                "value",
                                new_validation_row(aecg.filename, "DERIVED",
                                                   "EGDTC_" + n),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVEDEGDTC {n} found: {valrow["VALUE"]}')
            aecg.DERIVEDEGDTC[n] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVEDEGDTC {n} not found')
        if log_validation:
            valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS),
                                 ignore_index=True)
    if log_validation:
        aecg.validatorResults = \
            aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_rhythm_waveform_timeseries(aecg_doc: etree._ElementTree,
                                     aecg: Aecg,
                                     include_digits: bool = False,
                                     log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts rhythm's timeseries

    This function parses the `aecg_doc` xml document searching for rhythm
    waveform timeseries (sequences) information that includes in the returned
    :any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
    :any:`Aecg.RHYTHMLEADS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        include_digits (bool, optional): Indicates whether to include the
            digits information in the returned `Aecg`.
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    path_prefix = './component/series/component/sequenceSet/' \
                  'component/sequence'
    seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
                              namespaces={'ns': 'urn:hl7-org:v3'})
    if len(seqnodes) > 0:
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM sequenceSet(s) found: '
            f'{len(seqnodes)} sequenceSet nodes')
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'RHYTHM sequenceSet not found')
    for xmlnode in seqnodes:
        xmlnode_path = aecg_doc.getpath(xmlnode)
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path,
                                "urn:hl7-org:v3",
                                "code",
                                new_validation_row(aecg.filename, "RHYTHM",
                                                   "SEQUENCE_CODE"),
                                failcat="WARNING")
        valpd = pd.DataFrame()
        if valrow["VALIOUT"] == "PASSED":
            if not valrow["VALUE"] in SEQUENCE_CODES:
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM unexpected sequenceSet code '
                    f'found: {valrow["VALUE"]}')
                valrow["VALIOUT"] = "WARNING"
                valrow["VALIMSG"] = "Unexpected sequence code found"
            if valrow["VALUE"] in TIME_CODES:
                # Time sequence: capture head and increment of the
                # shared time axis for subsequent lead sequences.
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM sequenceSet code found: {valrow["VALUE"]}')
                aecg.RHYTHMTIME["code"] = valrow["VALUE"]
                # Retrieve time head info from value node
                rel_path = "../value/head"
                valrow2 = validate_xpath(
                    xmlnode,
                    rel_path,
                    "urn:hl7-org:v3",
                    "value",
                    new_validation_row(
                        aecg.filename, "RHYTHM", "SEQUENCE_TIME_HEAD"),
                    failcat="WARNING")
                valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                if valrow2["VALIOUT"] == "PASSED":
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'RHYTHM SEQUENCE_TIME_HEAD found: {valrow2["VALUE"]}')
                    aecg.RHYTHMTIME["head"] = valrow2["VALUE"]
                else:
                    logger.debug(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'RHYTHM SEQUENCE_TIME_HEAD not found')
                if log_validation:
                    valpd = valpd.append(
                        pd.DataFrame([valrow2], columns=VALICOLS),
                        ignore_index=True)
                # Retrieve time increment info from value node
                rel_path = "../value/increment"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(
                            aecg.filename, "RHYTHM", "SEQUENCE_TIME_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_TIME_{n} found: '
                            f'{valrow2["VALUE"]}')
                        if n == "value":
                            aecg.RHYTHMTIME["increment"] = float(
                                valrow2["VALUE"])
                        else:
                            aecg.RHYTHMTIME[n] = valrow2["VALUE"]
                    if log_validation:
                        valpd = \
                            valpd.append(pd.DataFrame([valrow2],
                                                      columns=VALICOLS),
                                         ignore_index=True)
            else:
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'RHYTHM sequenceSet code found: '
                    f'{valrow["VALUE"]}')
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'LEADNAME from RHYTHM sequenceSet code: '
                    f'{valrow["VALUE"]}')
                # Assume is a lead
                aecglead = AecgLead()
                aecglead.leadname = valrow["VALUE"]
                # Inherit last parsed RHYTHMTIME
                aecglead.LEADTIME = copy.deepcopy(aecg.RHYTHMTIME)
                # Retrieve lead origin info
                rel_path = "../value/origin"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(
                            aecg.filename, "RHYTHM",
                            "SEQUENCE_LEAD_ORIGIN_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.origin = float(valrow2["VALUE"])
                            except Exception as ex:
                                # Fix: log the failure (the SCALE branch
                                # below already does) and use assignment
                                # `=` instead of the no-op comparison `==`
                                # so the validation row is actually marked
                                # as an ERROR.
                                logger.error(
                                    f'{aecg.filename},{aecg.zipContainer},'
                                    f'RHYTHM SEQUENCE_LEAD_ORIGIN '
                                    f'value is not a valid number: \"{ex}\"')
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
                                                     "ORIGIN is not a "\
                                                     "number"
                        else:
                            aecglead.origin_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_ORIGIN_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Retrieve lead scale info
                rel_path = "../value/scale"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(
                            aecg.filename, "RHYTHM",
                            "SEQUENCE_LEAD_SCALE_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_SCALE_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.scale = float(valrow2["VALUE"])
                            except Exception as ex:
                                logger.error(
                                    f'{aecg.filename},{aecg.zipContainer},'
                                    f'RHYTHM SEQUENCE_LEAD_SCALE '
                                    f'value is not a valid number: \"{ex}\"')
                                # Fix: assignment `=` instead of the no-op
                                # comparison `==`.
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = "SEQUENCE_LEAD_"\
                                                     "SCALE is not a "\
                                                     "number"
                        else:
                            aecglead.scale_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'RHYTHM SEQUENCE_LEAD_SCALE_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Include digits if requested
                if include_digits:
                    rel_path = "../value/digits"
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        "",
                        new_validation_row(
                            aecg.filename, "RHYTHM", "SEQUENCE_LEAD_DIGITS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        try:
                            # Convert string of digits to list of integers:
                            # remove new lines
                            sdigits = valrow2["VALUE"].replace("\n", " ")
                            # remove carriage returns
                            sdigits = sdigits.replace("\r", " ")
                            # remove tabs
                            sdigits = sdigits.replace("\t", " ")
                            # collapse 2 or more spaces into 1 space char
                            # and remove leading/trailing white spaces
                            sdigits = re.sub("\\s+", " ", sdigits).strip()
                            # Convert string into list of integers
                            aecglead.digits = [int(s) for s in
                                               sdigits.split(' ')]
                            logger.info(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'DIGITS added to lead'
                                f' {aecglead.leadname} (n: '
                                f'{len(aecglead.digits)})')
                        except Exception as ex:
                            logger.error(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'Error parsing DIGITS from '
                                f'string to list of integers: \"{ex}\"')
                            # Fix: assignment `=` instead of the no-op
                            # comparison `==`.
                            valrow2["VALIOUT"] = "ERROR"
                            valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
                                                 "LEAD_DIGITS from string"\
                                                 " to list of integers"
                    else:
                        logger.error(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DIGITS not found for lead {aecglead.leadname}')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                else:
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DIGITS were not requested by the user')
                aecg.RHYTHMLEADS.append(copy.deepcopy(aecglead))
        else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'RHYTHM sequenceSet code not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
            if valpd.shape[0] > 0:
                aecg.validatorResults = \
                    aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_derived_waveform_timeseries(aecg_doc: etree._ElementTree,
                                      aecg: Aecg,
                                      include_digits: bool = False,
                                      log_validation: bool = False):
    """Parses `aecg_doc` XML document and extracts derived's timeseries

    This function parses the `aecg_doc` xml document searching for derived
    waveform timeseries (sequences) information that includes in the returned
    :any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the
    :any:`Aecg.DERIVEDLEADS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        include_digits (bool, optional): Indicates whether to include the
            digits information in the returned `Aecg`.
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    path_prefix = './component/series/derivation/derivedSeries/component'\
                  '/sequenceSet/component/sequence'
    seqnodes = aecg_doc.xpath((path_prefix + '/code').replace('/', '/ns:'),
                              namespaces={'ns': 'urn:hl7-org:v3'})
    if len(seqnodes) > 0:
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED sequenceSet(s) found: '
            f'{len(seqnodes)} sequenceSet nodes')
    else:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'DERIVED sequenceSet not found')
    for xmlnode in seqnodes:
        xmlnode_path = aecg_doc.getpath(xmlnode)
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path,
                                "urn:hl7-org:v3",
                                "code",
                                new_validation_row(aecg.filename, "DERIVED",
                                                   "SEQUENCE_CODE"),
                                failcat="WARNING")
        valpd = pd.DataFrame()
        if valrow["VALIOUT"] == "PASSED":
            if valrow["VALUE"] not in SEQUENCE_CODES:
                logger.warning(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED unexpected sequenceSet code '
                    f'found: {valrow["VALUE"]}')
                valrow["VALIOUT"] = "WARNING"
                valrow["VALIMSG"] = "Unexpected sequence code found"
            if valrow["VALUE"] in TIME_CODES:
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
                aecg.DERIVEDTIME["code"] = valrow["VALUE"]
                # Retrieve time head info from value node
                rel_path = "../value/head"
                valrow2 = validate_xpath(
                    xmlnode,
                    rel_path,
                    "urn:hl7-org:v3",
                    "value",
                    new_validation_row(aecg.filename, "DERIVED",
                                       "SEQUENCE_TIME_HEAD"),
                    failcat="WARNING")
                valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                if valrow2["VALIOUT"] == "PASSED":
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DERIVED SEQUENCE_TIME_HEAD found: '
                        f'{valrow2["VALUE"]}')
                    aecg.DERIVEDTIME["head"] = valrow2["VALUE"]
                else:
                    logger.debug(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DERIVED SEQUENCE_TIME_HEAD not found')
                if log_validation:
                    valpd = valpd.append(
                        pd.DataFrame([valrow2], columns=VALICOLS),
                        ignore_index=True)
                # Retrieve time increment info from value node
                rel_path = "../value/increment"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_TIME_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_TIME_{n} found: '
                            f'{valrow2["VALUE"]}')
                        if n == "value":
                            aecg.DERIVEDTIME["increment"] =\
                                float(valrow2["VALUE"])
                        else:
                            aecg.DERIVEDTIME[n] = valrow2["VALUE"]
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
            else:
                logger.debug(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'DERIVED sequenceSet code found: {valrow["VALUE"]}')
                logger.info(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'LEADNAME from DERIVED sequenceSet code: '
                    f'{valrow["VALUE"]}')
                # Assume is a lead
                aecglead = AecgLead()
                aecglead.leadname = valrow["VALUE"]
                # Inherit last parsed DERIVEDTIME
                aecglead.LEADTIME = copy.deepcopy(aecg.DERIVEDTIME)
                # Retrieve lead origin info
                rel_path = "../value/origin"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_LEAD_ORIGIN_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.origin = float(valrow2["VALUE"])
                            except Exception as ex:
                                # Mark the validation row as an error
                                # (assignment, not comparison)
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = \
                                    "SEQUENCE_LEAD_ORIGIN is not a number"
                        else:
                            aecglead.origin_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_ORIGIN_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Retrieve lead scale info
                rel_path = "../value/scale"
                for n in ["value", "unit"]:
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        n,
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_LEAD_SCALE_" + n),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_SCALE_{n} '
                            f'found: {valrow2["VALUE"]}')
                        if n == "value":
                            try:
                                aecglead.scale = float(valrow2["VALUE"])
                            except Exception as ex:
                                logger.error(
                                    f'{aecg.filename},{aecg.zipContainer},'
                                    f'DERIVED SEQUENCE_LEAD_SCALE'
                                    f' value is not a valid number: \"{ex}\"')
                                # Mark the validation row as an error
                                # (assignment, not comparison)
                                valrow2["VALIOUT"] = "ERROR"
                                valrow2["VALIMSG"] = "SEQUENCE_LEAD_SCALE"\
                                                     " is not a number"
                        else:
                            aecglead.scale_unit = valrow2["VALUE"]
                    else:
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DERIVED SEQUENCE_LEAD_SCALE_{n} not found')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                # Include digits if requested
                if include_digits:
                    rel_path = "../value/digits"
                    valrow2 = validate_xpath(
                        xmlnode,
                        rel_path,
                        "urn:hl7-org:v3",
                        "",
                        new_validation_row(aecg.filename, "DERIVED",
                                           "SEQUENCE_LEAD_DIGITS"),
                        failcat="WARNING")
                    valrow2["XPATH"] = xmlnode_path + "/" + rel_path
                    if valrow2["VALIOUT"] == "PASSED":
                        try:
                            # Convert string of digits to list of integers
                            # remove new lines
                            sdigits = valrow2["VALUE"].replace("\n", " ")
                            # remove carriage returns
                            sdigits = sdigits.replace("\r", " ")
                            # remove tabs
                            sdigits = sdigits.replace("\t", " ")
                            # collapse 2 or more spaces into 1 space char
                            # and remove leading/trailing white spaces
                            sdigits = re.sub("\\s+", " ", sdigits).strip()
                            # Convert string into list of integers
                            aecglead.digits = [int(s) for s in
                                               sdigits.split(' ')]
                            logger.info(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'DIGITS added to lead'
                                f' {aecglead.leadname} (n: '
                                f'{len(aecglead.digits)})')
                        except Exception as ex:
                            logger.error(
                                f'{aecg.filename},{aecg.zipContainer},'
                                f'Error parsing DIGITS from '
                                f'string to list of integers: \"{ex}\"')
                            # Mark the validation row as an error
                            # (assignment, not comparison)
                            valrow2["VALIOUT"] = "ERROR"
                            valrow2["VALIMSG"] = "Error parsing SEQUENCE_"\
                                                 "LEAD_DIGITS from string"\
                                                 " to list of integers"
                    else:
                        logger.error(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'DIGITS not found for lead {aecglead.leadname}')
                    if log_validation:
                        valpd = valpd.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                else:
                    logger.info(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'DIGITS were not requested by the user')
                aecg.DERIVEDLEADS.append(copy.deepcopy(aecglead))
        else:
            # NOTE: this parser handles the DERIVED series (the previous
            # version logged "RHYTHM" here by mistake).
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'DERIVED sequenceSet code not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
            if valpd.shape[0] > 0:
                aecg.validatorResults = \
                    aecg.validatorResults.append(valpd, ignore_index=True)
    return aecg
def parse_waveform_annotations(aecg_doc: etree._ElementTree,
                               aecg: Aecg,
                               anngrp: Dict,
                               log_validation: bool = False):
    """Parses `aecg_doc` XML document and extracts waveform annotations

    This function parses the `aecg_doc` xml document searching for
    waveform annotation sets that includes in the returned
    :any:`Aecg`. As indicated in the `anngrp` parameter, each annotation set
    is stored as an :any:`AecgAnnotationSet` in the :any:`Aecg.RHYTHMANNS`
    or :any:`Aecg.DERIVEDANNS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        anngrp (Dict): includes a `valgroup` key indicating whether the
            rhythm or derived waveform annotations should be located, and a
            `path_prefix` with the xml path prefix for which start searching
            for annotation sets in the `aecg_doc` xml document.
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    val_grp = anngrp["valgroup"]
    logger.debug(
        f'{aecg.filename},{aecg.zipContainer},'
        f'{val_grp}: searching annotations started')
    path_prefix = anngrp["path_prefix"]
    anns_setnodes = aecg_doc.xpath(path_prefix.replace('/', '/ns:'),
                                   namespaces={'ns': 'urn:hl7-org:v3'})
    if len(anns_setnodes) == 0:
        logger.warning(
            f'{aecg.filename},{aecg.zipContainer},'
            f'{anngrp["valgroup"]}: no annotation nodes found')
    for xmlnode in anns_setnodes:
        aecgannset = AecgAnnotationSet()
        xmlnode_path = aecg_doc.getpath(xmlnode)
        # Annotation set: human author information
        # Use val_grp (RHYTHM or DERIVED) so validation rows are labeled
        # with the waveform group actually being parsed, instead of the
        # previously hard-coded "RHYTHM".
        valrow = validate_xpath(
            aecg_doc,
            xmlnode_path + "/author/assignedEntity/assignedAuthorType/"
                           "assignedPerson/name",
            "urn:hl7-org:v3",
            "",
            new_validation_row(aecg.filename, val_grp, "ANNSET_AUTHOR_NAME"),
            failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations author: {valrow["VALUE"]}')
            aecgannset.person = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations author not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
        # Annotation set: device author information
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path + "/author/assignedEntity"
                                               "/assignedAuthorType/"
                                               "assignedDevice/"
                                               "manufacturerModelName",
                                "urn:hl7-org:v3",
                                "",
                                new_validation_row(
                                    aecg.filename,
                                    val_grp,
                                    "ANNSET_AUTHOR_DEVICE_MODEL"),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            tmp = valrow["VALUE"].replace("\n", "")
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device model: {tmp}')
            aecgannset.device["model"] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device model not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
        valrow = validate_xpath(aecg_doc,
                                xmlnode_path +
                                "/author/assignedEntity/"
                                "assignedAuthorType/assignedDevice/"
                                "playedManufacturedDevice/"
                                "manufacturerOrganization/name",
                                "urn:hl7-org:v3",
                                "",
                                new_validation_row(
                                    aecg.filename,
                                    val_grp,
                                    "ANNSET_AUTHOR_DEVICE_NAME"),
                                failcat="WARNING")
        if valrow["VALIOUT"] == "PASSED":
            tmp = valrow["VALUE"].replace("\n", "")
            logger.info(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device name: {tmp}')
            aecgannset.device["name"] = valrow["VALUE"]
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} annotations device name not found')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS),
                ignore_index=True)
        aecgannset, valpd = parse_annotations(aecg.filename, aecg.zipContainer,
                                              aecg_doc,
                                              aecgannset,
                                              path_prefix,
                                              xmlnode_path,
                                              anngrp["valgroup"],
                                              log_validation)
        if len(aecgannset.anns) == 0:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'{val_grp} no annotations set found')
        if log_validation:
            aecg.validatorResults = \
                aecg.validatorResults.append(valpd, ignore_index=True)
        if anngrp["valgroup"] == "RHYTHM":
            aecg.RHYTHMANNS.append(copy.deepcopy(aecgannset))
        else:
            aecg.DERIVEDANNS.append(copy.deepcopy(aecgannset))
    logger.debug(
        f'{aecg.filename},{aecg.zipContainer},'
        f'{val_grp}: searching annotations finished')
    return aecg
def parse_rhythm_waveform_annotations(aecg_doc: etree._ElementTree,
                                      aecg: Aecg,
                                      log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts rhythm waveform annotations

    This function parses the `aecg_doc` xml document searching for rhythm
    waveform annotation sets that includes in the returned
    :any:`Aecg`. Each annotation set is stored as an :any:`AecgAnnotationSet`
    in the :any:`Aecg.RHYTHMANNS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # Delegate to the generic annotation parser, targeting the RHYTHM
    # annotation sets located directly under the waveform series.
    rhythm_group = {
        "valgroup": "RHYTHM",
        "path_prefix": "./component/series/subjectOf/annotationSet",
    }
    return parse_waveform_annotations(
        aecg_doc, aecg, rhythm_group, log_validation)
def parse_derived_waveform_annotations(aecg_doc: etree._ElementTree,
                                       aecg: Aecg,
                                       log_validation: bool = False) -> Aecg:
    """Parses `aecg_doc` XML document and extracts derived waveform annotations

    This function parses the `aecg_doc` xml document searching for derived
    waveform annotation sets that includes in the returned
    :any:`Aecg`. Each annotation set is stored as an :any:`AecgAnnotationSet`
    in the :any:`Aecg.DERIVEDANNS` list of the returned :any:`Aecg`.

    Args:
        aecg_doc (etree._ElementTree): aECG XML document
        aecg (Aecg): The aECG object to update
        log_validation (bool, optional): Indicates whether to maintain the
            validation results in `aecg.validatorResults`. Defaults to
            False.

    Returns:
        Aecg: `aecg` updated with the information found in the xml document.
    """
    # Delegate to the generic annotation parser, targeting the DERIVED
    # annotation sets located under the derivation of the waveform series.
    derived_group = {
        "valgroup": "DERIVED",
        "path_prefix": "./component/series/derivation/"
                       "derivedSeries/subjectOf/annotationSet",
    }
    return parse_waveform_annotations(
        aecg_doc, aecg, derived_group, log_validation)
def read_aecg(xml_filename: str, zip_container: str = "",
              include_digits: bool = False,
              aecg_schema_filename: str = "",
              ns_clean: bool = True, remove_blank_text: bool = True,
              in_memory_xml: bool = False,
              log_validation: bool = False) -> Aecg:
    """Reads an aECG HL7 XML file and returns an `Aecg` object.

    Args:
        xml_filename (str): Path to the aECG xml file.
        zip_container (str, optional): Zipfile containing the aECG xml. Empty
            string if path points to an xml file in the system. Defaults to "".
        include_digits (bool, optional): Waveform values are not read nor
            parsed if False. Defaults to False.
        aecg_schema_filename (str, optional): xsd file to instantiate the
            lxml.etree.XMLSchema object for validating the aECG xml document.
            Schema validation is not performed if empty string is provided.
            Defaults to "".
        ns_clean (bool, optional): Indicates whether to clean up namespaces
            during XML parsing. Defaults to True.
        remove_blank_text (bool, optional): Indicates whether to clean up blank
            text during parsing. Defaults to True.
        in_memory_xml (bool, optional): If True, keeps a copy of the parsed XML
            in :attr:`xmldoc`.
        log_validation (bool, optional): If True, populates
            :attr:`validatorResults` with parsing information retrieved while
            reading and parsing the aECG xml file.

    Returns:
        Aecg: An aECG object instantiated with the information read from
        the `xml_filename` file.
    """
    # =======================================
    # Initialize Aecg object
    # =======================================
    aecg = Aecg()
    aecg.filename = xml_filename
    aecg.zipContainer = zip_container

    # =======================================
    # Read XML document
    # =======================================
    aecg_doc = None
    parser = etree.XMLParser(ns_clean=ns_clean,
                             remove_blank_text=remove_blank_text)
    if zip_container == "":
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'Reading aecg from {xml_filename} [no zip container]')
        valrow = new_validation_row(xml_filename, "READFILE", "FILENAME")
        valrow["VALUE"] = xml_filename
        try:
            aecg_doc = etree.parse(xml_filename, parser)
            valrow["VALIOUT"] = "PASSED"
            valrow["VALIMSG"] = ""
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'XML file loaded and parsed')
            if log_validation:
                aecg.validatorResults = aecg.validatorResults.append(
                    pd.DataFrame([valrow], columns=VALICOLS),
                    ignore_index=True)
        except Exception as ex:
            msg = f'Could not open or parse XML file: \"{ex}\"'
            logger.error(
                f'{aecg.filename},{aecg.zipContainer},{msg}')
            valrow["VALIOUT"] = "ERROR"
            valrow["VALIMSG"] = msg
            if log_validation:
                aecg.validatorResults = aecg.validatorResults.append(
                    pd.DataFrame([valrow], columns=VALICOLS),
                    ignore_index=True)
        # Add row with zipcontainer rule as PASSED because there is no zip
        # container to test
        valrow = new_validation_row(xml_filename, "READFILE", "ZIPCONTAINER")
        valrow["VALIOUT"] = "PASSED"
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)
    else:
        logger.info(
            f'{aecg.filename},{aecg.zipContainer},'
            f'Reading aecg from {xml_filename} '
            f'[zip container: {zip_container}]')
        valrow = new_validation_row(xml_filename, "READFILE", "ZIPCONTAINER")
        valrow["VALUE"] = zip_container
        try:
            with zipfile.ZipFile(zip_container, "r") as zf:
                logger.debug(
                    f'{aecg.filename},{aecg.zipContainer},'
                    f'Zip file opened')
                valrow2 = new_validation_row(xml_filename, "READFILE",
                                             "FILENAME")
                valrow2["VALUE"] = xml_filename
                try:
                    aecg0 = zf.read(xml_filename)
                    logger.debug(
                        f'{aecg.filename},{aecg.zipContainer},'
                        f'XML file read from zip file')
                    try:
                        aecg_doc = etree.fromstring(aecg0, parser)
                        logger.debug(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'XML file loaded and parsed')
                    except Exception as ex:
                        msg = f'Could not parse XML file: \"{ex}\"'
                        logger.error(
                            f'{aecg.filename},{aecg.zipContainer},{msg}')
                        valrow2["VALIOUT"] = "ERROR"
                        valrow2["VALIMSG"] = msg
                        if log_validation:
                            aecg.validatorResults = \
                                aecg.validatorResults.append(
                                    pd.DataFrame([valrow2], columns=VALICOLS),
                                    ignore_index=True)
                    else:
                        # Only mark the FILENAME rule as PASSED when parsing
                        # succeeded; previously this ran unconditionally,
                        # overwriting the ERROR status and appending a
                        # duplicate (PASSED) validation row after a failure.
                        valrow2["VALIOUT"] = "PASSED"
                        valrow2["VALIMSG"] = ""
                        if log_validation:
                            aecg.validatorResults = \
                                aecg.validatorResults.append(
                                    pd.DataFrame([valrow2], columns=VALICOLS),
                                    ignore_index=True)
                except Exception as ex:
                    msg = f'Could not find or read XML file in the zip file: '\
                          f'\"{ex}\"'
                    logger.error(
                        f'{aecg.filename},{aecg.zipContainer},{msg}')
                    valrow2["VALIOUT"] = "ERROR"
                    valrow2["VALIMSG"] = msg
                    if log_validation:
                        aecg.validatorResults = aecg.validatorResults.append(
                            pd.DataFrame([valrow2], columns=VALICOLS),
                            ignore_index=True)
                valrow["VALIOUT"] = "PASSED"
                valrow["VALIMSG"] = ""
                if log_validation:
                    aecg.validatorResults = aecg.validatorResults.append(
                        pd.DataFrame([valrow], columns=VALICOLS),
                        ignore_index=True)
        except Exception as ex:
            msg = f'Could not open zip file container: \"{ex}\"'
            logger.error(
                f'{aecg.filename},{aecg.zipContainer},{msg}')
            valrow["VALIOUT"] = "ERROR"
            valrow["VALIMSG"] = msg
            if log_validation:
                aecg.validatorResults = aecg.validatorResults.append(
                    pd.DataFrame([valrow], columns=VALICOLS),
                    ignore_index=True)

    if aecg_doc is not None:
        aecg.xmlfound = True
        # etree.fromstring returns an _Element; normalize to an _ElementTree
        # so downstream parsers can rely on getpath()/xpath() semantics.
        if not isinstance(aecg_doc, etree._ElementTree):
            aecg_doc = etree.ElementTree(aecg_doc)

    if (aecg.xmlfound and
        (not log_validation or
            ((aecg.validatorResults.shape[0] == 1 and
              aecg.validatorResults["VALIOUT"][0] == "PASSED") or
             (aecg.validatorResults.shape[0] == 2 and
              aecg.validatorResults["VALIOUT"][0] == "PASSED" and
              aecg.validatorResults["VALIOUT"][1] == "PASSED")))):
        # =======================================
        # ECG file loaded and parsed to XML doc successfully
        # =======================================
        aecg.xmlfound = True

        # =======================================
        # Keep parsed XML if requested
        # =======================================
        if in_memory_xml:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'XML document cached in memory')
            aecg.xmldoc = aecg_doc
        else:
            logger.debug(
                f'{aecg.filename},{aecg.zipContainer},'
                f'XML document not cached in memory')

        # =======================================
        # Validate XML doc if schema was provided
        # =======================================
        valrow = new_validation_row(xml_filename, "SCHEMA", "VALIDATION")
        valrow["VALIOUT"] = "WARNING"
        valrow["VALIMSG"] = "Schema not provided for validation"
        if aecg_schema_filename is not None and aecg_schema_filename != "":
            valrow["VALUE"] = aecg_schema_filename
            try:
                aecg_schema_doc = etree.parse(aecg_schema_filename)
                try:
                    aecg_schema = etree.XMLSchema(aecg_schema_doc)
                    if aecg_schema.validate(aecg_doc):
                        logger.info(
                            f'{aecg.filename},{aecg.zipContainer},'
                            f'XML file passed Schema validation')
                        aecg.isValid = "Y"
                        valrow["VALIOUT"] = "PASSED"
                        valrow["VALIMSG"] = ""
                    else:
                        msg = 'XML file did not pass Schema validation'
                        logger.warning(
                            f'{aecg.filename},{aecg.zipContainer},{msg}')
                        aecg.isValid = "N"
                        valrow["VALIOUT"] = "ERROR"
                        valrow["VALIMSG"] = msg
                except Exception as ex:
                    msg = f'XML Schema is not valid: \"{ex}\"'
                    logger.error(
                        f'{aecg.filename},{aecg.zipContainer},{msg}')
                    valrow["VALIOUT"] = "ERROR"
                    valrow["VALIMSG"] = msg
            except Exception as ex:
                msg = f'Schema file not found or parsing of schema failed: '\
                      f'\"{ex}\"'
                logger.error(
                    f'{aecg.filename},{aecg.zipContainer},{msg}')
                valrow["VALIOUT"] = "ERROR"
                valrow["VALIMSG"] = msg
        else:
            logger.warning(
                f'{aecg.filename},{aecg.zipContainer},'
                f'Schema not provided for XML validation')
        if log_validation:
            aecg.validatorResults = aecg.validatorResults.append(
                pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True)

        # =======================================
        # UUID and EGDTC and DEVICE
        # =======================================
        aecg = parse_generalinfo(aecg_doc, aecg, log_validation)

        # =======================================
        # USUBJID, SEX/GENDER, BIRTHTIME, RACE
        # =======================================
        aecg = parse_subjectinfo(aecg_doc, aecg, log_validation)

        # =======================================
        # TRTA
        # =======================================
        aecg = parse_trtainfo(aecg_doc, aecg, log_validation)

        # =======================================
        # CLINICAL TRIAL
        # =======================================
        aecg = parse_studyinfo(aecg_doc, aecg, log_validation)

        # =======================================
        # Timepoints
        # =======================================
        aecg = parse_timepoints(aecg_doc, aecg, log_validation)

        # =======================================
        # Rhythm Waveforms information
        # =======================================
        aecg = parse_rhythm_waveform_info(aecg_doc, aecg, log_validation)

        # =======================================
        # Derived Waveforms information
        # =======================================
        aecg = parse_derived_waveform_info(aecg_doc, aecg, log_validation)

        # =======================================
        # Rhythm Waveforms timeseries
        # =======================================
        aecg = parse_rhythm_waveform_timeseries(aecg_doc, aecg, include_digits,
                                                log_validation)

        # =======================================
        # Derived Waveforms timeseries
        # =======================================
        aecg = parse_derived_waveform_timeseries(aecg_doc, aecg,
                                                 include_digits,
                                                 log_validation)

        # =======================================
        # Rhythm and Derived Waveforms annotations
        # =======================================
        aecg = parse_rhythm_waveform_annotations(
            aecg_doc, aecg, log_validation)
        aecg = parse_derived_waveform_annotations(
            aecg_doc, aecg, log_validation)

    return aecg
| en | 0.625698 | I/O functions of the aecg package: tools for annotated ECG HL7 XML files This module implements helper functions to parse and read annotated electrocardiogram (ECG) stored in XML files following HL7 specification. See authors, license and disclaimer at the top level directory of this project. # Imports ===================================================================== # Python logging ============================================================== Parses `aecg_doc` XML document and extracts annotations Args: xml_filename (str): Filename of the aECG XML file. zip_filename (str): Filename of zip file containint the aECG XML file. If '', then xml file is not stored in a zip file. aecg_doc (etree._ElementTree): XML document of the aECG XML file. aecgannset (AecgAnnotationSet): Annotation set to which append found annotations. path_prefix (str): Prefix of xml path from which start searching for annotations. annsset_xmlnode_path (str): Path to xml node of the annotation set containing the annotations. valgroup (str, optional): Indicates whether to search annotations in rhythm or derived waveform. Defaults to "RHYTHM". log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with found annotations and dataframe with results of validation. 
# Annotations stored within a beat # Annotation code # Annotation type from top level value # Annotations type # Exclude annotations reporting interval values only # Annotations type # Annotations value # Annotations value units # annotations info from supporting ROI # annotations time encoding, lead and other info used # by value and supporting ROI # Annotations type # Annotations value # Annotations value units # annotations time encoding, lead and other info used # by value and supporting ROI # Annotations stored without an associated beat # Annotations code # Annotation type from top level value # Annotations type # if ann["wavecomponent"] == "": # ann["wavecomponent"] = valrow2["VALUE"] # else: # ann["wavecomponent2"] = valrow2["VALUE"] # Annotations value # Annotations value as attribute # Annotations value units # annotations info from supporting ROI # Annotations type # annotation values # annotations time encoding, lead and other info used by # value and supporting ROI Parses `aecg_doc` XML document and extracts general information This function parses the `aecg_doc` xml document searching for general information that includes in the returned `Aecg`: unique identifier (UUID), ECG date and time of collection (EGDTC), and device information. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. 
# ======================================= # UUID # ======================================= # ======================================= # EGDTC # ======================================= # ======================================= # DEVICE # ======================================= # DEVICE = {"manufacturer": "", "model": "", "software": ""} Parses `aecg_doc` XML document and extracts subject information This function parses the `aecg_doc` xml document searching for subject information that includes in the returned `Aecg`: subject unique identifier (USUBJID), gender, birthtime, and race. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. # ======================================= # USUBJID # ======================================= # ======================================= # SEX / GENDER # ======================================= # ======================================= # BIRTHTIME # ======================================= # age_in_years = aecg.subject_age_in_years() # ======================================= # RACE # ======================================= Parses `aecg_doc` XML document and extracts subject information This function parses the `aecg_doc` xml document searching for treatment information that includes in the returned `Aecg`. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. 
Parses `aecg_doc` XML document and extracts study information This function parses the `aecg_doc` xml document searching for study information that includes in the returned `Aecg`: study unique identifier (STUDYID), and study title. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. Parses `aecg_doc` XML document and extracts timepoints information This function parses the `aecg_doc` xml document searching for timepoints information that includes in the returned `Aecg`: absolute timepoint or study event information (TPT), relative timepoint or study event relative to a reference event (RTPT), and protocol timepoint information (PTPT). Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. # ======================================= # TPT # ======================================= # ======================================= # RTPT # ======================================= # ======================================= # PTPT # ======================================= Parses `aecg_doc` XML document and extracts rhythm waveform information This function parses the `aecg_doc` xml document searching for rhythm waveform information that includes in the returned `Aecg`: waveform identifier, code, display name, and date and time of collection. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. 
Returns: Aecg: `aecg` updated with the information found in the xml document. Parses `aecg_doc` XML document and extracts derived waveform information This function parses the `aecg_doc` xml document searching for derived waveform information that includes in the returned `Aecg`: waveform identifier, code, display name, and date and time of collection. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. Parses `aecg_doc` XML document and extracts rhythm's timeseries This function parses the `aecg_doc` xml document searching for rhythm waveform timeseries (sequences) information that includes in the returned :any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the :any:`Aecg.RHYTHMLEADS` list of the returned :any:`Aecg`. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update include_digits (bool, optional): Indicates whether to include the digits information in the returned `Aecg`. log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. 
# Retrieve time head info from value node # Retrieve time increment info from value node # Assume is a lead # Inherit last parsed RHYTHMTIME # Retrive lead origin info # Retrive lead scale info # Include digits if requested # Convert string of digits to list of integers # remove new lines # remove carriage retruns # remove tabs # collapse 2 or more spaces into 1 space char # and remove leading/trailing white spaces # Convert string into list of integers Parses `aecg_doc` XML document and extracts derived's timeseries This function parses the `aecg_doc` xml document searching for derived waveform timeseries (sequences) information that includes in the returned :any:`Aecg`. Each found sequence is stored as an :any:`AecgLead` in the :any:`Aecg.DERIVEDLEADS` list of the returned :any:`Aecg`. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update include_digits (bool, optional): Indicates whether to include the digits information in the returned `Aecg`. log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. # Retrieve time head info from value node # Retrieve time increment info from value node # Assume is a lead # Inherit last parsed DERIVEDTIME # Retrive lead origin info # Retrive lead scale info # Include digits if requested # Convert string of digits to list of integers # remove new lines # remove carriage retruns # remove tabs # collapse 2 or more spaces into 1 space char # and remove leading/trailing white spaces # Convert string into list of integers Parses `aecg_doc` XML document and extracts waveform annotations This function parses the `aecg_doc` xml document searching for waveform annotation sets that includes in the returned :any:`Aecg`. 
As indicated in the `anngrp` parameter, each annotation set is stored as an :any:`AecgAnnotationSet` in the :any:`Aecg.RHYTHMANNS` or :any:`Aecg.DERIVEDANNS` list of the returned :any:`Aecg`. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update anngrp (Dict): includes a `valgroup` key indicating whether the rhythm or derived waveform annotations should be located, and a `path_prefix` with the xml path prefix for which start searching for annotation sets in the `aecg_doc` xml document. log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. # Annotation set: human author information # Annotation set: device author information Parses `aecg_doc` XML document and extracts rhythm waveform annotations This function parses the `aecg_doc` xml document searching for rhtyhm waveform annotation sets that includes in the returned :any:`Aecg`. Each annotation set is stored as an :any:`AecgAnnotationSet` in the :any:`Aecg.RHYTHMANNS` list of the returned :any:`Aecg`. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. Parses `aecg_doc` XML document and extracts derived waveform annotations This function parses the `aecg_doc` xml document searching for derived waveform annotation sets that includes in the returned :any:`Aecg`. Each annotation set is stored as an :any:`AecgAnnotationSet` in the :any:`Aecg.DERIVEDANNS` list of the returned :any:`Aecg`. 
Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. Reads an aECG HL7 XML file and returns an `Aecg` object. Args: xml_filename (str): Path to the aECG xml file. zip_container (str, optional): Zipfile containing the aECG xml. Empty string if path points to an xml file in the system. Defaults to "". include_digits (bool, optional): Waveform values are not read nor parsed if False. Defaults to False. aecg_schema_filename (str, optional): xsd file to instantiate the lxml.etree.XMLSchema object for validating the aECG xml document. Schema validation is not performed if empty string is provided. Defaults to "". ns_clean (bool, optional): Indicates whether to clean up namespaces during XML parsing. Defaults to True. remove_blank_text (bool, optional): Indicates whether to clean up blank text during parsing. Defaults to True. in_memory_xml (bool, optional): If True, keeps a copy of the parsed XML in :attr:`xmldoc`. log_validation (bool, optional): If True, populates :attr:`validatorResults` with parsing information retrieved while reading and parsing the aECG xml file. Returns: Aecg: An aECG object instantiated with the information read from the `xml_filename` file. 
# ======================================= # Initialize Aecg object # ======================================= # ======================================= # Read XML document # ======================================= # Add row with zipcontainer rule as PASSED because there is no zip # container to test # ======================================= # ECG file loaded and parsed to XML doc successfully # ======================================= # ======================================= # Keep parsed XML if requested # ======================================= # ======================================= # Validate XML doc if schema was provided # ======================================= # ======================================= # UUID and EGDTC and DEVICE # ======================================= # ======================================= # USUBJID, SEX/GENDER, BIRTHTIME, RACE # ======================================= # ======================================= # TRTA # ======================================= # ======================================= # CLINICAL TRIAL # ======================================= # ======================================= # Timepoints # ======================================= # ======================================= # Rhythm Waveforms information # ======================================= # ======================================= # Derived Waveforms information # ======================================= # ======================================= # Rhythm Waveforms timeseries # ======================================= # ======================================= # Derived Waveforms timeseries # ======================================= # ======================================= # Rhythm and Derived Waveforms annotations # ======================================= | 2.779684 | 3 |
infra/packages/ninja.py | vusec/instrumentation-infra | 12 | 6618074 | <filename>infra/packages/ninja.py
import os
import shutil
from ..package import Package
from ..util import run, download
class Ninja(Package):
    """
    :identifier: ninja-<version>
    :param version: version to download
    """

    def __init__(self, version: str):
        self.version = version

    def ident(self):
        return f'ninja-{self.version}'

    def fetch(self, ctx):
        # GitHub publishes release tarballs as v<version>.tar.gz.
        archive = f'v{self.version}.tar.gz'
        download(ctx, f'https://github.com/ninja-build/ninja/archive/{archive}')
        run(ctx, ['tar', '-xf', archive])
        # The tarball unpacks into ninja-<version>; normalize to 'src'.
        shutil.move(f'ninja-{self.version}', 'src')
        os.remove(archive)

    def build(self, ctx):
        # Bootstrap build out-of-tree, inside obj/.
        os.makedirs('obj', exist_ok=True)
        os.chdir('obj')
        run(ctx, '../src/configure.py --bootstrap')

    def install(self, ctx):
        os.makedirs('install/bin', exist_ok=True)
        shutil.copy('obj/ninja', 'install/bin')

    def is_fetched(self, ctx):
        return os.path.exists('src')

    def is_built(self, ctx):
        return os.path.exists('obj/ninja')

    def is_installed(self, ctx):
        return os.path.exists('install/bin/ninja')
| <filename>infra/packages/ninja.py
import os
import shutil
from ..package import Package
from ..util import run, download
class Ninja(Package):
    """
    :identifier: ninja-<version>
    :param version: version to download
    """

    def __init__(self, version: str):
        self.version = version

    def ident(self):
        return f'ninja-{self.version}'

    def fetch(self, ctx):
        # GitHub publishes release tarballs as v<version>.tar.gz.
        archive = f'v{self.version}.tar.gz'
        download(ctx, f'https://github.com/ninja-build/ninja/archive/{archive}')
        run(ctx, ['tar', '-xf', archive])
        # The tarball unpacks into ninja-<version>; normalize to 'src'.
        shutil.move(f'ninja-{self.version}', 'src')
        os.remove(archive)

    def build(self, ctx):
        # Bootstrap build out-of-tree, inside obj/.
        os.makedirs('obj', exist_ok=True)
        os.chdir('obj')
        run(ctx, '../src/configure.py --bootstrap')

    def install(self, ctx):
        os.makedirs('install/bin', exist_ok=True)
        shutil.copy('obj/ninja', 'install/bin')

    def is_fetched(self, ctx):
        return os.path.exists('src')

    def is_built(self, ctx):
        return os.path.exists('obj/ninja')

    def is_installed(self, ctx):
        return os.path.exists('install/bin/ninja')
| en | 0.357983 | :identifier: ninja-<version> :param version: version to download | 2.572129 | 3 |
classify/run.py | schollz/hens | 40 | 6618075 | import random
import shutil
import json
from SimpleCV import *
import glob
class Trainer():
    """Bundles SimpleCV feature extractors and classifiers for the image classes."""

    def __init__(self, classes, trainPaths):
        self.classes = classes
        self.trainPaths = trainPaths

    def getExtractors(self):
        # Hue and edge histograms plus Haar-like features from the bundled cascade.
        return [
            HueHistogramFeatureExtractor(32),
            EdgeHistogramFeatureExtractor(32),
            HaarLikeFeatureExtractor(fname='simplecv/haar.txt'),
        ]

    def getClassifiers(self, extractors):
        # One of each supported classifier, all sharing the same extractors.
        return [
            SVMClassifier(extractors),
            TreeClassifier(extractors),
            NaiveBayesClassifier(extractors),
            KNNClassifier(extractors),
        ]

    def train(self):
        # Build the classifier set, then train every one on the train folders.
        self.classifiers = self.getClassifiers(self.getExtractors())
        for clf in self.classifiers:
            clf.train(self.trainPaths, self.classes, verbose=False)

    def test(self, testPaths):
        # Print each classifier's accuracy summary on the test folders.
        for clf in self.classifiers:
            print(clf.test(testPaths, self.classes, verbose=False))

    def visualizeResults(self, classifier, imgs):
        # Overlay the predicted class name on each image and display the set.
        for img in imgs:
            label = classifier.classify(img)
            img.drawText(label, 10, 10, fontsize=60, color=Color.BLUE)
        imgs.show()
def main():
    """Rebuild the train/test image tree from data/, train every classifier,
    report accuracies, then persist and re-evaluate the tree classifier.

    Side effects: deletes and recreates ./images, writes ./tree.dat,
    prints progress and per-image predictions to stdout.
    """
    classes = ['none','chicken']

    print("Moving images...")
    # Start from a clean images/<class>/{train,test}/ tree.
    if os.path.exists("images"):
        shutil.rmtree("images")
    os.makedirs("images")
    for c in classes:
        os.makedirs(os.path.join("images",c))
        os.makedirs(os.path.join("images",c,"train"))
        os.makedirs(os.path.join("images",c,"test"))
    # Each data/*.txt is a JSON label file with a sibling .jpg of the same name.
    for txt in glob.glob("data/*.txt"):
        j = json.load(open(txt,'r'))
        imageName = txt.replace(".txt",".jpg")
        # Random ~80/20 train/test split.
        trainOrTest = "train"
        if random.random() < 0.2:
            trainOrTest = "test"
        if 'none' in j['Presence']:
            shutil.copyfile(imageName,os.path.join("images","none",trainOrTest,imageName.split("/")[-1]))
        else:
            shutil.copyfile(imageName,os.path.join("images","chicken",trainOrTest,imageName.split("/")[-1]))

    trainPaths = ['./images/'+c+'/train/' for c in classes ]
    testPaths = ['./images/'+c+'/test/' for c in classes ]

    print("Training %d classes..." % len(trainPaths))
    trainer = Trainer(classes,trainPaths)
    trainer.train()
    # Index 1 is the TreeClassifier (see Trainer.getClassifiers ordering).
    tree = trainer.classifiers[1]

    imgs = ImageSet()
    for p in testPaths:
        imgs += ImageSet(p)
    random.shuffle(imgs)

    print("Testing %d classes with all classifiers..." % len(testPaths))
    trainer.test(testPaths)
    # print(trainer.visualizeResults(tree,imgs))
    tree.save("tree.dat")

    print("Testing with TreeClassifier...")
    classifierFile = 'tree.dat'
    classifier = TreeClassifier.load(classifierFile)
    count = 0
    correct_count = 0
    for path in glob.glob("images/*/test/*jpg"):
        guess = classifier.classify(Image(path))
        print(path,guess)
        # Correct when the predicted class name occurs in the path
        # (paths look like images/<class>/test/<file>.jpg).
        if guess in path:
            correct_count += 1
        count += 1
    print("%d correct out of %d = %2.1f accuracy" %(correct_count,count,100.0*correct_count/count))
# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| import random
import shutil
import json
from SimpleCV import *
import glob
class Trainer():
    """Bundles SimpleCV feature extractors and classifiers for the image classes."""

    def __init__(self, classes, trainPaths):
        self.classes = classes
        self.trainPaths = trainPaths

    def getExtractors(self):
        # Hue and edge histograms plus Haar-like features from the bundled cascade.
        return [
            HueHistogramFeatureExtractor(32),
            EdgeHistogramFeatureExtractor(32),
            HaarLikeFeatureExtractor(fname='simplecv/haar.txt'),
        ]

    def getClassifiers(self, extractors):
        # One of each supported classifier, all sharing the same extractors.
        return [
            SVMClassifier(extractors),
            TreeClassifier(extractors),
            NaiveBayesClassifier(extractors),
            KNNClassifier(extractors),
        ]

    def train(self):
        # Build the classifier set, then train every one on the train folders.
        self.classifiers = self.getClassifiers(self.getExtractors())
        for clf in self.classifiers:
            clf.train(self.trainPaths, self.classes, verbose=False)

    def test(self, testPaths):
        # Print each classifier's accuracy summary on the test folders.
        for clf in self.classifiers:
            print(clf.test(testPaths, self.classes, verbose=False))

    def visualizeResults(self, classifier, imgs):
        # Overlay the predicted class name on each image and display the set.
        for img in imgs:
            label = classifier.classify(img)
            img.drawText(label, 10, 10, fontsize=60, color=Color.BLUE)
        imgs.show()
def main():
    """Rebuild the train/test image tree from data/, train every classifier,
    report accuracies, then persist and re-evaluate the tree classifier.

    Side effects: deletes and recreates ./images, writes ./tree.dat,
    prints progress and per-image predictions to stdout.
    """
    classes = ['none','chicken']

    print("Moving images...")
    # Start from a clean images/<class>/{train,test}/ tree.
    if os.path.exists("images"):
        shutil.rmtree("images")
    os.makedirs("images")
    for c in classes:
        os.makedirs(os.path.join("images",c))
        os.makedirs(os.path.join("images",c,"train"))
        os.makedirs(os.path.join("images",c,"test"))
    # Each data/*.txt is a JSON label file with a sibling .jpg of the same name.
    for txt in glob.glob("data/*.txt"):
        j = json.load(open(txt,'r'))
        imageName = txt.replace(".txt",".jpg")
        # Random ~80/20 train/test split.
        trainOrTest = "train"
        if random.random() < 0.2:
            trainOrTest = "test"
        if 'none' in j['Presence']:
            shutil.copyfile(imageName,os.path.join("images","none",trainOrTest,imageName.split("/")[-1]))
        else:
            shutil.copyfile(imageName,os.path.join("images","chicken",trainOrTest,imageName.split("/")[-1]))

    trainPaths = ['./images/'+c+'/train/' for c in classes ]
    testPaths = ['./images/'+c+'/test/' for c in classes ]

    print("Training %d classes..." % len(trainPaths))
    trainer = Trainer(classes,trainPaths)
    trainer.train()
    # Index 1 is the TreeClassifier (see Trainer.getClassifiers ordering).
    tree = trainer.classifiers[1]

    imgs = ImageSet()
    for p in testPaths:
        imgs += ImageSet(p)
    random.shuffle(imgs)

    print("Testing %d classes with all classifiers..." % len(testPaths))
    trainer.test(testPaths)
    # print(trainer.visualizeResults(tree,imgs))
    tree.save("tree.dat")

    print("Testing with TreeClassifier...")
    classifierFile = 'tree.dat'
    classifier = TreeClassifier.load(classifierFile)
    count = 0
    correct_count = 0
    for path in glob.glob("images/*/test/*jpg"):
        guess = classifier.classify(Image(path))
        print(path,guess)
        # Correct when the predicted class name occurs in the path
        # (paths look like images/<class>/test/<file>.jpg).
        if guess in path:
            correct_count += 1
        count += 1
    print("%d correct out of %d = %2.1f accuracy" %(correct_count,count,100.0*correct_count/count))
# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| en | 0.346981 | # print(trainer.visualizeResults(tree,imgs)) | 2.642088 | 3 |
class7/json-vid-5-eapi-lib.py | brutalic/pynet_brutal | 0 | 6618076 | <filename>class7/json-vid-5-eapi-lib.py
#!/usr/bin/python
"""Exercise Arista eAPI via pyeapi: dump running configs, run show
commands, and push VLAN configuration to two lab switches.

Fix: the original used Python-2-only print statements, which are a
SyntaxError under Python 3; single-argument print(...) calls produce
identical output on Python 2 as well.
"""
import pyeapi
import ssl
from pprint import pprint

# Lab switches use self-signed certificates; disable TLS verification.
ssl._create_default_https_context = ssl._create_unverified_context

#ip = '172.16.58.3'
#port = '443'
#username = 'admin1'
#password = '<PASSWORD>'

# Connection parameters are read from the pyeapi config (named connections).
pynet_sw3 = pyeapi.connect_to("pynet-sw3")
pynet_sw4 = pyeapi.connect_to("pynet-sw4")
print(pynet_sw3)
print(pynet_sw4)

#print pyeapi.config_for('pynet-sw3')

# Dump sw3's running configuration line by line.
config3 = pynet_sw3.get_config()
for line in config3:
    print(line)

sw3_version = pynet_sw3.enable("show version")
pprint(sw3_version)

sw3_arp = pynet_sw3.enable("show arp")
pprint(sw3_arp)

# Create two VLANs on sw4, then dump its configuration to confirm.
vlan_commands = ['vlan 333', 'name dude', 'vlan 444', 'name dudesky']
sw4_vlan = pynet_sw4.config(vlan_commands)
print(sw4_vlan)

config4 = pynet_sw4.get_config()
for line in config4:
    print(line)
| <filename>class7/json-vid-5-eapi-lib.py
#!/usr/bin/python
"""Exercise Arista eAPI via pyeapi: dump running configs, run show
commands, and push VLAN configuration to two lab switches.

Fix: the original used Python-2-only print statements, which are a
SyntaxError under Python 3; single-argument print(...) calls produce
identical output on Python 2 as well.
"""
import pyeapi
import ssl
from pprint import pprint

# Lab switches use self-signed certificates; disable TLS verification.
ssl._create_default_https_context = ssl._create_unverified_context

#ip = '172.16.58.3'
#port = '443'
#username = 'admin1'
#password = '<PASSWORD>'

# Connection parameters are read from the pyeapi config (named connections).
pynet_sw3 = pyeapi.connect_to("pynet-sw3")
pynet_sw4 = pyeapi.connect_to("pynet-sw4")
print(pynet_sw3)
print(pynet_sw4)

#print pyeapi.config_for('pynet-sw3')

# Dump sw3's running configuration line by line.
config3 = pynet_sw3.get_config()
for line in config3:
    print(line)

sw3_version = pynet_sw3.enable("show version")
pprint(sw3_version)

sw3_arp = pynet_sw3.enable("show arp")
pprint(sw3_arp)

# Create two VLANs on sw4, then dump its configuration to confirm.
vlan_commands = ['vlan 333', 'name dude', 'vlan 444', 'name dudesky']
sw4_vlan = pynet_sw4.config(vlan_commands)
print(sw4_vlan)

config4 = pynet_sw4.get_config()
for line in config4:
    print(line)
| en | 0.261798 | #!/usr/bin/python #ip = '172.16.58.3' #port = '443' #username = 'admin1' #password = '<PASSWORD>' #print pyeapi.config_for('pynet-sw3') | 2.088856 | 2 |
nearpy/hashes/permutation/permutation.py | WajihCZ/NearPy | 0 | 6618077 | <reponame>WajihCZ/NearPy
# -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#from bitarray import bitarray
from nearpy.hashes.permutation.permute import Permute
from nearpy.hashes.permutation.permutedIndex import PermutedIndex
class Permutation:
    """Registry of PermutedIndex objects, keyed by an lshash's hash_name."""

    def __init__(self):
        # Maps hash_name -> PermutedIndex for the corresponding lshash.
        self.permutedIndexs = {}

    def build_permuted_index(self, lshash, buckets, num_permutation,
                             beam_size, num_neighbour):
        """Create a PermutedIndex for `lshash` and register it under its hash_name."""
        index = PermutedIndex(lshash, buckets, num_permutation,
                              beam_size, num_neighbour)
        self.permutedIndexs[lshash.hash_name] = index

    def get_neighbour_keys(self, hash_name, bucket_key):
        """Query the index registered under `hash_name` for neighbours of `bucket_key`."""
        index = self.permutedIndexs[hash_name]
        return index.get_neighbour_keys(bucket_key, index.num_neighbour)
| # -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#from bitarray import bitarray
from nearpy.hashes.permutation.permute import Permute
from nearpy.hashes.permutation.permutedIndex import PermutedIndex
class Permutation:
    """Registry of PermutedIndex objects, keyed by an lshash's hash_name."""

    def __init__(self):
        # Maps hash_name -> PermutedIndex for the corresponding lshash.
        self.permutedIndexs = {}

    def build_permuted_index(self, lshash, buckets, num_permutation,
                             beam_size, num_neighbour):
        """Create a PermutedIndex for `lshash` and register it under its hash_name."""
        index = PermutedIndex(lshash, buckets, num_permutation,
                              beam_size, num_neighbour)
        self.permutedIndexs[lshash.hash_name] = index

    def get_neighbour_keys(self, hash_name, bucket_key):
        """Query the index registered under `hash_name` for neighbours of `bucket_key`."""
        index = self.permutedIndexs[hash_name]
        return index.get_neighbour_keys(bucket_key, index.num_neighbour)
fondo_api/tests/test_user_views.py | Fonmon/Fondo-API | 0 | 6618078 | import json
from mock import patch
from django.urls import reverse
from django.test.client import encode_multipart
from rest_framework import status
from django.core import mail
from fondo_api.models import *
from fondo_api.tests.abstract_test import AbstractTest
from fondo_api.services.mail import MailService
from fondo_api.enums import EmailTemplate
# URL pattern names resolved via reverse() throughout these tests; they must
# match the names registered in the application's URLconf.
view_user = 'view_user'
view_user_detail = 'view_user_detail'
view_user_activate = 'view_user_activate'
view_user_apps = 'view_user_apps'
class UserViewTest(AbstractTest):
    def setUp(self):
        """Create fixture users, authenticate, and prepare request payloads.

        create_user/create_basic_users (from AbstractTest) seed 11 users in
        total — the count assertions in the tests below rely on this.
        """
        self.create_user()
        self.create_basic_users();
        self.token = self.get_token('<EMAIL>','password')
        # Valid payload for creating a brand-new user (role 2 displays as TREASURER).
        self.object_json = {
            'first_name': '<NAME>',
            'last_name': 'Last Name',
            'identification': 123,
            'email': '<EMAIL>',
            'username': '<EMAIL>',
            'role': 2
        }
        # Payload whose identification (99999) duplicates the primary user's —
        # expected to be rejected with 409.
        self.object_json_identification_r = {
            'first_name': 'Foo Name 2',
            'last_name': 'Last Name 2',
            'identification': 99999,
            'email': '<EMAIL>',
            'username': '<EMAIL>',
            'role': 2
        }
        # Payload whose email duplicates an existing user's — expected 409.
        self.object_json_email_r = {
            'first_name': 'Foo Name 3',
            'last_name': 'Last Name 3',
            'identification': 1234,
            'email': '<EMAIL>',
            'username': '<EMAIL>',
            'role': 2
        }
        # Valid PATCH payload carrying both personal and finance sections;
        # the 'type' key is added per-test to select which section applies.
        self.object_json_user_update = {
            'personal': {
                'identification':123,
                'first_name': 'Foo Name update',
                'last_name': 'Last Name update',
                'email': '<EMAIL>',
                'role': 2,
                'birthdate': '1995-11-07'
            },
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota': 1000,
                'utilized_quota': 500
            }
        }
        # PATCH payload whose finance values match the stored ones (no-op update).
        self.object_json_user_update_same_finance = {
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota':1000,
                'available_quota': 500,
                'utilized_quota':0
            },
            'personal': {
                'identification':123,
                'first_name': '<NAME>',
                'last_name': '<NAME>',
                'email': '<EMAIL>',
                'role': 2,
            }
        }
        # PATCH payload reusing another user's email — expected 409.
        self.object_json_user_update_email_r = {
            'personal': {
                'identification':12312451241243,
                'first_name': '<NAME>',
                'last_name': '<NAME>',
                'email': '<EMAIL>',
                'role': 2
            },
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota': 1000,
                'utilized_quota': 500
            }
        }
        # PATCH payload reusing another user's identification — expected 409.
        self.object_json_user_update_identification_r = {
            'personal': {
                'identification':123,
                'first_name': '<NAME>',
                'last_name': '<NAME>',
                'email': '<EMAIL>',
                'role': 2
            },
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota': 1000,
                'utilized_quota': 500
            }
        }
    @patch.object(MailService, 'send_mail')
    def test_success_post(self, mock):
        """POST /user creates an inactive user and sends the activation email."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.first_name,'<NAME>')
        self.assertEqual(user.identification,123)
        self.assertEqual(user.email, "<EMAIL>")
        self.assertEqual(user.role, 2)
        self.assertEqual(user.get_role_display(),'TREASURER')
        # New accounts stay inactive until the activation key is used.
        self.assertIsNotNone(user.key_activation)
        self.assertFalse(user.is_active)
        self.assertTrue(mock.called)
        mock.assert_called_once_with(EmailTemplate.USER_ACTIVATION, [user.email], {
            'user_full_name': '{} {}'.format(user.first_name, user.last_name),
            'user_id': user.id,
            'user_key': user.key_activation,
            'host_url': 'http://localhost:3000'
        })
        # 11 fixture users + the new one.
        self.assertEqual(len(UserProfile.objects.all()), 12)
        self.assertEqual(len(UserFinance.objects.all()), 12)
    @patch.object(MailService, 'send_mail', return_value=False)
    def test_invalid_email(self,mock):
        """If the activation mail cannot be sent, user creation is rolled back with 409."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type = 'application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.assertEqual(response.data['message'], 'Invalid email')
        # No new rows: still only the 11 fixture users.
        self.assertEqual(len(UserProfile.objects.all()), 11)
        self.assertEqual(len(UserFinance.objects.all()), 11)
    def test_unsuccess_post_identification(self):
        """POST with a duplicated identification is rejected with 409."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json_identification_r),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_409_CONFLICT)
        self.assertEqual(response.data['message'],'Identification/email already exists')
        self.assertEqual(len(UserProfile.objects.all()), 11)
        self.assertEqual(len(UserFinance.objects.all()), 11)
    def test_unsuccess_post_email(self):
        """POST with a duplicated email is rejected with 409."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json_email_r),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_409_CONFLICT)
        self.assertEqual(response.data['message'],'Identification/email already exists')
        self.assertEqual(len(UserProfile.objects.all()), 11)
        self.assertEqual(len(UserFinance.objects.all()), 11)
    def test_get_users(self):
        """GET page 1 returns 10 users per page with the expected fields."""
        response = self.client.get(
            "%s?page=1" % reverse(view_user),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data['list']), 10)
        self.assertEqual(response.data['num_pages'], 2)
        for user in response.data['list']:
            self.assertIsNotNone(user['id'])
            self.assertIsNotNone(user['identification'])
            self.assertIsNotNone(user['full_name'])
            self.assertIsNotNone(user['email'])
            self.assertIsNotNone(user['role'])
    def test_get_users_all(self):
        """GET without a page parameter returns all 11 users unpaginated."""
        response = self.client.get(
            reverse(view_user),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data['list']), 11)
        for user in response.data['list']:
            self.assertIsNotNone(user['id'])
            self.assertIsNotNone(user['identification'])
            self.assertIsNotNone(user['full_name'])
            self.assertIsNotNone(user['email'])
            self.assertIsNotNone(user['role'])
    def test_get_users_empty(self):
        """GET a page beyond the last returns 200 with an empty list."""
        response = self.client.get(
            "%s?page=3" % reverse(view_user),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data['list']),0)
        self.assertEqual(response.data['num_pages'],2)
    def test_get_users_error_pagination(self):
        """GET with page=0 is rejected with 400 and an explanatory message."""
        response = self.client.get(
            "%s?page=0" % reverse(view_user),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data['message'],'Page number must be greater than 0')
    def test_get_user(self):
        """GET /user/1 returns the fixture admin's personal and finance data."""
        response = self.client.get(
            reverse(view_user_detail,kwargs={'id': 1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['user']['id'],1)
        self.assertEqual(response.data['user']['identification'],99999)
        self.assertEqual(response.data['user']['first_name'],'<NAME>')
        self.assertEqual(response.data['user']['last_name'],'<NAME>')
        self.assertEqual(response.data['user']['email'],'<EMAIL>')
        self.assertEqual(response.data['user']['role'],0)
        self.assertEqual(response.data['user']['role_display'], 'ADMIN')
        self.assertEqual(response.data['finance']['contributions'],2000)
        self.assertEqual(response.data['finance']['balance_contributions'],2000)
        self.assertEqual(response.data['finance']['total_quota'],1000)
        self.assertEqual(response.data['finance']['available_quota'],500)
        self.assertEqual(response.data['finance']['utilized_quota'],0)
    def test_get_session_user(self):
        """GET /user/-1 resolves to the authenticated (session) user's own data."""
        response = self.client.get(
            reverse(view_user_detail,kwargs={'id': -1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['user']['id'],1)
        self.assertEqual(response.data['user']['identification'],99999)
        self.assertEqual(response.data['user']['first_name'],'<NAME>')
        self.assertEqual(response.data['user']['last_name'],'<NAME>')
        self.assertEqual(response.data['user']['email'],'<EMAIL>')
        self.assertEqual(response.data['user']['role'],0)
        self.assertEqual(response.data['user']['role_display'], 'ADMIN')
        self.assertEqual(response.data['finance']['contributions'],2000)
        self.assertEqual(response.data['finance']['balance_contributions'],2000)
        self.assertEqual(response.data['finance']['total_quota'],1000)
        self.assertEqual(response.data['finance']['available_quota'],500)
        self.assertEqual(response.data['finance']['utilized_quota'],0)
    def test_get_user_not_found(self):
        """GET for a nonexistent user id returns 404."""
        response = self.client.get(
            reverse(view_user_detail,kwargs={'id': 2}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_delete_user(self):
        """DELETE deactivates the user; further access and login then fail."""
        # Unknown id -> 404.
        response = self.client.delete(
            reverse(view_user_detail,kwargs={'id': 2}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        # Deleting user 1 soft-deletes: the row stays but is_active is cleared.
        response = self.client.delete(
            reverse(view_user_detail,kwargs={'id': 1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = User.objects.get(id = 1)
        self.assertFalse(user.is_active)
        # The old token no longer grants access.
        response = self.client.get(
            reverse(view_user_detail,kwargs={'id': 1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_401_UNAUTHORIZED)
        # And a fresh login must fail (get_token raises KeyError for it).
        try:
            self.get_token('<EMAIL>','password')
            self.fail('That account is inactive')
        except KeyError:
            pass
    def test_patch_user(self):
        """PATCH with type=personal updates profile fields (404 for unknown id)."""
        self.object_json_user_update['type'] = 'personal'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 2}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = UserProfile.objects.get(id=1)
        self.assertEqual(user.first_name,'<NAME>')
        self.assertEqual(user.last_name,'<NAME>')
        self.assertEqual(user.email,'<EMAIL>')
        # The username is kept in sync with the email.
        self.assertEqual(user.username,'<EMAIL>')
    def test_patch_user_finance(self):
        """PATCH with type=finance updates the finance row and derives available_quota."""
        self.object_json_user_update['type'] = 'finance'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 2}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_finance = UserFinance.objects.get(user_id=1)
        self.assertEqual(user_finance.contributions, 2000)
        self.assertEqual(user_finance.balance_contributions, 2000)
        self.assertEqual(user_finance.total_quota, 1000)
        self.assertEqual(user_finance.utilized_quota, 500)
        # available_quota = total_quota - utilized_quota.
        self.assertEqual(user_finance.available_quota, 500)
    def test_patch_user_not_finance(self):
        """A personal PATCH followed by a no-op finance PATCH leaves finance intact."""
        self.object_json_user_update_same_finance['type'] = 'personal'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_same_finance),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = UserProfile.objects.get(id=1)
        self.assertEqual(user.first_name,'<NAME>')
        self.assertEqual(user.last_name,'<NAME>')
        self.assertEqual(user.email,'<EMAIL>')
        self.assertEqual(user.username,'<EMAIL>')
        # Now send the finance section with values equal to what is stored.
        self.object_json_user_update_same_finance['type'] = 'finance'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_same_finance),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_finance = UserFinance.objects.get(user_id=1)
        self.assertEqual(user_finance.contributions, 2000)
        self.assertEqual(user_finance.balance_contributions, 2000)
        self.assertEqual(user_finance.total_quota, 1000)
        self.assertEqual(user_finance.utilized_quota, 0)
        self.assertEqual(user_finance.available_quota, 500)
    @patch.object(MailService, 'send_mail')
    def test_patch_user_conflict(self, mock):
        """PATCH reusing another user's identification or email returns 409."""
        # Create a second user whose identification/email will collide below.
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        self.object_json_user_update_identification_r['type'] = 'personal'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_identification_r),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        self.object_json_user_update_email_r['type'] = 'personal'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_email_r),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_patch_preferences_not_found(self):
response = self.client.patch(
reverse(view_user_detail,kwargs={'id': 111}),
data='{"type": "preferences", "preferences":{"notifications": true}}',
content_type='application/json',
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_patch_preferences_notifications(self):
        """Toggling the notifications preference on and then off is persisted."""
        # Enable notifications (colors are part of the same preferences payload).
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data='''{"type": "preferences", "preferences":{"notifications": true, "primary_color": "#fff",
            "secondary_color": "#000"}}''',
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_preference = UserPreference.objects.get(user_id=1)
        self.assertTrue(user_preference.notifications, True)
        # Disable notifications again and verify the flag flips.
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data='''{"type": "preferences", "preferences":{"notifications": false, "primary_color": "#fff",
            "secondary_color": "#000"}}''',
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_preference = UserPreference.objects.get(user_id=1)
        self.assertFalse(user_preference.notifications, False)
    @patch.object(MailService, 'send_mail')
    def test_activation_successful(self, mock):
        """Posting the correct activation key activates the user and hashes the password."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # Activation requires the matching identification and the emailed key.
        obj = {
            'password':'<PASSWORD>',
            'identification':123,
            'key':user.key_activation
        }
        response = self.client.post(
            reverse(view_user_activate,kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_200_OK)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.is_active,True)
        # Password is stored hashed and the one-time key is cleared.
        self.assertTrue('pbkdf2_sha256' in user.password)
        self.assertIsNone(user.key_activation)
    @patch.object(MailService, 'send_mail')
    def test_activation_unsuccessful_1(self, mock):
        """Activation with a wrong identification (1234 vs 123) is rejected with 404."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # Correct key but mismatching identification.
        obj = {
            'password':'<PASSWORD>',
            'identification':1234,
            'key':user.key_activation
        }
        response = self.client.post(
            reverse(view_user_activate,kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
        user = UserProfile.objects.get(identification = 123)
        # User remains inactive, password unset, activation key retained.
        self.assertEqual(user.is_active,False)
        self.assertFalse('<PASSWORD>' in user.password)
        self.assertIsNotNone(user.key_activation)
    @patch.object(MailService, 'send_mail')
    def test_activation_unsuccessful_2(self, mock):
        """Activation with an empty key is rejected with 404 and leaves the user inactive."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # Empty activation key (and wrong identification).
        obj = {
            'password':'<PASSWORD>',
            'identification':1234,
            'key': ''
        }
        response = self.client.post(
            reverse(view_user_activate,kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.is_active,False)
        self.assertFalse('pbkdf2_sha256' in user.password)
        self.assertIsNotNone(user.key_activation)
    @patch.object(MailService, 'send_mail')
    def test_activation_unsuccessful_3(self, mock):
        """Activation with the key missing from the payload is rejected with 404."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # No 'key' field at all.
        obj = {
            'password':'<PASSWORD>',
            'identification':1234
        }
        response = self.client.post(
            reverse(view_user_activate, kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.is_active,False)
        self.assertFalse('pbkdf2_sha256' in user.password)
        self.assertIsNotNone(user.key_activation)
def create_test_file(self):
try:
file = open("testfile.txt","w")
file.write("1\t100\t1000\t200\t300\r\n")
file.write("2\t400\t1000\t500\t600\r\n")
file.write("3\t700\t1000\t800\t900\r\n")
file.write("4\t0\t0\t0\t0\r\n")
finally:
file.close()
return file
def test_bulk_update_users(self):
for i in range(3):
user = UserProfile.objects.create_user(
id=i+5,
first_name = '<NAME>',
last_name = '<NAME>',
identification = i+1,
username = "<EMAIL>".format(i+1),
email = "<EMAIL>".format(i+1),
password = "password"
)
UserFinance.objects.create(
contributions= 0,
balance_contributions= 0,
total_quota= 0,
available_quota= 0,
utilized_quota=0,
user= user
)
file = {}
created_file = self.create_test_file()
file_reader = open(created_file.name,'r')
file['file'] = file_reader
response = self.client.patch(
reverse(view_user),
data=encode_multipart('file',file),
content_type='multipart/form-data; boundary=file',
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
user_finance = UserFinance.objects.get(user_id=5)
self.assertEqual(user_finance.balance_contributions,100)
self.assertEqual(user_finance.total_quota,1000)
self.assertEqual(user_finance.contributions,200)
self.assertEqual(user_finance.utilized_quota,300)
self.assertEqual(user_finance.available_quota,700)
user_finance = UserFinance.objects.get(user_id=6)
self.assertEqual(user_finance.balance_contributions,400)
self.assertEqual(user_finance.total_quota,1000)
self.assertEqual(user_finance.contributions,500)
self.assertEqual(user_finance.utilized_quota,600)
self.assertEqual(user_finance.available_quota,400)
user_finance = UserFinance.objects.get(user_id=7)
self.assertEqual(user_finance.balance_contributions,700)
self.assertEqual(user_finance.total_quota,1000)
self.assertEqual(user_finance.contributions,800)
self.assertEqual(user_finance.utilized_quota,900)
self.assertEqual(user_finance.available_quota,100)
def test_user_apps_not_found(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'noexist'}),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
def test_get_users_birthdate(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'birthdates'}),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(len(response.data), 11)
for i in range(len(response.data)):
self.assertEqual(response.data[i]['birthdate'], None)
def test_get_powers_exception(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'get',
'page': 2
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_500_INTERNAL_SERVER_ERROR)
def test_get_powers_empty(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'get',
'page': 2,
'obj': 'requestee'
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(len(response.data['list']), 0)
self.assertEqual(response.data['num_pages'], 1)
def test_post_power(self):
self.assertEqual(len(Power.objects.all()), 0)
user = UserProfile.objects.get(identification=1001)
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'post',
'meeting_date': '2020-01-01',
'requestee': user.id
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(response.data, None)
self.assertEqual(len(Power.objects.all()), 1)
    def test_get_powers_pagination(self):
        """With 11 power requests, page 1 of the 'requested' listing holds 10 items over 2 pages."""
        self.assertEqual(len(Power.objects.all()), 0)
        user = UserProfile.objects.get(identification=1001)
        # Create 11 power requests so the listing spans two pages.
        for i in range(11):
            response = self.client.post(
                reverse(view_user_apps, kwargs={'app': 'power'}),
                **self.get_auth_header(self.token),
                data = json.dumps({
                    'type': 'post',
                    'meeting_date': '2020-01-01',
                    'requestee': user.id
                }),
                content_type='application/json',
            )
            self.assertEqual(response.status_code,status.HTTP_200_OK)
        # Fetch page 1 of the powers requested by the session user.
        response = self.client.post(
            reverse(view_user_apps, kwargs={'app': 'power'}),
            **self.get_auth_header(self.token),
            data = json.dumps({
                'type': 'get',
                'page': 1,
                'obj': 'requested'
            }),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_200_OK)
        self.assertEqual(len(response.data['list']), 10)
        self.assertEqual(response.data['num_pages'], 2)
    def test_patch_power_denied(self):
        """Patching a power request to state 2 (denied) persists the new state."""
        user = UserProfile.objects.get(identification=1001)
        # Create a power request to act on.
        response = self.client.post(
            reverse(view_user_apps, kwargs={'app': 'power'}),
            **self.get_auth_header(self.token),
            data = json.dumps({
                'type': 'post',
                'meeting_date': '2020-01-01',
                'requestee': user.id
            }),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_200_OK)
        power = Power.objects.all()[0]
        # Deny it (state 2).
        response = self.client.post(
            reverse(view_user_apps, kwargs={'app': 'power'}),
            **self.get_auth_header(self.token),
            data = json.dumps({
                'type': 'patch',
                'id': power.id,
                'state': 2
            }),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_200_OK)
        self.assertEqual(response.data, None)
        power = Power.objects.all()[0]
        self.assertEqual(power.state, 2)
    @patch.object(MailService, 'send_mail')
    def test_patch_power_approved(self, mock):
        """Approving a power request (state 1) persists the state and sends a mail."""
        user = UserProfile.objects.get(identification=1001)
        # Create a power request to act on.
        response = self.client.post(
            reverse(view_user_apps, kwargs={'app': 'power'}),
            **self.get_auth_header(self.token),
            data = json.dumps({
                'type': 'post',
                'meeting_date': '2020-01-01',
                'requestee': user.id
            }),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_200_OK)
        power = Power.objects.all()[0]
        # Approve it (state 1); approval is expected to trigger a notification mail.
        response = self.client.post(
            reverse(view_user_apps, kwargs={'app': 'power'}),
            **self.get_auth_header(self.token),
            data = json.dumps({
                'type': 'patch',
                'id': power.id,
                'state': 1
            }),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_200_OK)
        self.assertEqual(response.data, None)
        power = Power.objects.all()[0]
        self.assertEqual(power.state, 1)
        self.assertTrue(mock.called)
mock.assert_called_once() | import json
from mock import patch
from django.urls import reverse
from django.test.client import encode_multipart
from rest_framework import status
from django.core import mail
from fondo_api.models import *
from fondo_api.tests.abstract_test import AbstractTest
from fondo_api.services.mail import MailService
from fondo_api.enums import EmailTemplate
view_user = 'view_user'
view_user_detail = 'view_user_detail'
view_user_activate = 'view_user_activate'
view_user_apps = 'view_user_apps'
class UserViewTest(AbstractTest):
    def setUp(self):
        """Create fixture users, authenticate, and prepare the JSON payloads used across tests."""
        self.create_user()
        self.create_basic_users();
        self.token = self.get_token('<EMAIL>','password')
        # Valid new-user payload.
        self.object_json = {
            'first_name': '<NAME>',
            'last_name': 'Last Name',
            'identification': 123,
            'email': '<EMAIL>',
            'username': '<EMAIL>',
            'role': 2
        }
        # Payload whose identification collides with an existing user.
        self.object_json_identification_r = {
            'first_name': 'Foo Name 2',
            'last_name': 'Last Name 2',
            'identification': 99999,
            'email': '<EMAIL>',
            'username': '<EMAIL>',
            'role': 2
        }
        # Payload whose email collides with an existing user.
        self.object_json_email_r = {
            'first_name': 'Foo Name 3',
            'last_name': 'Last Name 3',
            'identification': 1234,
            'email': '<EMAIL>',
            'username': '<EMAIL>',
            'role': 2
        }
        # Full update: changed personal data plus new finance figures.
        self.object_json_user_update = {
            'personal': {
                'identification':123,
                'first_name': 'Foo Name update',
                'last_name': 'Last Name update',
                'email': '<EMAIL>',
                'role': 2,
                'birthdate': '1995-11-07'
            },
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota': 1000,
                'utilized_quota': 500
            }
        }
        # Update whose finance figures match the stored values (no-op finance).
        self.object_json_user_update_same_finance = {
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota':1000,
                'available_quota': 500,
                'utilized_quota':0
            },
            'personal': {
                'identification':123,
                'first_name': '<NAME>',
                'last_name': '<NAME>',
                'email': '<EMAIL>',
                'role': 2,
            }
        }
        # Update that reuses an email belonging to another user (conflict case).
        self.object_json_user_update_email_r = {
            'personal': {
                'identification':12312451241243,
                'first_name': '<NAME>',
                'last_name': '<NAME>',
                'email': '<EMAIL>',
                'role': 2
            },
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota': 1000,
                'utilized_quota': 500
            }
        }
        # Update that reuses an identification belonging to another user (conflict case).
        self.object_json_user_update_identification_r = {
            'personal': {
                'identification':123,
                'first_name': '<NAME>',
                'last_name': '<NAME>',
                'email': '<EMAIL>',
                'role': 2
            },
            'finance': {
                'contributions': 2000,
                'balance_contributions': 2000,
                'total_quota': 1000,
                'utilized_quota': 500
            }
        }
    @patch.object(MailService, 'send_mail')
    def test_success_post(self, mock):
        """Creating a user stores it inactive with an activation key and sends the activation mail."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.first_name,'<NAME>')
        self.assertEqual(user.identification,123)
        self.assertEqual(user.email, "<EMAIL>")
        self.assertEqual(user.role, 2)
        self.assertEqual(user.get_role_display(),'TREASURER')
        # New users start inactive with a one-time activation key.
        self.assertIsNotNone(user.key_activation)
        self.assertFalse(user.is_active)
        self.assertTrue(mock.called)
        # The activation mail carries the key and a link back to the frontend.
        mock.assert_called_once_with(EmailTemplate.USER_ACTIVATION, [user.email], {
            'user_full_name': '{} {}'.format(user.first_name, user.last_name),
            'user_id': user.id,
            'user_key': user.key_activation,
            'host_url': 'http://localhost:3000'
        })
        self.assertEqual(len(UserProfile.objects.all()), 12)
        self.assertEqual(len(UserFinance.objects.all()), 12)
@patch.object(MailService, 'send_mail', return_value=False)
def test_invalid_email(self,mock):
response = self.client.post(
reverse(view_user),
data = json.dumps(self.object_json),
content_type = 'application/json',
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
self.assertEqual(response.data['message'], 'Invalid email')
self.assertEqual(len(UserProfile.objects.all()), 11)
self.assertEqual(len(UserFinance.objects.all()), 11)
def test_unsuccess_post_identification(self):
response = self.client.post(
reverse(view_user),
data = json.dumps(self.object_json_identification_r),
content_type='application/json',
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_409_CONFLICT)
self.assertEqual(response.data['message'],'Identification/email already exists')
self.assertEqual(len(UserProfile.objects.all()), 11)
self.assertEqual(len(UserFinance.objects.all()), 11)
def test_unsuccess_post_email(self):
response = self.client.post(
reverse(view_user),
data = json.dumps(self.object_json_email_r),
content_type='application/json',
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_409_CONFLICT)
self.assertEqual(response.data['message'],'Identification/email already exists')
self.assertEqual(len(UserProfile.objects.all()), 11)
self.assertEqual(len(UserFinance.objects.all()), 11)
def test_get_users(self):
response = self.client.get(
"%s?page=1" % reverse(view_user),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['list']), 10)
self.assertEqual(response.data['num_pages'], 2)
for user in response.data['list']:
self.assertIsNotNone(user['id'])
self.assertIsNotNone(user['identification'])
self.assertIsNotNone(user['full_name'])
self.assertIsNotNone(user['email'])
self.assertIsNotNone(user['role'])
def test_get_users_all(self):
response = self.client.get(
reverse(view_user),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['list']), 11)
for user in response.data['list']:
self.assertIsNotNone(user['id'])
self.assertIsNotNone(user['identification'])
self.assertIsNotNone(user['full_name'])
self.assertIsNotNone(user['email'])
self.assertIsNotNone(user['role'])
def test_get_users_empty(self):
response = self.client.get(
"%s?page=3" % reverse(view_user),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['list']),0)
self.assertEqual(response.data['num_pages'],2)
def test_get_users_error_pagination(self):
response = self.client.get(
"%s?page=0" % reverse(view_user),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['message'],'Page number must be greater than 0')
    def test_get_user(self):
        """GET of an explicit user id returns both the personal and finance sections."""
        response = self.client.get(
            reverse(view_user_detail,kwargs={'id': 1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Personal section.
        self.assertEqual(response.data['user']['id'],1)
        self.assertEqual(response.data['user']['identification'],99999)
        self.assertEqual(response.data['user']['first_name'],'<NAME>')
        self.assertEqual(response.data['user']['last_name'],'<NAME>')
        self.assertEqual(response.data['user']['email'],'<EMAIL>')
        self.assertEqual(response.data['user']['role'],0)
        self.assertEqual(response.data['user']['role_display'], 'ADMIN')
        # Finance section.
        self.assertEqual(response.data['finance']['contributions'],2000)
        self.assertEqual(response.data['finance']['balance_contributions'],2000)
        self.assertEqual(response.data['finance']['total_quota'],1000)
        self.assertEqual(response.data['finance']['available_quota'],500)
        self.assertEqual(response.data['finance']['utilized_quota'],0)
    def test_get_session_user(self):
        """GET with id -1 resolves to the authenticated session user (id 1)."""
        response = self.client.get(
            reverse(view_user_detail,kwargs={'id': -1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Personal section of the session user.
        self.assertEqual(response.data['user']['id'],1)
        self.assertEqual(response.data['user']['identification'],99999)
        self.assertEqual(response.data['user']['first_name'],'<NAME>')
        self.assertEqual(response.data['user']['last_name'],'<NAME>')
        self.assertEqual(response.data['user']['email'],'<EMAIL>')
        self.assertEqual(response.data['user']['role'],0)
        self.assertEqual(response.data['user']['role_display'], 'ADMIN')
        # Finance section.
        self.assertEqual(response.data['finance']['contributions'],2000)
        self.assertEqual(response.data['finance']['balance_contributions'],2000)
        self.assertEqual(response.data['finance']['total_quota'],1000)
        self.assertEqual(response.data['finance']['available_quota'],500)
        self.assertEqual(response.data['finance']['utilized_quota'],0)
def test_get_user_not_found(self):
response = self.client.get(
reverse(view_user_detail,kwargs={'id': 2}),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_delete_user(self):
        """DELETE soft-deletes the user (is_active=False), invalidating its token and login."""
        # Unknown id -> 404.
        response = self.client.delete(
            reverse(view_user_detail,kwargs={'id': 2}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.delete(
            reverse(view_user_detail,kwargs={'id': 1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # NOTE(review): 'User' here, elsewhere 'UserProfile' — presumably the same
        # model via the wildcard models import; confirm.
        user = User.objects.get(id = 1)
        self.assertFalse(user.is_active)
        # The old token no longer authenticates.
        response = self.client.get(
            reverse(view_user_detail,kwargs={'id': 1}),
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_401_UNAUTHORIZED)
        # Logging in again must fail for an inactive account.
        try:
            self.get_token('<EMAIL>','password')
            self.fail('That account is inactive')
        except KeyError:
            pass
    def test_patch_user(self):
        """PATCH of type 'personal' updates name/email and keeps username in sync with email."""
        self.object_json_user_update['type'] = 'personal'
        # Unknown id -> 404.
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 2}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = UserProfile.objects.get(id=1)
        self.assertEqual(user.first_name,'<NAME>')
        self.assertEqual(user.last_name,'<NAME>')
        self.assertEqual(user.email,'<EMAIL>')
        self.assertEqual(user.username,'<EMAIL>')
    def test_patch_user_finance(self):
        """PATCH of type 'finance' persists the new figures and recomputed available quota."""
        self.object_json_user_update['type'] = 'finance'
        # Unknown id -> 404.
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 2}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_finance = UserFinance.objects.get(user_id=1)
        self.assertEqual(user_finance.contributions, 2000)
        self.assertEqual(user_finance.balance_contributions, 2000)
        self.assertEqual(user_finance.total_quota, 1000)
        self.assertEqual(user_finance.utilized_quota, 500)
        self.assertEqual(user_finance.available_quota, 500)
    def test_patch_user_not_finance(self):
        """When the finance payload matches the stored values, the finance row stays unchanged."""
        # Personal update succeeds.
        self.object_json_user_update_same_finance['type'] = 'personal'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_same_finance),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = UserProfile.objects.get(id=1)
        self.assertEqual(user.first_name,'<NAME>')
        self.assertEqual(user.last_name,'<NAME>')
        self.assertEqual(user.email,'<EMAIL>')
        self.assertEqual(user.username,'<EMAIL>')
        # Finance update with identical figures leaves utilized/available as they were.
        self.object_json_user_update_same_finance['type'] = 'finance'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_same_finance),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_finance = UserFinance.objects.get(user_id=1)
        self.assertEqual(user_finance.contributions, 2000)
        self.assertEqual(user_finance.balance_contributions, 2000)
        self.assertEqual(user_finance.total_quota, 1000)
        self.assertEqual(user_finance.utilized_quota, 0)
        self.assertEqual(user_finance.available_quota, 500)
    @patch.object(MailService, 'send_mail')
    def test_patch_user_conflict(self, mock):
        """PATCH of personal data colliding with another user's identification or email returns 409."""
        # Create a second user so the updates below collide with its data.
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        # Duplicate identification -> conflict.
        self.object_json_user_update_identification_r['type'] = 'personal'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_identification_r),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
        # Duplicate email -> conflict.
        self.object_json_user_update_email_r['type'] = 'personal'
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data=json.dumps(self.object_json_user_update_email_r),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_patch_preferences_not_found(self):
response = self.client.patch(
reverse(view_user_detail,kwargs={'id': 111}),
data='{"type": "preferences", "preferences":{"notifications": true}}',
content_type='application/json',
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_patch_preferences_notifications(self):
        """Toggling the notifications preference on and then off is persisted."""
        # Enable notifications (colors are part of the same preferences payload).
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data='''{"type": "preferences", "preferences":{"notifications": true, "primary_color": "#fff",
            "secondary_color": "#000"}}''',
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_preference = UserPreference.objects.get(user_id=1)
        self.assertTrue(user_preference.notifications, True)
        # Disable notifications again and verify the flag flips.
        response = self.client.patch(
            reverse(view_user_detail,kwargs={'id': 1}),
            data='''{"type": "preferences", "preferences":{"notifications": false, "primary_color": "#fff",
            "secondary_color": "#000"}}''',
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user_preference = UserPreference.objects.get(user_id=1)
        self.assertFalse(user_preference.notifications, False)
    @patch.object(MailService, 'send_mail')
    def test_activation_successful(self, mock):
        """Posting the correct activation key activates the user and hashes the password."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # Activation requires the matching identification and the emailed key.
        obj = {
            'password':'<PASSWORD>',
            'identification':123,
            'key':user.key_activation
        }
        response = self.client.post(
            reverse(view_user_activate,kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_200_OK)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.is_active,True)
        # Password is stored hashed and the one-time key is cleared.
        self.assertTrue('pbkdf2_sha256' in user.password)
        self.assertIsNone(user.key_activation)
    @patch.object(MailService, 'send_mail')
    def test_activation_unsuccessful_1(self, mock):
        """Activation with a wrong identification (1234 vs 123) is rejected with 404."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # Correct key but mismatching identification.
        obj = {
            'password':'<PASSWORD>',
            'identification':1234,
            'key':user.key_activation
        }
        response = self.client.post(
            reverse(view_user_activate,kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
        user = UserProfile.objects.get(identification = 123)
        # User remains inactive, password unset, activation key retained.
        self.assertEqual(user.is_active,False)
        self.assertFalse('<PASSWORD>' in user.password)
        self.assertIsNotNone(user.key_activation)
    @patch.object(MailService, 'send_mail')
    def test_activation_unsuccessful_2(self, mock):
        """Activation with an empty key is rejected with 404 and leaves the user inactive."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # Empty activation key (and wrong identification).
        obj = {
            'password':'<PASSWORD>',
            'identification':1234,
            'key': ''
        }
        response = self.client.post(
            reverse(view_user_activate,kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.is_active,False)
        self.assertFalse('pbkdf2_sha256' in user.password)
        self.assertIsNotNone(user.key_activation)
    @patch.object(MailService, 'send_mail')
    def test_activation_unsuccessful_3(self, mock):
        """Activation with the key missing from the payload is rejected with 404."""
        response = self.client.post(
            reverse(view_user),
            data = json.dumps(self.object_json),
            content_type='application/json',
            **self.get_auth_header(self.token)
        )
        self.assertEqual(response.status_code,status.HTTP_201_CREATED)
        self.assertTrue(mock.called)
        mock.assert_called_once()
        user = UserProfile.objects.get(identification = 123)
        # No 'key' field at all.
        obj = {
            'password':'<PASSWORD>',
            'identification':1234
        }
        response = self.client.post(
            reverse(view_user_activate, kwargs={'id': user.id}),
            data = json.dumps(obj),
            content_type='application/json',
        )
        self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
        user = UserProfile.objects.get(identification = 123)
        self.assertEqual(user.is_active,False)
        self.assertFalse('pbkdf2_sha256' in user.password)
        self.assertIsNotNone(user.key_activation)
def create_test_file(self):
try:
file = open("testfile.txt","w")
file.write("1\t100\t1000\t200\t300\r\n")
file.write("2\t400\t1000\t500\t600\r\n")
file.write("3\t700\t1000\t800\t900\r\n")
file.write("4\t0\t0\t0\t0\r\n")
finally:
file.close()
return file
def test_bulk_update_users(self):
for i in range(3):
user = UserProfile.objects.create_user(
id=i+5,
first_name = '<NAME>',
last_name = '<NAME>',
identification = i+1,
username = "<EMAIL>".format(i+1),
email = "<EMAIL>".format(i+1),
password = "password"
)
UserFinance.objects.create(
contributions= 0,
balance_contributions= 0,
total_quota= 0,
available_quota= 0,
utilized_quota=0,
user= user
)
file = {}
created_file = self.create_test_file()
file_reader = open(created_file.name,'r')
file['file'] = file_reader
response = self.client.patch(
reverse(view_user),
data=encode_multipart('file',file),
content_type='multipart/form-data; boundary=file',
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
user_finance = UserFinance.objects.get(user_id=5)
self.assertEqual(user_finance.balance_contributions,100)
self.assertEqual(user_finance.total_quota,1000)
self.assertEqual(user_finance.contributions,200)
self.assertEqual(user_finance.utilized_quota,300)
self.assertEqual(user_finance.available_quota,700)
user_finance = UserFinance.objects.get(user_id=6)
self.assertEqual(user_finance.balance_contributions,400)
self.assertEqual(user_finance.total_quota,1000)
self.assertEqual(user_finance.contributions,500)
self.assertEqual(user_finance.utilized_quota,600)
self.assertEqual(user_finance.available_quota,400)
user_finance = UserFinance.objects.get(user_id=7)
self.assertEqual(user_finance.balance_contributions,700)
self.assertEqual(user_finance.total_quota,1000)
self.assertEqual(user_finance.contributions,800)
self.assertEqual(user_finance.utilized_quota,900)
self.assertEqual(user_finance.available_quota,100)
def test_user_apps_not_found(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'noexist'}),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_404_NOT_FOUND)
def test_get_users_birthdate(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'birthdates'}),
**self.get_auth_header(self.token)
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(len(response.data), 11)
for i in range(len(response.data)):
self.assertEqual(response.data[i]['birthdate'], None)
def test_get_powers_exception(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'get',
'page': 2
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_500_INTERNAL_SERVER_ERROR)
def test_get_powers_empty(self):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'get',
'page': 2,
'obj': 'requestee'
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(len(response.data['list']), 0)
self.assertEqual(response.data['num_pages'], 1)
def test_post_power(self):
self.assertEqual(len(Power.objects.all()), 0)
user = UserProfile.objects.get(identification=1001)
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'post',
'meeting_date': '2020-01-01',
'requestee': user.id
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(response.data, None)
self.assertEqual(len(Power.objects.all()), 1)
def test_get_powers_pagination(self):
self.assertEqual(len(Power.objects.all()), 0)
user = UserProfile.objects.get(identification=1001)
for i in range(11):
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'post',
'meeting_date': '2020-01-01',
'requestee': user.id
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'get',
'page': 1,
'obj': 'requested'
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(len(response.data['list']), 10)
self.assertEqual(response.data['num_pages'], 2)
def test_patch_power_denied(self):
user = UserProfile.objects.get(identification=1001)
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'post',
'meeting_date': '2020-01-01',
'requestee': user.id
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
power = Power.objects.all()[0]
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'patch',
'id': power.id,
'state': 2
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(response.data, None)
power = Power.objects.all()[0]
self.assertEqual(power.state, 2)
@patch.object(MailService, 'send_mail')
def test_patch_power_approved(self, mock):
user = UserProfile.objects.get(identification=1001)
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'post',
'meeting_date': '2020-01-01',
'requestee': user.id
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
power = Power.objects.all()[0]
response = self.client.post(
reverse(view_user_apps, kwargs={'app': 'power'}),
**self.get_auth_header(self.token),
data = json.dumps({
'type': 'patch',
'id': power.id,
'state': 1
}),
content_type='application/json',
)
self.assertEqual(response.status_code,status.HTTP_200_OK)
self.assertEqual(response.data, None)
power = Power.objects.all()[0]
self.assertEqual(power.state, 1)
self.assertTrue(mock.called)
mock.assert_called_once() | en | 0.232156 | {"type": "preferences", "preferences":{"notifications": true, "primary_color": "#fff", "secondary_color": "#000"}} {"type": "preferences", "preferences":{"notifications": false, "primary_color": "#fff", "secondary_color": "#000"}} | 1.9573 | 2 |
# --- examples/test_sftp_storage.py (repo: emanjavacas/casket) ---
# coding: utf-8
import pytest
from sftp_storage import SFTPStorage
from tinydb import TinyDB, where
# Fixture dictionary covering each JSON value family the storage must
# round-trip: null, int, float, list, nested dict, and booleans.
element = {'none': [None, None], 'int': 42, 'float': 3.1415899999999999,
           'list': ['LITE', 'RES_ACID', 'SUS_DEXT'],
           'dict': {'hp': 13, 'sp': 5},
           'bool': [True, False, True, False]}
# Remote SFTP target ('user@host:path') shared by every test below.
# NOTE(review): assumes host 'serv1' is reachable — these are integration tests.
path = 'manjavacas@serv1:~/test.json'
def test_json():
    """Round-trip: what is written through the storage is read back intact."""
    storage = SFTPStorage(path, policy='autoadd')
    storage.write(element)
    assert storage.read() == element
def test_json_readwrite():
    """
    Regression test for issue #1
    """
    db = TinyDB(path, policy='autoadd', storage=SFTPStorage)
    long_item = {'name': 'A very long entry'}
    short_item = {'name': 'A short one'}
    lookup = lambda s: db.get(where('name') == s)

    # Insert/remove the long entry and confirm both transitions.
    db.insert(long_item)
    assert lookup('A very long entry') == long_item
    db.remove(where('name') == 'A very long entry')
    assert lookup('A very long entry') is None

    # A shorter entry must behave the same after the longer one is gone.
    db.insert(short_item)
    assert lookup('A short one') == short_item
    db.remove(where('name') == 'A short one')
    assert lookup('A short one') is None
def test_json_invalid_directory():
    """Opening a database under a nonexistent path must raise ValueError."""
    with pytest.raises(ValueError):
        with TinyDB('/this/is/an/invalid/path/db.json',
                    policy='autoadd', storage=SFTPStorage):
            pass
# coding: utf-8
import pytest
from sftp_storage import SFTPStorage
from tinydb import TinyDB, where
# Fixture dictionary covering each JSON value family the storage must
# round-trip: null, int, float, list, nested dict, and booleans.
element = {'none': [None, None], 'int': 42, 'float': 3.1415899999999999,
           'list': ['LITE', 'RES_ACID', 'SUS_DEXT'],
           'dict': {'hp': 13, 'sp': 5},
           'bool': [True, False, True, False]}
# Remote SFTP target ('user@host:path') shared by every test below.
# NOTE(review): assumes host 'serv1' is reachable — these are integration tests.
path = 'manjavacas@serv1:~/test.json'
def test_json():
    """Round-trip: what is written through the storage is read back intact."""
    storage = SFTPStorage(path, policy='autoadd')
    storage.write(element)
    assert storage.read() == element
def test_json_readwrite():
    """
    Regression test for issue #1
    """
    db = TinyDB(path, policy='autoadd', storage=SFTPStorage)
    long_item = {'name': 'A very long entry'}
    short_item = {'name': 'A short one'}
    lookup = lambda s: db.get(where('name') == s)

    # Insert/remove the long entry and confirm both transitions.
    db.insert(long_item)
    assert lookup('A very long entry') == long_item
    db.remove(where('name') == 'A very long entry')
    assert lookup('A very long entry') is None

    # A shorter entry must behave the same after the longer one is gone.
    db.insert(short_item)
    assert lookup('A short one') == short_item
    db.remove(where('name') == 'A short one')
    assert lookup('A short one') is None
def test_json_invalid_directory():
    """Opening a database under a nonexistent path must raise ValueError."""
    with pytest.raises(ValueError):
        with TinyDB('/this/is/an/invalid/path/db.json',
                    policy='autoadd', storage=SFTPStorage):
            pass
# --- test_real.py (repo: astafeev/test666) ---
from selenium import webdriver
import time
class Test2():
    """Browser smoke tests driving Chrome against public sites."""

    def test_test1(self):
        # Search Google: fill the query box and submit the form.
        browser = webdriver.Chrome()
        browser.maximize_window()
        browser.get("https://www.google.com")
        query = browser.find_element_by_name("q")
        query.send_keys("<NAME>")
        query.submit()
        time.sleep(3)
        browser.quit()

    def test_test2(self):
        # Simply load the ukr.net landing page.
        browser = webdriver.Chrome()
        browser.get("https://ukr.net")
        time.sleep(3)
        browser.quit()
from selenium import webdriver
import time
class Test2():
    """Browser smoke tests driving Chrome against public sites."""

    def test_test1(self):
        # Search Google: fill the query box and submit the form.
        browser = webdriver.Chrome()
        browser.maximize_window()
        browser.get("https://www.google.com")
        query = browser.find_element_by_name("q")
        query.send_keys("<NAME>")
        query.submit()
        time.sleep(3)
        browser.quit()

    def test_test2(self):
        # Simply load the ukr.net landing page.
        browser = webdriver.Chrome()
        browser.get("https://ukr.net")
        time.sleep(3)
        browser.quit()
# (dataset metadata row removed)
# --- ifdflow/ifdflow_notebook_22_08_2019.py (repo: tamir-dingjan/ifdflow) ---
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import glob
import subprocess
import time
import math
import shutil
import pathos.pools as pp
import numpy as np
import pandas as pd
import mdtraj as md
import networkx as nx
from sklearn import metrics
import matplotlib.pyplot as plt
from adjustText import adjust_text
import seaborn as sns
# # Notes
# This notebook will contain a class and methods that enable the automatic refinement of homology models for virtual screening. The process will require the following:
# 1. A retrospective screening library containing known actives and decoys.
# 2. Selection criteria for the pose to converge upon, consisting of a ligand identity and the number of top-ranked models required to converge. When this pose is formed by the requested number of the top-ranked models, the refinement ends.
# 3. Binding site location.
#
# The following methods will be defined:
# 1. Screening - to conduct the screen of the specified library at a model in the selected binding site location.
# 2. Minimization - to minimize the binding site residues around the specified convergence pose.
# 3. Enrichment - to calculate the enrichment metrics for a completed screen.
# 4. Convergence - to detect whether the top-ranked models from a generation have converged.
# 5. Refine - a wrapper method to perform the minimization and screening methods at the generation level, since the entire generation is refined simultaneously.
#
# The classes used to structure this process:
#
# "Generation" - a collection of "Screens". The Convergence method operates at the "Generation" level.
#
# "Screen" - a single screening result, consisting of a protein with a list of hits retrieved by the protein. Each screening result will have its enrichment metrics associated with it, so that the top-ranked models can be easily selected. The "Screening" and "Enrichment" methods operate at the "Screen" level. In particular, the "Screening" method populates a "Screen" by running a virtual screening calculation on the "Screen" object's protein model.
#
# "Model" - contains a single protein conformation. This object will have the "Minimization" method as well as methods for exporting the protein coordinates.
#
# # Thoughts
# Do I need to build a job monitor to make this work? Some kind of process that can schedule jobs and report the results back when the jobs finish? Otherwise, the whole script could be stuck waiting for a single docking job to finish.
#
# Definitely need a way to schedule multiple jobs simultaneously. Could do this with a mix of manual and automated - for example, could write out a big file of Schrodinger terminal commands and then just manually give that to the Schrodinger server in Bash. Then, could re-run this notebook on the output of those jobs when they finish - that way, the notebook does the heavy lifting of checking for convergence and writing job submission files, but doesn't have to be aware of when the jobs finish. An added bonus, the notebook won't be able to automatically submit jobs so things won't get out of hand by accident.
#
# This means that a Screen object should be able to be created from a .maegz file, for easy input of the completed jobs when they finish. Simple way to do this is to have two kinds of Screen object - a ScreenConstructor, and a ScreenEvaluator
# I'd like to be able to do the convergence check within MDTraj, without needing to run the RMSD calculations through Maestro. But, this might cause problems with the binding site location when starting a new generation, because MDTraj and Maestro might shift around coordinate spaces / residue numbering. Watch out for this. So far the coordinate space seems to remain consistent.
# File Nomenclature:
#
# Models are named by Generation, Screen, and Model: "0-1-2.pdb"
#
# Screens are named by Generation and Screen: "0-1_{grid/dock}.maegz"
# In[ ]:
# In[2]:
class Model:
    """The protein-ligand complex coordinates, with a minimize method.

    Attributes
    ----------
    pose : mdtraj.Trajectory object
        Loaded protein-ligand complex; only set after load().
    """
    def __init__(self, filename, ligand):
        """
        Initialise the Model from the given Maestro pose-viewer file.

        The input file must contain multiple entries, with the first entry
        containing just the protein and subsequent entries containing ligands
        (the default structure for docking output .maegz files). The
        protein-only entry is extracted using 'maesubset' and used for grid
        generation; the complex (protein + chosen ligand) is assembled later
        by load() so MDTraj can compute ligand centroids.

        Parameters
        ----------
        filename : str
            Filename for the ligand-protein coordinates. Must be a Maestro
            format .maegz file containing a single protein molecule as the
            first entry, and any number of ligand molecules in subsequent
            entries.
        ligand : str
            Title of the ligand to be used for assembling the complex. This
            ligand features in the model minimization, so it guides the model
            architecture during refinement. Must correspond to the Maestro
            title ('s_m_title' via the proplister tool) of a ligand entry in
            the input pose-viewer file. If the string 'noligand' is provided,
            the model is set up with the first ligand listed in the docking
            input file.
        """
        # Cast the provided filename as an absolute path to enable processing in
        # the same directory.
        self.input_file = os.path.realpath(filename)
        self.name = os.path.splitext(os.path.basename(self.input_file))[0]
        self.output_dir = os.path.dirname(self.input_file) + "/"
        self.load_status = False
        # The Model is useless without a ligand, so check whether the specified
        # ligand is available in the input. When assembling a new model (not
        # from docking output) the desired ligand may be absent; 'noligand'
        # acts as a manual override to force model loading.
        self.ligand = ligand
        if (self.ligand == 'noligand'):
            # No convergence ligand requested — all we need from a ligand is
            # to know the binding site location, so take the first ligand in
            # the file and update self.ligand with its title.
            # NOTE(review): index [3] of the proplister output is assumed to
            # be the first ligand's title line — confirm against the tool's
            # output format.
            entry_titles = subprocess.run(["/home/data/Schrodinger2018_3/utilities/proplister",
                                           "-p",
                                           "s_m_title",
                                           self.input_file], text=True, capture_output=True)
            self.ligand = entry_titles.stdout.rstrip("\n").split('\n')[3].strip()
        checkligand = subprocess.check_output(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                               "-title",
                                               self.ligand,
                                               self.input_file],
                                              cwd = os.path.dirname(self.input_file))
        # NOTE(review): check_output returns *bytes* here (no text=True), so
        # the comparison against the str '' below can never be True and
        # ligandmatch is effectively always set True. Was b'' intended?
        if (checkligand == ''):
            self.ligandmatch = False
        else:
            self.ligandmatch = True

    def load(self):
        """
        Run the Model's file-conversion routines and load the MDTraj object.

        Performs, in order:
        1. Extract the protein structure to <name>-protein.mae.
        2. Save all entries matching the ligand title to
           <name>-matchedligands.mae.
        3. Extract the first matched ligand to <name>-ligand.mae.
        4. Assemble the two-entry complex to <name>-complex_pv.mae (structcat).
        5. Merge the complex to a single Maestro entry (pv_convert merge).
        6. Export the complex to <name>-complex.pdb for MDTraj.

        The resulting MDTraj Trajectory object is stored in self.pose.
        """
        # ASSUMPTION: the first entry in the pose-viewer file is the receptor.
        # Usually this is the case.
        if (self.ligandmatch == False):
            print("No ligand match for this model - cancel loading.")
            return
        with open(self.output_dir + self.name + "-protein.mae", 'w') as f:
            extractprotein = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                               "-n",
                                               "1",
                                               self.input_file],
                                              stdout = f,
                                              cwd = os.path.dirname(self.input_file))
        # Collect every entry whose title matches the requested ligand.
        with open(self.output_dir + self.name + "-matchedligands.mae", 'w') as f:
            fetchligands = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                             "-title",
                                             self.ligand,
                                             self.input_file],
                                            stdout = f,
                                            cwd = os.path.dirname(self.input_file))
        # Without waiting, the -matchedligands.mae file is not always readable
        # yet (the child may not have flushed it). A fixed sleep was used
        # previously; waiting for process termination is the reliable fix.
        fetchligands.wait()
        # Keep only the first (top-ranked) matching ligand entry.
        with open(self.output_dir + self.name + "-ligand.mae", 'w') as f:
            extractligand = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                              "-n",
                                              "1",
                                              self.output_dir + self.name + "-matchedligands.mae"],
                                             stdout = f,
                                             cwd = os.path.dirname(self.input_file))
        # NOTE(review): extractprotein/extractligand are not wait()ed on
        # before structcat reads their output files — potential race.
        merge = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/structcat",
                                  "-imae",
                                  self.output_dir + self.name + "-protein.mae",
                                  "-imae",
                                  self.output_dir + self.name + "-ligand.mae",
                                  "-omae",
                                  self.output_dir + self.name + "-complex_pv.mae"])
        # Merge the two-entry pose-viewer file into one complex entry
        # (blocking call).
        complexation = subprocess.call(["/home/data/Schrodinger2018_3/run",
                                        "pv_convert.py",
                                        "-mode",
                                        "merge",
                                        self.output_dir + self.name + "-complex_pv.mae"])
        # Export a PDB copy for MDTraj, which cannot read Maestro formats.
        writepdb = subprocess.call(["/usr/people/mashagr/tamird/bin/silico1.14/bin/mol_combine",
                                    self.output_dir + self.name + "-protein.mae",
                                    self.output_dir + self.name + "-ligand.mae",
                                    "-O",
                                    self.output_dir + self.name + "-complex.pdb",
                                    "-o",
                                    "pdb",
                                    "-dr"])
        try:
            self.pose = md.load(self.output_dir + self.name + "-complex.pdb")
        except IOError:
            print("Could not load file: " + self.output_dir + self.name + "-complex.pdb")
        self.complexfile = self.output_dir + self.name + "-complex-out_complex.mae"
        self.protfile = self.output_dir + self.name + "-protein.mae"
        self.load_status = True

    def get_ligand_resid(self):
        """Use the MDTraj Trajectory object to automatically identify the
        ligand residue name.

        Relies on the ligand consisting of a single molecule with a single
        residue name. Single-atom molecules are explicitly ignored, as some
        protein hydrogen atoms are not always correctly identified as protein.
        The ligand residue is then selected as the smallest remaining molecule.

        Returns
        -------
        str
            A string containing the residue sequence number of the ligand.
        """
        # Connected components of the bond graph = individual molecules.
        molecules = list(nx.connected_components(self.pose.topology.to_bondgraph()))
        molecules.sort(key=len)
        largemols = [m for m in molecules if (len(m)>1)]
        ligand_resid = str(list(set([atom.residue.resSeq for atom in largemols[0]]))[0])
        return ligand_resid

    def get_molecules(self):
        """Return the molecules detected by MDTraj.

        Returns
        -------
        list
            One entry per detected molecule; each entry is the set of atoms
            in that molecule, sorted smallest-first.
        """
        molecules = list(nx.connected_components(self.pose.topology.to_bondgraph()))
        molecules.sort(key=len)
        return molecules

    def get_ligand_centroid(self):
        """Use the MDTraj Trajectory object to find the centroid of the ligand.

        Returns
        -------
        list
            Three-element [X, Y, Z] centroid of the ligand atoms, in Angstrom
            (MDTraj coordinates are nm, hence the *10).
        """
        ligand_atoms = self.pose.topology.select("resSeq "+self.get_ligand_resid())
        centroid = np.mean(self.pose.xyz[0][ligand_atoms], axis=0)*10
        return list(centroid)

    def minimize(self):
        """Write the Prime input file to minimize the ligand-protein complex.

        The ligand is identified automatically (get_ligand_resid) and the
        Prime SITE_OPT job refines all residues within 5 A of it. The job can
        be executed any time afterwards with:
            $SCHRODINGER/prime <name>-min

        Returns
        -------
        string
            Path of the written <name>-min.inp options file.
        """
        # Identify the ligand.
        ligand_resid = self.get_ligand_resid()
        options = ["STRUCT_FILE\t", os.path.basename(self.complexfile),
                   "\nJOB_TYPE\tREFINE",
                   "\nPRIME_TYPE\tSITE_OPT",
                   "\nSELECT\tasl = fillres within 5.000000 ( res.num ", ligand_resid, ")",
                   "\nLIGAND\tasl = res.num ", ligand_resid,
                   "\nNPASSES\t1",
                   "\nINCRE_PACKING\tno",
                   "\nUSE_CRYSTAL_SYMMETRY\tno",
                   "\nUSE_RANDOM_SEED\tno",
                   "\nSEED\t0",
                   "\nOPLS_VERSION\tOPLS3e",
                   "\nEXT_DIEL\t80.00",
                   "\nUSE_MEMBRANE\tno"]
        jobname = self.name + "-min"
        with open(self.output_dir + jobname + ".inp", 'w') as f:
            f.write(''.join(options))
        #command = "/home/data/Schrodinger2018_3/prime " + jobname
        return self.output_dir + jobname + ".inp"
# In[35]:
# In[35]: scratch cells exercising Model on a sample IFD output file.
testfile = "/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/icb_1-out_pv.mae"
x = Model(testfile, "noligand")

# In[36]:
x.ligand

# In[27]: manual check of the proplister title extraction used by Model.
entry_titles = subprocess.run(["/home/data/Schrodinger2018_3/utilities/proplister",
                               "-p",
                               "s_m_title",
                               testfile], text=True, capture_output=True)

# In[28]:
type(entry_titles.stdout)

# In[29]:
entry_titles.stdout

# In[31]:
entry_titles.stdout.rstrip("\n").split('\n')[3].strip()

# BUGFIX: the stray notebook line below referenced `self` at module level
# (NameError on import) and called .rstrip on a CompletedProcess, which has
# no such attribute. Kept only as a comment for reference:
# self.ligand = entry_titles.stdout.rstrip("\n").split('\n')[3].strip()
# In[ ]:
# In[3]:
class Grid:
    """Build a Glide grid-generation job for a ligand-free protein model.

    The binding-site centre is derived from user-supplied residues rather
    than from a bound ligand.
    """
    def __init__(self, filename):
        """
        Load a ligand-free protein structure for grid generation.

        Parameters
        ----------
        filename : str
            PDB file containing a single protein without any ligands.
        """
        self.input_file = os.path.realpath(filename)
        self.name = os.path.splitext(self.input_file)[0].split('/')[-1]
        self.output_dir = os.path.dirname(self.input_file) + "/"
        # NOTE(review): hard-coded MODELLER prefix — only meaningful for
        # files named like 'Q9NYV8.B99990001.pdb'.
        self.maename = self.name.split('Q9NYV8.B99990')[-1]
        try:
            self.t = md.load(self.input_file)
        except IOError:
            print("Could not load file: " + self.input_file)

    def find_site_center(self, selected_residues):
        """
        Store the geometric centre of the given residues.

        Sets self.residue_centers (one centre per residue, in MDTraj's nm
        units) and self.site_center (in Angstrom, hence the *10).

        Parameters
        ----------
        selected_residues : list
            Residue sequence numbers (resSeq) drawn from the topology.
        """
        try:
            self.residue_centers = [
                np.mean(self.t.xyz[0][self.t.topology.select("resSeq " + str(r))], axis=0)
                for r in selected_residues
            ]
            self.site_center = np.mean(np.asarray(self.residue_centers), axis=0) * 10
        except IndexError:
            # BUGFIX: the old handler printed str(i), but the comprehension
            # variable is not in scope outside the comprehension in Python 3,
            # so the handler itself raised NameError. Report the whole
            # selection instead.
            print("Unable to select residues " + str(selected_residues) + ". Site not found.")

    def convert_to_mae(self):
        """
        Convert the input PDB file to a Maestro (.mae) file alongside it.
        """
        convert = subprocess.call(["/home/data/Schrodinger2019_1/utilities/structconvert",
                                   "-ipdb",
                                   self.input_file,
                                   "-omae",
                                   self.output_dir + self.name + ".mae"])

    def generate_grid(self):
        """
        Write the grid.in options file for the grid-generation job.

        Requires find_site_center() to have been called first (uses
        self.site_center).

        Returns
        -------
        str
            Path of the written <name>-grid.in file.
        """
        options = ["GRID_CENTER\t", str(self.site_center[0]), ",", str(self.site_center[1]), ",", str(self.site_center[2]), ",",
                   "\nGRIDFILE\t", self.name + "-grid.zip",
                   "\nINNERBOX\t10, 10, 10",
                   "\nOUTERBOX\t25, 25, 25",
                   "\nRECEP_FILE\t", "out_" + str(self.maename) + ".mae",
                   "\n"]
        grid_file = self.output_dir + self.name + "-grid.in"
        with open(grid_file, 'w') as f:
            f.write(''.join(options))
        return grid_file
# In[177]:
# In[177]-[88]: scratch cells exercising Grid on one homology model.
g = Grid("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/models/multi_align_2/Q9NYV8.B99990001.pdb")
g.find_site_center([69, 89, 175])
g.residue_centers
g.site_center
g.convert_to_mae()
g.generate_grid()

# In[181]-[182]: batch grid generation for every model in the directory.
models = glob.glob("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/models/multi_align_2/*pdb")
for m in models:
    g = Grid(m)
    g.find_site_center([89, 69, 175])
    g.generate_grid()

# In[92]:
gridjobs = glob.glob("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/screening/multi_align_2/*grid*in")

# In[171]-[175]: sanity-check which residues the selection string matches.
t = md.load("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/models/multi_align_2/Q9NYV8.B99990001.pdb")
for i in t.topology.select("resSeq 89 69 175"):
    # BUGFIX: was a Python 2 print statement ("print t...."), a SyntaxError
    # under the Python 3 used by the rest of this file.
    print(t.topology.atom(i).residue)
# In[ ]:
# In[3]:
class IFDProcessor:
    """Convert Induced Fit Docking results into a pose-viewer Maestro file."""

    def __init__(self, filename):
        """Record the IFD results file to be converted.

        Parameters
        ----------
        filename : str
            Maestro .maegz file of IFD results: multiple entries, each a
            ligand-protein complex.
        """
        self.input_file = os.path.realpath(filename)
        self.output_file = "No output yet"

    def run(self):
        """Split the IFD complexes into a pose-viewer file (protein first,
        ligands after), launching pv_convert.py in the file's directory.
        Updates self.output_file with the expected result path."""
        source_dir = os.path.dirname(self.input_file)
        converter = subprocess.Popen(["/home/data/Schrodinger2019_1/run",
                                      "pv_convert.py",
                                      "-mode",
                                      "split_pv",
                                      self.input_file,
                                      "-lig_last_mol"],
                                     cwd=source_dir)
        base, ext = os.path.splitext(os.path.basename(self.input_file))
        self.output_file = source_dir + "/" + base + "-out_pv" + ext
# In[12]:
# In[12]: gather all IFD result files for conversion.
ifds = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*mae")
# In[13]:
len(ifds)
# In[18]: single-file smoke test of the converter.
x = IFDProcessor(ifds[0])
# In[19]:
x.run()
# In[20]:
x.output_file
# In[21]: batch-convert every file (note: re-processes ifds[0] a second time).
for i in ifds:
    x = IFDProcessor(i)
    x.run()
# In[ ]:
# In[4]:
class ScreenConstructor:
    """Write the Glide grid-generation and docking job files for one Model."""
    def __init__(self, model_path, ligand, library_path):
        """Initialise the ScreenConstructor with the paths to the
        ligand-protein model and retrospective screening library files. The
        model path is used to add a Model object.

        Parameters
        ----------
        model_path : str
            Name of the docking output .maegz file, beginning with an entry
            consisting of a single molecule protein and followed by at least
            one entry consisting of a single molecule ligand.
        ligand : str
            Label for the ligand used to load the model.
        library_path : str
            Path to the retrospective screening library, containing the
            actives and decoys to be screened.
        """
        self.model = Model(model_path, ligand)
        self.library_path = os.path.realpath(library_path)
        self.output_dir = os.path.dirname(os.path.realpath(model_path)) + "/"

    def gridgen(self):
        """
        Write the Schrodinger input files and return the name of the grid.in
        file.
        """
        # Load the Model's coordinates - includes a check for ligand presence.
        if (self.model.load_status == False):
            self.model.load()
        # The grid is centred on the ligand centroid (Angstrom).
        ligand_centroid = self.model.get_ligand_centroid()
        # Grid generation options.
        options = ["GRID_CENTER\t", str(ligand_centroid[0]), ",", str(ligand_centroid[1]), ",", str(ligand_centroid[2]), ",",
                   "\nGRIDFILE\t", self.model.name + "-grid.zip",
                   "\nINNERBOX\t10, 10, 10",
                   "\nOUTERBOX\t25, 25, 25",
                   "\nRECEP_FILE\t", os.path.basename(self.model.protfile),
                   "\n"]
        grid_file = self.output_dir + self.model.name + "-grid.in"
        with open(grid_file, 'w') as f:
            f.write(''.join(options))
        return grid_file

    def dock(self):
        """Write the Schrodinger input files for SP docking against the
        generated grid with the specified library file, and return the path
        of the written dock.in file."""
        options = ["GRIDFILE\t", self.model.name + "-grid.zip",
                   "\nLIGANDFILE\t", os.path.basename(self.library_path),
                   "\nPRECISION\tSP",
                   "\n"]
        dock_file = self.output_dir + self.model.name + "-dock.in"
        with open(dock_file, 'w') as f:
            f.write(''.join(options))
        return dock_file
# In[5]:
class ScreenEvaluator:
def __init__(self, model_path, ligand):
    """Initialise the ScreenEvaluator with the screening results file.

    The Model object is only populated when a ligand is given, to allow
    rapid inspection of ScreenEvaluator objects.

    Parameters
    ----------
    model_path : str
        Name of the docking output .maegz file, beginning with an entry
        consisting of a single molecule protein and followed by at least one
        entry consisting of a single molecule ligand.
    ligand : str
        Name of the ligand used to load the model; passed directly to the
        Model() class. Any string corresponding to a ligand title, or
        'noligand' to trigger automatic identification of the ligand.
        Pass 'nomodel' to bypass model loading entirely.
    """
    self.input_file = os.path.realpath(model_path)
    self.name = os.path.splitext(os.path.basename(self.input_file))[0]
    self.output_dir = os.path.dirname(self.input_file) + "/"
    # Model loading is a slow step, and not required if a hitlist
    # has already been written (usually).
    if not (ligand == 'nomodel'):
        self.model = Model(model_path, ligand)
    # results: filled by write_hitlist/analysis code; metrics: summary stats.
    self.results = ''
    self.metrics = {}
def write_hitlist(self):
    """Write the docking hitlist to <name>-results.csv via proplister.

    Exported fields:
      s_m_title : the Maestro title field for each entry.
      r_i_docking_score : docking score for each hit.
      s_user_RetroScreenRole : type of compound (active/decoy).
      s_user_CompoundSet : compound set label.

    NOTE(review): the original docstring claimed the data is also loaded
    into self.results as a Pandas dataframe, but this method only writes
    the CSV — confirm where self.results is meant to be populated.
    """
    with open(self.output_dir + self.name + "-results.csv", 'w') as f:
        extractprotein = subprocess.call(["/home/data/Schrodinger2019_1/utilities/proplister",
                                          "-c",
                                          "-p",
                                          "s_m_title",
                                          "-p",
                                          "r_i_docking_score",
                                          "-p",
                                          "s_user_RetroScreenRole",
                                          "-p",
                                          "s_user_CompoundSet",
                                          self.input_file],
                                         stdout = f)
def compare_score(self, active_score, decoy_score):
    """Tally helper for the AUC: score one active/decoy pair.

    Lower docking scores are assumed to be better, so the tally credits
    1.0 when the active outscores the decoy, 0.5 on a tie, and 0.0
    otherwise.

    Parameters
    ----------
    active_score : float
        A docking score for an active compound.
    decoy_score : float
        A docking score for a decoy compound.
    """
    if active_score < decoy_score:
        return 1.0
    if active_score > decoy_score:
        return 0.0
    if active_score == decoy_score:
        return 0.5
def calc_auc(self, actives_scores, decoys_scores, n_actives, n_decoys):
    """Calculate the ROC AUC for a set of actives and decoys.

    The raw AUC (probability that an active scores better than a decoy) is
    scaled by the fraction of the input actives and decoys that actually
    appear in the score lists.

    Parameters
    ----------
    actives_scores : list
        List of docking scores for actives.
    decoys_scores : list
        List of docking scores for decoys.
    n_actives : int
        Number of active compounds used as input to this screen.
    n_decoys : int
        Number of decoy compounds used as input to this screen.

    Returns
    -------
    auc : float
        A scaled AUC reflecting the probability that an active will score
        better than a decoy.
    """
    n_a_found = float(len(actives_scores))
    n_d_found = float(len(decoys_scores))
    n_a_input = float(n_actives)
    n_d_input = float(n_decoys)
    # Pairwise comparison of every active against every decoy:
    # O(len(actives) * len(decoys)).
    score_compare_tally = float(0)
    for a in actives_scores:
        for d in decoys_scores:
            score_compare_tally += self.compare_score(a, d)
    # NOTE: empty score lists (or zero inputs) raise ZeroDivisionError here.
    auc_raw = float(score_compare_tally / (n_a_found * n_d_found))
    # Scale for unretrieved compounds: treat missing decoys as scoring worse
    # than every retrieved active, missing actives as contributing nothing.
    auc_scaled = float((auc_raw * (n_a_found / n_a_input) * (n_d_found / n_d_input)) + ((1 - (n_d_found/n_d_input)) * (n_a_found/n_a_input)))
    return auc_scaled
def calc_ef_and_sd(self, threshold, n_actives, n_decoys):
    """Calculate the enrichment factor at the selected threshold.

    The EF is calculated as the ratio between an arbitrarily-selected
    fraction of inactives and the fraction of actives retrieved at that
    fraction of inactives. This definition also corresponds to the
    gradient of a line constructured from the origin to the ROC-curve value
    coordinate at the selected inactive fraction.

    Requires ``self.roclist_x`` / ``self.roclist_y`` (the normalised ROC
    coordinates), which are populated by ``calc_enrichment``.

    Parameters
    ----------
    threshold : float
        The fraction of inactives at which to determine the EF.
        I.e., EF1% corresponds to a threshold of 0.01.
    n_actives : int
        Number of actives used as input to the screen
    n_decoys : int
        Number of decoys used as input to the screen.

    Returns
    -------
    ef : float
        The enrichment factor.
    sd : float
        The analytical error of the enrichment factor
    """
    # Find the first ROC point past the requested inactive fraction; `i`
    # and `a` deliberately leak out of the loop and are reused below.
    # NOTE(review): if no point satisfies i > threshold (e.g. threshold >= 1,
    # or empty ROC lists), `ef` is never assigned and the `return` raises
    # NameError — confirm callers always pass thresholds within range.
    for (i, a) in zip(self.roclist_x, self.roclist_y):
        if i > threshold:
            ef = float(a/i)
            break
    # Analytical error propagation for the EF at this point.
    # NOTE(review): a == 0 here makes np.log(a) emit -inf/warning; verify
    # the first retrieved compounds always include at least one active.
    s = (a*np.log(a)) / (i*np.log(i))
    sd = (((a*(1-a))/float(n_actives)) + ((s**2)*i*(1-i))/n_decoys) / (i**2)
    return (ef, sd)
def calc_ci95(self, variance):
    """Return the half-width of a 95% confidence interval for a metric,
    assuming the metric is normally distributed.

    Caveat from the original paper:
    "This will break down for systems with very large errors (generally
    due to having very few actives or decoys) ... When large errors do
    arise the exact magnitude of the error is generally less relevant than
    the fact that the error is large"

    Parameters
    ----------
    variance : float
        The variance of any metric

    Returns
    -------
    float
        The 95% confidence half-width; mean +/- this value bounds the
        95% confidence region for the metric.
    """
    std_dev = variance ** 0.5
    # 1.96 is the two-sided z-score for 95% coverage of a normal distribution.
    return 1.96 * std_dev
def precision_recall(self, hitlist):
    """Calculate the precision-recall profile and associated summary statistics
    (F1 score, Matthews correlation quality, Youden's J, PR-AUC).
    The summary statistics are defined for every point along the precision-recall
    curve, so will report the maximal value.

    Side effects: populates ``self.precision``, ``self.recall``, ``self.f1``,
    ``self.mcc`` and ``self.youdenj`` (one value per threshold position) and
    writes the 'prauc', 'f1', 'mcc', 'youden_j' and 'youden_j_score' keys of
    ``self.metrics``. Reads ``self.results['score']`` (set by
    ``calc_enrichment``) for the Youden's J score cutoff.

    Parameters
    ----------
    hitlist : list
        A list containing 'active' and 'decoy' entries. Can be generated from the self.results.role
        Pandas sheet column.
    """
    self.precision = []
    self.recall = []
    self.f1 = []
    self.mcc = []
    self.youdenj = []
    # Move the threshold through the hitlist
    # NOTE(review): the confusion matrix is recounted from scratch at every
    # threshold, making this O(n^2) in the hitlist length — acceptable for
    # small screens, worth making incremental for large ones.
    for i in range(1, len(hitlist)):
        toplist = hitlist[0:i]       # predicted positives at this cutoff
        bottomlist = hitlist[i:]     # predicted negatives at this cutoff
        tp = 0
        fp = 0
        tn = 0
        fn = 0
        # At each step, calculate the precision and recall
        for x in toplist:
            if x == 'active':
                tp += 1
            elif x == 'decoy':
                fp += 1
        for x in bottomlist:
            if x == 'decoy':
                tn += 1
            elif x == 'active':
                fn += 1
        #print len(toplist), len(bottomlist), "TP: ", tp, "FP: ", fp, "TN: ", tn, "FN: ", fn
        # NOTE(review): tp + fn == 0 (no actives in the hitlist) or an
        # all-tn/fn split making the MCC denominator 0 would raise
        # ZeroDivisionError — confirm hitlists always contain both roles.
        precision = float(tp) / float(tp + fp)
        recall = float(tp) / float(tp + fn)
        self.precision.append(precision)
        self.recall.append(recall)
        # Calculate the summary statistics
        # F1 is the harmonic mean of precision and recall; np.reciprocal(0.0)
        # yields inf (with a runtime warning) rather than raising.
        f1 = np.reciprocal(((np.reciprocal(recall)) + (np.reciprocal(precision)))/2)
        self.f1.append(f1)
        mcc = float((tp * tn) - (fp * fn)) / float(math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
        self.mcc.append(mcc)
        youdenj = (float(tp)/float(tp+fn)) + (float(tn)/float(tn+fp)) - 1
        self.youdenj.append(youdenj)
    # Calculate PRAUC
    self.metrics['prauc'] = metrics.auc(self.recall, self.precision)
    # Store the summary statistics
    self.metrics['f1'] = max(self.f1)
    self.metrics['mcc'] = max(self.mcc)
    self.metrics['youden_j'] = max(self.youdenj)
    # Store the score cutoff associated with the maximum Youden's J index
    self.metrics['youden_j_score'] = self.results['score'][self.youdenj.index(max(self.youdenj))]
def calc_enrichment(self, n_actives, n_decoys):
    """Calculate enrichment metrics for this screen. The metrics are stored in the
    self.metrics dictionary object.

    Side effects: populates ``self.results`` (deduplicated hitlist
    DataFrame), ``self.actives_scores`` / ``self.decoys_scores``,
    ``self.roclist`` and the normalised ROC coordinate arrays
    ``self.roclist_x`` / ``self.roclist_y``, then fills the AUC/EF keys of
    ``self.metrics``. Returns early (without raising) if the results CSV
    is missing.

    Parameters
    ----------
    n_actives : int
        The number of actives in the input library used to perform the screen.
    n_decoys : int
        The number of decoys in the input library used to perform the screen.
    """
    # Load hitlist
    # The CSV is expected at <output_dir>/<name>-results.csv; the first two
    # rows are skipped (header rows written by write_hitlist, presumably).
    try:
        self.results = pd.read_csv(self.output_dir + self.name + "-results.csv", names = ['title', 'score', 'role', 'compoundset'], skiprows = [0,1])
    except IOError:
        print("File not found: " + self.output_dir + self.name + "-results.csv")
        return
    # Remove duplicate items from the hitlist.
    self.results.drop_duplicates(subset='title', keep="first", inplace=True)
    # Extract score lists for actives and decoys
    self.actives_scores = self.results[self.results["role"] == 'active'].score.tolist()
    self.decoys_scores = self.results[self.results["role"] == 'decoy'].score.tolist()
    self.roclist = self.results.role.tolist()
    # Prepare ROC lists
    # Cumulative decoy (x) and active (y) counts as we walk down the ranked list.
    x = [0]
    y = [0]
    d_count = 0
    a_count = 0
    for i in self.roclist:
        if i == 'decoy':
            d_count += 1
        elif i == 'active':
            a_count += 1
        x.append(d_count)
        y.append(a_count)
    #Scale the coordinates by the total numbers
    # NOTE(review): a hitlist with zero decoys or zero actives retrieved
    # would divide by zero here — confirm upstream screens always retrieve both.
    self.roclist_x = np.asarray(x, dtype=float)/float(d_count)
    self.roclist_y = np.asarray(y, dtype=float)/float(a_count)
    # Populate the self.enrichment object with the metrics
    self.metrics['AUC'] = self.calc_auc(self.actives_scores, self.decoys_scores, n_actives, n_decoys)
    # q1/q2 terms appear to be the Hanley-McNeil (1982) AUC variance
    # approximation — TODO confirm against the cited method.
    q1 = self.metrics['AUC'] / float(2-self.metrics['AUC'])
    q2 = (2*(self.metrics['AUC'])**2) / (1 + self.metrics['AUC'])
    self.metrics['AUC_SD'] = (self.metrics['AUC']*(1-self.metrics['AUC']) + ((n_actives-1)*(q1-(self.metrics['AUC'])**2)) + ((n_decoys-1)*(q2-(self.metrics['AUC'])**2))) / float(n_actives * n_decoys)
    # Enrichment factors at 1/2/5/10% of inactives, with analytical errors.
    self.metrics['EF1'], self.metrics['EF1_SD'] = self.calc_ef_and_sd(0.01, n_actives, n_decoys)
    self.metrics['EF2'], self.metrics['EF2_SD'] = self.calc_ef_and_sd(0.02, n_actives, n_decoys)
    self.metrics['EF5'], self.metrics['EF5_SD'] = self.calc_ef_and_sd(0.05, n_actives, n_decoys)
    self.metrics['EF10'], self.metrics['EF10_SD'] = self.calc_ef_and_sd(0.10, n_actives, n_decoys)
    # 95% confidence half-widths derived from the variances above.
    self.metrics['AUC_CI95'] = self.calc_ci95(self.metrics['AUC_SD'])
    self.metrics['EF1_CI95'] = self.calc_ci95(self.metrics['EF1_SD'])
    self.metrics['EF2_CI95'] = self.calc_ci95(self.metrics['EF2_SD'])
    self.metrics['EF5_CI95'] = self.calc_ci95(self.metrics['EF5_SD'])
    self.metrics['EF10_CI95'] = self.calc_ci95(self.metrics['EF10_SD'])
def plot_roc(self, color):
    """Draw the ROC curve for this screen and save it to
    <output_dir>/<name>_roc.png.

    Parameters
    ----------
    color : str
        Key into the seaborn xkcd colour dictionary used for the curve.
    """
    decoys_seen = 0
    actives_seen = 0
    decoy_counts = [0]
    active_counts = [0]
    # Walk the ranked hitlist, recording cumulative counts of each role.
    for role in self.roclist:
        if role == 'decoy':
            decoys_seen += 1
        elif role == 'active':
            actives_seen += 1
        decoy_counts.append(decoys_seen)
        active_counts.append(actives_seen)
    # Normalise both axes to [0, 1] fractions of the totals retrieved.
    x_scale = np.asarray(decoy_counts, dtype=float) / float(decoys_seen)
    y_scale = np.asarray(active_counts, dtype=float) / float(actives_seen)
    plt.figure(figsize=(3,3))
    sns.set(context="notebook", style="ticks")
    # Dashed diagonal marks the random-selection baseline.
    plt.plot([0,1], [0,1], color=sns.xkcd_rgb["grey"], linestyle='--')
    plt.plot(x_scale, y_scale, sns.xkcd_rgb[color])
    plt.ylim(0,1)
    plt.ylabel("Actives")
    plt.xlim(0,1)
    plt.xlabel("Inactives")
    plt.savefig(self.output_dir + self.name + "_roc.png", dpi=300, format="png", bbox_inches="tight", transparent=True)
def plot_roc_marked(self, color, watch_compound, watch_role):
    """Plot the ROC curve and add text labels to compounds
    specified. The figure is saved to
    <output_dir>/<name>_roc_<watch_role>_<watch_compound>.png.

    Parameters
    ----------
    color : str
        The colour to plot the ROC curve in.
    watch_compound : str
        Text label to match against the "compoundset" property for
        selecting the marked compounds.
    watch_role : str
        Text label to match against the "role" property for selecting
        the marked compounds.
    """
    x = [0]
    y = [0]
    # Unscaled coordinates of the watched compounds, plus their titles.
    marked_y = []
    marked_x = []
    marked_labels = []
    d_count = 0
    a_count = 0
    # Walk the deduplicated hitlist (self.results, set by calc_enrichment)
    # accumulating the ROC counts and remembering where watched compounds fall.
    for i in range(0, len(self.results)):
        if (self.results.iloc[i]['role'] == 'decoy'):
            d_count += 1
        elif (self.results.iloc[i]['role'] == 'active'):
            a_count += 1
        if (self.results.iloc[i]['compoundset'] == watch_compound) and (self.results.iloc[i]['role'] == watch_role):
            marked_y.append(a_count)
            marked_x.append(d_count)
            marked_labels.append(self.results.iloc[i]['title'])
        x.append(d_count)
        y.append(a_count)
    #Scale the coordinates by the total numbers
    x_scale = np.asarray(x, dtype=float)/float(d_count)
    y_scale = np.asarray(y, dtype=float)/float(a_count)
    marked_x_scale = np.asarray(marked_x, dtype=float)/float(d_count)
    marked_y_scale = np.asarray(marked_y, dtype=float)/float(a_count)
    plt.figure(figsize=(6,6))
    sns.set(context="notebook", style="ticks")
    # Dashed diagonal marks the random-selection baseline.
    plt.plot([0,1], [0,1], color=sns.xkcd_rgb["grey"], linestyle='--')
    plt.plot(x_scale, y_scale, sns.xkcd_rgb[color])
    plt.scatter(marked_x_scale, marked_y_scale, color='k', marker='x', s=50)
    texts = []
    # Alternate label placement left/right of each marker to reduce overlap;
    # adjust_text() below then nudges any remaining collisions apart.
    align = 'right'
    for index,(i,j) in enumerate(zip(marked_x_scale, marked_y_scale)):
        if (align == 'right'):
            texts.append(plt.text(i+.02, j-0.01, marked_labels[index]))
            align = 'left'
        elif (align == 'left'):
            # Offset left proportionally to the label length as a crude
            # approximation of the rendered text width.
            texts.append(plt.text(i-0.02*len(marked_labels[index]), j-0.01, marked_labels[index]))
            align = 'right'
    plt.ylim(0,1)
    plt.ylabel("Actives")
    plt.xlim(0,1)
    plt.xlabel("Inactives")
    adjust_text(texts)
    plt.savefig(self.output_dir + self.name + "_roc_" + watch_role + "_" + watch_compound + ".png", dpi=300, format="png", bbox_inches="tight", transparent=True)
    plt.close()
def plot_pr(self, color):
    """Plot the Precision-Recall curve and save it to
    <output_dir>/<name>_pr.png.

    Requires ``self.precision`` / ``self.recall``, which are populated by
    ``precision_recall``.

    Parameters
    ----------
    color : str
        Key into the seaborn xkcd colour dictionary used for the curve.
    """
    plt.figure(figsize=(7,7))
    sns.set(context="notebook", style="ticks")
    # Draw faint iso-F1 contour lines (F1 = 0.2, 0.4, 0.6, 0.8) as
    # background reference; y = f*x / (2x - f) is precision as a function
    # of recall at constant F1.
    f_scores = np.linspace(0.2, 0.8, num=4)
    for f_score in f_scores:
        x = np.linspace(0.01, 1)
        y = f_score * x / (2 * x - f_score)
        l, = plt.plot(x[y >= 0], y[y>=0], color='gray', alpha=0.2)
        # y[45] places the annotation near the right-hand end of the
        # 50-point default linspace.
        plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
    plt.plot(self.recall, self.precision, sns.xkcd_rgb[color])
    plt.ylim(0,1)
    plt.ylabel("Precision")
    plt.xlim(0,1)
    plt.xlabel("Recall")
    plt.savefig(self.output_dir + self.name + "_pr.png", dpi=300, format="png", bbox_inches="tight", transparent=True)
    plt.close()
# In[41]:
# Notebook cell: evaluate a single completed docking screen.
x = "/home/data/mashas/tamir/T2R14_gmeiner/library3/ifd_1743-out_pv-dock_pv.maegz"
# In[42]:
# "nomodel" skips loading the structure; only the results CSV is needed.
s = ScreenEvaluator(x, "nomodel")
# In[43]:
# Library composition for this screen (used for AUC/EF scaling).
n_actives = 193
n_decoys = 731
# In[44]:
# Compute enrichment + precision-recall metrics and collect them in a
# one-row DataFrame tagged with the screen path.
s.calc_enrichment(n_actives, n_decoys)
s.precision_recall(s.roclist)
df = pd.DataFrame(s.metrics, index=[0])
df['screen'] = x
# In[45]:
# Score cutoff at the maximal Youden's J index (notebook display cell).
s.results['score'][s.youdenj.index(max(s.youdenj))]
# In[46]:
# Maximal Youden's J value (notebook display cell).
max(s.youdenj)
# In[48]:
# Persist the single-screen metrics.
df.to_csv("/home/data/mashas/tamir/T2R14_gmeiner/fastrocs/top1-1743-data.csv")
# # Putting it all together
# A generation object can be a wrapper for all of the functions that need to be run on all the screens in a generation. These are:
# 1. Make receptor grids
# 2. Dock the library
# 3. Save enrichment metrics
# 4. Minimize receptors in complex with convergence ligands
#
# One important thing to include is a way to check which steps finish successfully for each member. When there's an error, it would be convenient to have a simple way to re-run a step only on the ones that failed.
#
# Also, a better way to schedule jobs with a "check-in, check-out" system. At the moment, there's an arbitrarily coded delay time to make sure the license server has time to update the number of jobs licenses currently using a license to ensure that we don't submit a job when there are no licenses available. However, this doesn't allow for fast and accurate submission, so the load-balancing is inefficient (especially for minimization jobs). A system which keeps track of which jobs have started, are running, and have finished would be able to more finely manage this process.
# In[59]:
class Generation:
    """
    The Generation object indexes a collection of screens and provides a wrapper for functions
    that are run on all the screens.
    """
    def __init__(self, n_processes, members, library, n_actives, n_decoys):
        """
        Initialise a Generation object with the provided models. These can be docking output files
        (.maegz) or pregenerated grid (.zip) files.
        Parameters
        ----------
        n_processes : int
            Number of simultaneous processes through which to run.
        members : list
            List of screen filepaths.
        library : str
            Filepath for the screening library.
        n_actives : int
            Number of actives.
        n_decoys : int
            Number of decoys.
        """
        self.n_processes = n_processes
        self.members = [os.path.realpath(x) for x in members]
        # All generated files live alongside the first member.
        self.workdir = os.path.dirname(self.members[0]) + "/"
        self.library = os.path.realpath(library)
        self.n_actives = n_actives
        self.n_decoys = n_decoys
        self.pool = pp.ProcessPool(self.n_processes)
    def pool_min(self, min_input):
        """
        Parallel utility to write the minimization job input.
        """
        (x, ligand) = min_input
        m = Model(x, ligand)
        # If the model can't load, there is no pose for the specified ligand.
        # If the ligand isn't found in the file, then no pose for the ligand.
        # In these cases, don't want to run the minimization job, so don't
        # return any filename (implicitly returns None).
        m.load()
        if (m.load_status & m.ligandmatch):
            min_name = m.minimize()
            return min_name
    def write_minimize(self, ligand):
        """
        Write the minimization run files for the generation members.
        Parameters
        ----------
        ligand : str
            Title for the ligand to pass to Model.
        """
        min_input = [(x, ligand) for x in self.members]
        # Pregenerated grids (.zip) have no poses, so nothing can be minimized.
        if not (os.path.splitext(self.members[0])[-1] == '.zip'):
            self.pool.clear()
            self.pool.restart()
            output = self.pool.map(self.pool_min, min_input)
            self.pool.close()
            # May contain None entries for members that failed to load.
            self.mins = output
        else:
            print("The loaded models are pregenerated grids (.zips) - No minimization files written.")
    def prepare_next_generation(self, pattern, name):
        """
        Rename the minimization run files to specify the names for the next generation of model files
        that minimization produces. If the minimization output file already exists, or if
        the required input file is not present, the minimization run file is not renamed but rather
        stored in self.min_exists or self.min_unprepared, respectively.
        Parameters
        ----------
        pattern : str
            Substring on which to split each filename when deriving the new name.
        name : str
            Label to insert into the filename
        """
        self.min_exists = []
        self.min_unprepared = []
        self.next_gen = []
        for x in self.mins:
            # pool_min returns None for members without a usable pose.
            if x is not None:
                ext = os.path.splitext(x)[-1]
                rename = os.path.basename(x).split(pattern)[0]+name+"-min"+ext
                output = os.path.basename(x).split(pattern)[0]+name+"-min-out.maegz"
                # Check if the minimization parameter file already exists
                if (len(glob.glob(self.workdir+output)) == 1):
                    self.min_exists.append(self.workdir+rename)
                # Check if the required input file is present
                elif (len(glob.glob(self.workdir+os.path.basename(x).split(pattern)[0]+"-*complex.mae")) == 0):
                    self.min_unprepared.append(x)
                else:
                    shutil.copy2(x, self.workdir+rename)
                    self.next_gen.append(self.workdir+rename)
        self.mins = self.next_gen
    def run_minimize(self):
        """
        Perform the minimization runs.

        Polls the license server and only submits a job when fewer than 210
        PSP_PLOP licenses are in use. Job codes are collected in
        self.mins_jobcodes.
        """
        jobcodes = []
        if self.mins:
            for f in self.mins:
                submitted=False
                while not submitted:
                    if (self.get_used_licenses('Users of PSP_PLOP') < 210):
                        jobid = subprocess.run(["/home/data/Schrodinger2019_1/prime",
                                                "-HOST",
                                                "dragon_serial_mashaq",
                                                f], cwd = self.workdir, text=True, capture_output=True)
                        jobcodes.append(jobid.stdout.split()[-1])
                        submitted = True
                        # Give the license server time to register the new job.
                        time.sleep(5)
                    else:
                        time.sleep(10)
        self.mins_jobcodes = jobcodes
    def pool_split_by_mol(self, x):
        """
        Parallel worker function to split the provided minimization output file
        by molecule.
        Splits the filename by 'min', such that X-min-out.maegz is written
        as a multi-entry file to X-min.maegz.
        NOTE: This process RENAMES the ligand entry in the minimization output
        file, to the same name as the receptor. So, the ligand in these split
        files CANNOT be directly selected for grid generation.
        Parameters
        ----------
        x : str
            Filepath to output of minimization run. Must contain the characters
            'min' for filename manipulations.
        """
        subprocess.run(["/home/data/Schrodinger2019_1/run",
                        "/home/data/mashas/tamir/T2R14_gmeiner/automation/split_by_mol.py",
                        self.workdir+os.path.basename(x).split('min')[0]+'min-out.maegz',
                        self.workdir+os.path.basename(x).split('min')[0]+'min.maegz'],
                       cwd=self.workdir)
    def mins_to_members(self):
        """
        Split the minimization job output files into protein & ligand entries.
        Then transfer the list of split files to self.members and tests whether
        each output file can be globbed.
        """
        self.pool.clear()
        self.pool.restart()
        output = self.pool.map(self.pool_split_by_mol, self.mins)
        self.pool.close()
        self.members = [self.workdir+os.path.basename(x).split('min')[0]+"min.maegz" for x in self.mins]
        # Drop any expected output files that were not actually produced.
        # (glob.glob returns a list, so test emptiness rather than comparing
        # against '', and filter into a new list instead of mutating the
        # list while iterating over it.)
        self.members = [x for x in self.members if glob.glob(x)]
        print("Loaded "+str(len(self.members))+" screens from minimization.")
    def pool_sc(self, func_input):
        """
        Parallel worker function to run ScreenConstructor methods.
        Parameters
        ----------
        func_input : list
            Input parameters for the ScreenConstructor method:
            Element 0: File path to a .maegz for which the screen will be constructed.
            Element 1: String to identify the ligand for model loading. Can also be "noligand".
            Element 2: File path to screening library file.
        """
        sc = ScreenConstructor(func_input[0], func_input[1], func_input[2])
        grid_command = sc.gridgen()
        dock_command = sc.dock()
        return [grid_command, dock_command]
    def dock_from_grids(self, func_input):
        """
        A parallel utility to write dock.in files using pregenerated grid .zip files instead of
        .maegz docking output files.
        Parameters
        ----------
        func_input : list
            Input parameters for the utility:
            Element 0: File path to a grid .zip for which the screen will be constructed.
            Element 1: String to identify the ligand for model loading. Can also be "noligand".
                       (Unused here; kept for signature parity with pool_sc.)
            Element 2: File path to screening library file.
        """
        options = ["GRIDFILE\t", "./" + os.path.basename(func_input[0]),
                   "\nLIGANDFILE\t", "./" + os.path.basename(func_input[2]),
                   "\nPRECISION\tSP",
                   "\n"]
        dock_name = self.workdir + os.path.basename(func_input[0]).split('grid')[0] + "dock.in"
        with open(dock_name, 'w') as f:
            f.write(''.join(options))
        return [func_input[0], dock_name]
    def construct_screen(self, ligand):
        """
        Runs the ScreenConstructor.gridgen() and .dock() methods on each member of the generation.
        Alternatively, if the first member of self.members is a .zip file, this method will
        write dock.in files using the members as pre-generated grids.
        NOTE: If using split files from self.mins_to_members, the ligand titles will be renamed.
        Must use 'noligand' to allow automatic selection of the ligand.
        Parameters
        ----------
        ligand : str
            Title of the ligand to use for grid centering.
        """
        func_input = [[x, ligand, self.library] for x in self.members]
        if (os.path.splitext(self.members[0])[-1] == '.zip'):
            self.pool.clear()
            self.pool.restart()
            output = self.pool.map(self.dock_from_grids, func_input)
            self.pool.close()
        else:
            self.pool.clear()
            self.pool.restart()
            output = self.pool.map(self.pool_sc, func_input)
            self.pool.close()
        self.grids = [x[0] for x in output]
        self.docks = [x[1] for x in output]
    def get_used_licenses(self, label):
        """
        Query the Schrodinger license server to get the number of currently occupied
        licenses for the given feature label.

        NOTE(review): if the label is never found in the licadmin output this
        returns None, and callers comparing the result with `<` will raise
        TypeError under Python 3 — confirm the labels always match.
        """
        checklic = subprocess.run(["/home/data/Schrodinger2019_1/licadmin", "STAT"], text=True, capture_output=True)
        for l in checklic.stdout.splitlines():
            if label in l:
                print(l)
                used_licenses = l.split('licenses')[1].split()[-1]
                return int(used_licenses)
    def run_glide(self, input_files):
        """
        Submit the provided grid.in or dock.in files to the Schrodinger job control
        queueing system.
        Configured to use the "dragon_serial_mashaq" queue. Will submit jobs if fewer
        than 350 of the licenses matching glide_label are in use.
        Parameters
        ----------
        input_files : list
            File paths for grid.in or dock.in files
        """
        jobcodes = []
        # Note that the label must include the 'Users of' prefix
        # to avoid matching subsequent entries for the same category
        glide_label = 'Users of GLIDE_SUITE_17JUN2019'
        #glide_label = 'Users of GLIDE_SUITE_22JUN2017'
        #glide_label = 'Users of GLIDE_SUITE_05JUN2019'
        for f in input_files:
            submitted = False
            while not submitted:
                if (self.get_used_licenses(glide_label) < 350):
                    jobid = subprocess.run(["/home/data/Schrodinger2019_1/glide",
                                            "-HOST",
                                            "dragon_serial_mashaq",
                                            f], cwd = self.workdir, text=True, capture_output=True)
                    jobcodes.append(jobid.stdout.split()[-1])
                    submitted = True
                    # Give the license server time to register the new job.
                    time.sleep(5)
                else:
                    #print("Number of submitted jobcodes: " + str(len(jobcodes)))
                    time.sleep(1*60) # 1min sleep
        return jobcodes
    def run_grids(self):
        """
        Perform the grid generation runs.
        """
        if self.grids:
            self.grids_jobcodes = self.run_glide(self.grids)
        else:
            print("No known grid.in files - no grids generated.")
    def run_docking(self):
        """
        Perform the docking runs.
        """
        if self.docks:
            self.docks_jobcodes = self.run_glide(self.docks)
        else:
            print("No known dock.in files - no docking runs started.")
    def load_docking_results(self):
        """
        Generate the filepaths for the docking results, and test if each result file actually exists.
        """
        self.screens = [x.split('dock')[0]+"dock_pv.maegz" for x in self.docks]
        # Keep only result files that actually exist (glob.glob returns a
        # list, so test emptiness rather than comparing against '', and
        # filter instead of mutating the list while iterating over it).
        self.screens = [x for x in self.screens if glob.glob(x)]
        print("Loaded "+str(len(self.screens))+" screens from docking.")
    def pool_hitlist_writer(self, x):
        """
        A parallel utility to write the hitlist file for the provided screen.
        Parameters
        ----------
        x : str
            Filepath for a docking result file.
        """
        se = ScreenEvaluator(x, 'noligand')
        se.write_hitlist()
    def analyse_enrichment(self, name):
        """
        Analyse the enrichment metrics for the provided screens, and save the collected
        results to the provided filename.
        Parameters
        ----------
        name : str
            Filename for the enrichment metrics CSV file.
        """
        data = []
        # First, write hitlists in parallel
        self.pool.clear()
        self.pool.restart()
        output = self.pool.map(self.pool_hitlist_writer, self.screens)
        self.pool.close()
        for s in self.screens:
            se = ScreenEvaluator(s, 'nomodel')
            se.calc_enrichment(self.n_actives, self.n_decoys)
            se.precision_recall(se.roclist)
            df = pd.DataFrame(se.metrics, index=[0])
            df['screen'] = s
            data.append(df)
        self.metrics = pd.concat(data)
        self.metrics.to_csv(self.workdir+name)
    def load_reference_screens(self, refs):
        """
        Load the enrichment metric data for the given screens.
        Parameters
        ----------
        refs : list
            Filepaths for reference screens to load.
        """
        self.refs = refs
        data = []
        # # First, write hitlists in parallel
        # self.pool.clear()
        # self.pool.restart()
        # output = self.pool.map(self.pool_hitlist_writer, self.screens)
        # self.pool.close()
        for s in self.refs:
            se = ScreenEvaluator(s, 'nomodel')
            se.write_hitlist()
            se.calc_enrichment(self.n_actives, self.n_decoys)
            df = pd.DataFrame(se.metrics, index=[0])
            df['screen'] = s
            data.append(df)
        self.refs = pd.concat(data)
    def metric_distance(self, row):
        """
        Calculate the distance metric from maximum enrichment.

        The distance is the sum of the relative shortfalls from the
        generation-best AUC and EF10; smaller is better.
        """
        auc_dist = (self.metrics['AUC'].max() - row['AUC'])/self.metrics['AUC'].max()
        ef10_dist = (self.metrics['EF10'].max() - row['EF10'])/self.metrics['EF10'].max()
        return auc_dist + ef10_dist
    def fetch_top_screens(self, n):
        """
        Return the names of the n-top enriching screens, as measured
        by the distance metric calculated by "self.metric_distance".
        Parameters
        ----------
        n : int
            The number of top screens to fetch.
        """
        self.metrics['metric_distance'] = self.metrics.apply(lambda row: self.metric_distance(row), axis=1)
        self.topmodels = self.metrics.nsmallest(n, 'metric_distance')
        return self.topmodels['screen'].tolist()
    def save_convergence_poses(self, name, screens, ligands):
        """
        Saves a file containing the receptors and specified ligands from the given screens.
        The output file is titled 'name_convergence.maegz'.
        Parameters
        ----------
        name : str
            A label to be prefixed to the saved convergence pose file.
        screens : list
            List of screens to merge and filter.
        ligands : list
            List of title for ligands selected to save out poses for.
        """
        # Write the merge manifest consumed by glide_merge below.
        with open(self.workdir+"screens.merge", 'w') as f:
            for s in screens:
                f.write(s+"\n")
        subprocess.run(["/home/data/Schrodinger2019_1/utilities/glide_merge",
                        "-o",
                        "merge.maegz",
                        "-epv",
                        "-f",
                        "screens.merge"],
                       cwd = self.workdir)
        # Dump all entry titles so the receptor/ligand entries can be selected.
        subprocess.run(["/home/data/Schrodinger2019_1/utilities/proplister",
                        "-c",
                        "-p",
                        "s_m_title",
                        "merge.maegz",
                        "-o",
                        "filter_titles.csv"],
                       cwd = self.workdir)
        df = pd.read_csv(self.workdir+"filter_titles.csv")
        # Detect receptor entries - once minimized, these are set to '0'
        #receptors = [x for x in df['s_m_title'].tolist() if ':' in x]
        receptors = ['0']
        retrieve = receptors+ligands
        with open(self.workdir+"filter.titles", 'w') as f:
            for t in retrieve:
                f.write(t+"\n")
        subprocess.run(["/home/data/Schrodinger2019_1/utilities/maesubset",
                        "-title_list_file",
                        "filter.titles",
                        "merge.maegz",
                        "-o",
                        name+"convergence.maegz"],
                       cwd = self.workdir)
# In[60]:
# Notebook cells: drive a full generation (grid gen -> docking -> enrichment).
initial_members = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*out_pv.mae*")
# In[61]:
len(initial_members)
# In[62]:
g1 = Generation(20, initial_members, "/home/data/mashas/tamir/T2R10_cucurbitacins/ligands/bitterdb_tp_tn_dude_decoys.mae", 45, 408)
# In[63]:
#g1.construct_screen('noligand')
# In[64]:
grids = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*grid.in")
# In[65]:
g1.grids = grids
# In[56]:
#g1.run_grids()
# In[66]:
grids = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*grid.zip")
# In[67]:
len(grids)
# In[68]:
# Assemble self.docks
docks = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*dock.in")
# In[69]:
docks[0]
# In[70]:
len(docks)
# In[71]:
'139' in docks[0]
# In[72]:
# Remove completed runs
completed = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*dock*maegz")
print(len(completed))
# Iterate over a copy so removals don't skip elements, and remove each
# dock.in at most once (the original nested loop mutated `docks` while
# iterating it and could call remove() twice for the same element).
for x in docks[:]:
    name = x.split('icb')[-1].split('out_pv')[0]
    if any(name in i for i in completed):
        docks.remove(x)
# In[73]:
len(docks)
# In[74]:
name
# In[75]:
g1.docks = docks
# In[76]:
len(g1.docks)
# In[77]:
g1.run_docking()
# In[78]:
docks = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*dock_pv.maegz")
# In[79]:
len(docks)
# In[80]:
# Remove from
# In[81]:
g1.docks = docks
# In[82]:
g1.load_docking_results()
# In[83]:
g1.analyse_enrichment('g1_allmetrics.csv')
# In[ ]:
g1.metrics = pd.read_csv(g1.workdir+'g1_allmetrics.csv')
# In[ ]:
g1.workdir
# In[ ]:
g1.fetch_top_screens(10)
# In[ ]:
g1.metrics.to_csv(g1.workdir+'g1_allmetrics_with_rank.csv')
# In[ ]:
# Scatter plot of ROC AUC vs PR AUC across the generation's screens.
sns.set(context="notebook", style="ticks")
plt.figure(figsize=(9,6))
sns.scatterplot(x='AUC',
                y='prauc',
                marker='.',
                s=60,
                alpha=1,
                #hue="dataset",
                #style="dataset",
                edgecolor='',
                #palette=cmap,
                color=sns.xkcd_rgb['navy'],
                data=g1.metrics)
#plt.xlim(0.4, 0.8)
plt.xlabel("ROC AUC")
#plt.ylim(0, 6.5)
plt.ylabel("Precision-Recall AUC")
plt.savefig("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/g1_enrichment.png", format='png', dpi=300, bbox_inches='tight')
plt.show()
plt.close()
# In[ ]:
# In[74]:
def plot_pr_multi(screens, color, name, n_actives=218, n_decoys=1121, baseline=float(10)/float(84)):
    """Plot overlaid Precision-Recall curves for multiple screens.

    The library composition and the random-baseline precision were
    previously hard-coded inside the function; they are now keyword
    parameters whose defaults preserve the original behaviour.

    Parameters
    ----------
    screens : list
        Filepaths of docking result files to evaluate and plot.
    color : list
        Seaborn xkcd colour names, one per screen.
    name : str
        Output filepath for the saved PNG figure.
    n_actives : int, optional
        Number of actives in the screening library (default 218).
    n_decoys : int, optional
        Number of decoys in the screening library (default 1121).
    baseline : float, optional
        Precision of a random classifier, drawn as a dashed horizontal
        line (default 10/84).
    """
    precision = []
    recall = []
    for s in screens:
        se = ScreenEvaluator(s, "nomodel")
        #se.write_hitlist()
        se.calc_enrichment(n_actives, n_decoys)
        se.precision_recall(se.roclist)
        precision.append(se.precision)
        recall.append(se.recall)
    # Plot
    plt.figure(figsize=(8,8))
    sns.set(context="poster", style="ticks")
    # Faint iso-F1 reference contours (F1 = 0.2 .. 0.8).
    f_scores = np.linspace(0.2, 0.8, num=4)
    for f_score in f_scores:
        x = np.linspace(0.01, 1)
        y = f_score * x / (2 * x - f_score)
        l, = plt.plot(x[y >= 0], y[y>=0], color='gray', alpha=0.2)
        plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.8, y[45] + 0.02))
    for (p, r, c) in zip(precision, recall, color):
        plt.plot(r, p, sns.xkcd_rgb[c])
    plt.ylim(0,1)
    plt.ylabel("Precision")
    plt.xlim(0,1)
    plt.xlabel("Recall")
    # Dashed line marking the precision of a random classifier.
    plt.hlines([baseline], 0, 1, linestyles='dashed')
    plt.savefig(name, dpi=300, format="png", bbox_inches="tight", transparent=True)
    plt.show()
    plt.close()
# In[75]:
import random
# In[76]:
n = 10
# random.sample requires a sequence under Python 3 (dict views are not
# accepted), so materialise the colour names with list() first.
plot_pr_multi(g1.fetch_top_screens(n), random.sample(list(sns.xkcd_rgb), n), "/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/screening/multi_align_2_ifd/library4_g1_top10_pr-multi-curve.png")
# ### Report PRAUC and MCC
# In[24]:
n = 10
# Print the headline enrichment metrics for each of the top-n screens.
for s in g1.fetch_top_screens(n):
    se = ScreenEvaluator(s, "nomodel")
    #se.write_hitlist()
    n_actives = 10
    n_decoys = 74
    se.calc_enrichment(n_actives, n_decoys)
    se.precision_recall(se.roclist)
    # print() call (the original Python 2 print statement is a SyntaxError
    # under Python 3, which this file otherwise requires).
    print(se.metrics['AUC'], se.metrics['AUC_SD'], se.metrics['prauc'], se.metrics['mcc'], se.metrics['f1'])
# In[167]:
se.metrics
# In[ ]:
# ifdflow/ifdflow_notebook_22_08_2019.py
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import glob
import subprocess
import time
import math
import shutil
import pathos.pools as pp
import numpy as np
import pandas as pd
import mdtraj as md
import networkx as nx
from sklearn import metrics
import matplotlib.pyplot as plt
from adjustText import adjust_text
import seaborn as sns
# # Notes
# This notebook will contain a class and methods that enable the automatic refinement of homology models for virtual screening. The process will require the following:
# 1. A retrospective screening library containing known actives and decoys.
# 2. Selection criteria for the pose to converge upon, consisting of a ligand identity and the number of top-ranked models required to converge. When this pose is formed by the requested number of the top-ranked models, the refinement ends.
# 3. Binding site location.
#
# The following methods will be defined:
# 1. Screening - to conduct the screen of the specified library at a model in the selected binding site location.
# 2. Minimization - to minimize the binding site residues around the specified convergence pose.
# 3. Enrichment - to calculate the enrichment metrics for a completed screen.
# 4. Convergence - to detect whether the top-ranked models from a generation have converged.
# 5. Refine - a wrapper method to perform the minimization and screening methods at the generation level, since the entire generation is refined simultaneously.
#
# The classes used to structure this process:
#
# "Generation" - a collection of "Screens". The Convergence method operates at the "Generation" level.
#
# "Screen" - a single screening result, consisting of a protein with a list of hits retrieved by the protein. Each screening result will have its enrichment metrics associated with it, so that the top-ranked models can be easily selected. The "Screening" and "Enrichment" methods operate at the "Screen" level. In particular, the "Screening" method populates a "Screen" by running a virtual screening calculation on the "Screen" object's protein model.
#
# "Model" - contains a single protein conformation. This object will have the "Minimization" method as well as methods for exporting the protein coordinates.
#
# # Thoughts
# Do I need to build a job monitor to make this work? Some kind of process that can schedule jobs and report the results back when the jobs finish? Otherwise, the whole script could be stuck waiting for a single docking job to finish.
#
# Definitely need a way to schedule multiple jobs simultaneously. Could do this with a mix of manual and automated - for example, could write out a big file of Schrodinger terminal commands and then just manually give that to the Schrodinger server in Bash. Then, could re-run this notebook on the output of those jobs when they finish - that way, the notebook does the heavy lifting of checking for convergence and writing job submission files, but doesn't have to be aware of when the jobs finish. An added bonus, the notebook won't be able to automatically submit jobs so things won't get out of hand by accident.
#
# This means that a Screen object should be able to be created from a .maegz file, for easy input of the completed jobs when they finish. Simple way to do this is to have two kinds of Screen object - a ScreenConstructor, and a ScreenEvaluator
# I'd like to be able to do the convergence check within MDTraj, without needing to run the RMSD calculations through Maestro. But, this might cause problems with the binding site location when starting a new generation, because MDTraj and Maestro might shift around coordinate spaces / residue numbering. Watch out for this. So far the coordinate space seems to remain consistent.
# File Nomenclature:
#
# Models are named by Generation, Screen, and Model: "0-1-2.pdb"
#
# Screens are named by Generation and Screen: "0-1_{grid/dock}.maegz"
# In[ ]:
# In[2]:
class Model:
    """The protein-ligand complex coordinates, with a minimize method.

    Attributes
    ----------
    pose : mdtraj.Trajectory
        The protein-ligand complex, populated by load().
    ligand : str
        Maestro title of the ligand used to assemble the complex.
    ligandmatch : bool
        True if the requested ligand title was found in the input file.
    load_status : bool
        True once load() has completed.
    """
    def __init__(self, filename, ligand):
        """
        Initialise the Model from a Maestro pose-viewer file.

        The input file must contain multiple entries, with the first entry
        containing just the protein and subsequent entries containing ligands.
        This is the default structure for docking output .maegz files.
        The protein-only entry is extracted using 'maesubset', and is used in
        grid generation. The MDTraj Trajectory object, however, must contain a
        ligand to enable centroid position calculations, so the complex is
        assembled with 'structcat' before loading into MDTraj (see load()).

        Parameters
        ----------
        filename : str
            Filename for the ligand-protein coordinates. The file must
            be a Maestro format .maegz file containing a single protein molecule
            as the first entry, and any number of ligand molecules in
            subsequent entries.
        ligand : str
            Maestro title ('s_m_title') of the ligand entry to use when
            assembling the complex. This ligand will feature in the model
            minimization, and so is an important choice as it will guide the
            model architecture during refinement. The special value 'noligand'
            selects the first ligand listed in the input file instead, which
            is useful when the file does not come from a docking run
            containing the desired convergence ligand.
        """
        # Cast the provided filename as an absolute path to enable processing in
        # the same directory.
        self.input_file = os.path.realpath(filename)
        self.name = os.path.splitext(os.path.basename(self.input_file))[0]
        self.output_dir = os.path.dirname(self.input_file) + "/"
        self.load_status = False
        self.ligand = ligand
        if (self.ligand == 'noligand'):
            # All we really need from the ligand is the binding-site location,
            # so the first ligand in the file will do. proplister prints the
            # entry titles; the first ligand title sits on the fourth output
            # line (after the header lines and the protein entry).
            entry_titles = subprocess.run(["/home/data/Schrodinger2018_3/utilities/proplister",
                                           "-p",
                                           "s_m_title",
                                           self.input_file], text=True, capture_output=True)
            self.ligand = entry_titles.stdout.rstrip("\n").split('\n')[3].strip()
        checkligand = subprocess.check_output(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                               "-title",
                                               self.ligand,
                                               self.input_file],
                                              cwd = os.path.dirname(self.input_file))
        # FIX: check_output() returns bytes under Python 3, so the previous
        # comparison (checkligand == '') could never be True and ligandmatch
        # was always reported as True. Truthiness works for both str and bytes:
        # an empty maesubset result means the title was not found.
        self.ligandmatch = bool(checkligand)
    def load(self):
        """
        Run the Model's file-conversion routines and load the MDTraj object.

        This will perform multiple operations:
        1. The protein structure is extracted to <self.name>-protein.mae
        2. All entries matching the provided ligand name are saved to
           <self.name>-matchedligands.mae.
        3. The ligand structure is extracted to <self.name>-ligand.mae
        4. The protein-ligand complex is assembled to <self.name>-complex_pv.mae
           (a two-entry Maestro pose-viewer file).
        5. The pose-viewer complex is merged to a single Maestro entry.
        6. The merged complex is saved to PDB to allow reading by MDTraj, saved
           to <self.name>-complex.pdb.
        The resulting MDTraj Trajectory object is stored in self.pose.
        """
        # ASSUMPTION: the first entry in the pose-viewer file is the receptor.
        # Usually, this will be the case for docking output.
        if (self.ligandmatch == False):
            print("No ligand match for this model - cancel loading.")
            return
        with open(self.output_dir + self.name + "-protein.mae", 'w') as f:
            extractprotein = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                               "-n",
                                               "1",
                                               self.input_file],
                                              stdout = f,
                                              cwd = os.path.dirname(self.input_file))
        # FIX: every extraction must finish before a downstream step reads its
        # output file. Waiting on each process replaces the earlier fixed
        # time.sleep() workaround, which was racy (the file was sometimes not
        # yet written when the next tool tried to read it).
        extractprotein.wait()
        with open(self.output_dir + self.name + "-matchedligands.mae", 'w') as f:
            fetchligands = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                             "-title",
                                             self.ligand,
                                             self.input_file],
                                            stdout = f,
                                            cwd = os.path.dirname(self.input_file))
        fetchligands.wait()
        with open(self.output_dir + self.name + "-ligand.mae", 'w') as f:
            extractligand = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/maesubset",
                                              "-n",
                                              "1",
                                              self.output_dir + self.name + "-matchedligands.mae"],
                                             stdout = f,
                                             cwd = os.path.dirname(self.input_file))
        extractligand.wait()
        merge = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/structcat",
                                  "-imae",
                                  self.output_dir + self.name + "-protein.mae",
                                  "-imae",
                                  self.output_dir + self.name + "-ligand.mae",
                                  "-omae",
                                  self.output_dir + self.name + "-complex_pv.mae"])
        # FIX: structcat must finish before pv_convert.py reads the _pv file.
        merge.wait()
        complexation = subprocess.call(["/home/data/Schrodinger2018_3/run",
                                        "pv_convert.py",
                                        "-mode",
                                        "merge",
                                        self.output_dir + self.name + "-complex_pv.mae"])
        writepdb = subprocess.call(["/usr/people/mashagr/tamird/bin/silico1.14/bin/mol_combine",
                                    self.output_dir + self.name + "-protein.mae",
                                    self.output_dir + self.name + "-ligand.mae",
                                    "-O",
                                    self.output_dir + self.name + "-complex.pdb",
                                    "-o",
                                    "pdb",
                                    "-dr"])
        try:
            self.pose = md.load(self.output_dir + self.name + "-complex.pdb")
        except IOError:
            # NOTE(review): on failure self.pose stays unset, but load_status
            # is still flipped to True below — callers relying on load_status
            # may hit AttributeError later; confirm intended behaviour.
            print("Could not load file: " + self.output_dir + self.name + "-complex.pdb")
        self.complexfile = self.output_dir + self.name + "-complex-out_complex.mae"
        self.protfile = self.output_dir + self.name + "-protein.mae"
        self.load_status = True
    def get_ligand_resid(self):
        """Use the MDTraj Trajectory object to automatically identify the ligand
        residue name. This process relies on the ligand consisting of a single
        molecule possessing a single residue name. Single-atom molecules are
        explicitly ignored for this step, as some protein hydrogen atoms are
        not always correctly identified as protein. The ligand residue is then
        selected as the residue possessing the smallest number of atoms.

        Returns
        -------
        str
            A string containing the residue number of the ligand.
        """
        molecules = list(nx.connected_components(self.pose.topology.to_bondgraph()))
        molecules.sort(key=len)
        # Ignore single-atom "molecules" (stray hydrogens); the ligand is the
        # smallest remaining molecule.
        largemols = [m for m in molecules if (len(m)>1)]
        ligand_resid = str(list(set([atom.residue.resSeq for atom in largemols[0]]))[0])
        return ligand_resid
    def get_molecules(self):
        """Return the molecules detected by MDTraj.

        Returns
        -------
        list
            The length of the list corresponds to the number of molecules
            detected. Each element is the set of atoms in one molecule,
            sorted by increasing molecule size.
        """
        molecules = list(nx.connected_components(self.pose.topology.to_bondgraph()))
        molecules.sort(key=len)
        return molecules
    def get_ligand_centroid(self):
        """Use the MDTraj Trajectory object to find the centroid of the ligand.

        Returns
        -------
        list
            A three-element list containing the X, Y, and Z coordinates of the
            position corresponding to the centroid of the ligand atoms,
            in Angstrom (MDTraj stores nm, hence the *10).
        """
        ligand_atoms = self.pose.topology.select("resSeq "+self.get_ligand_resid())
        centroid = np.mean(self.pose.xyz[0][ligand_atoms], axis=0)*10
        return list(centroid)
    def minimize(self):
        """Write the Prime input file to minimize the ligand-protein
        complex contained in the model object.

        The ligand is identified automatically via get_ligand_resid(). The
        resulting job can be executed later in the terminal with:
            $SCHRODINGER/prime <jobname>

        Returns
        -------
        str
            Path to the written Prime .inp options file. (The previous
            docstring described a 'name' parameter and a shell-command return
            value; neither existed.)
        """
        # Identify the ligand
        ligand_resid = self.get_ligand_resid()
        options = ["STRUCT_FILE\t", os.path.basename(self.complexfile),
                   "\nJOB_TYPE\tREFINE",
                   "\nPRIME_TYPE\tSITE_OPT",
                   "\nSELECT\tasl = fillres within 5.000000 ( res.num ", ligand_resid, ")",
                   "\nLIGAND\tasl = res.num ", ligand_resid,
                   "\nNPASSES\t1",
                   "\nINCRE_PACKING\tno",
                   "\nUSE_CRYSTAL_SYMMETRY\tno",
                   "\nUSE_RANDOM_SEED\tno",
                   "\nSEED\t0",
                   "\nOPLS_VERSION\tOPLS3e",
                   "\nEXT_DIEL\t80.00",
                   "\nUSE_MEMBRANE\tno"]
        jobname = self.name + "-min"
        with open(self.output_dir + jobname + ".inp", 'w') as f:
            f.write(''.join(options))
        return self.output_dir + jobname + ".inp"
# In[35]:
# --- Notebook scratch cells: manual checks of Model's ligand auto-detection ---
testfile = "/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/icb_1-out_pv.mae"
x = Model(testfile, "noligand")
# In[36]:
# Inspect the auto-detected ligand title.
x.ligand
# In[27]:
# Re-run the proplister call by hand to inspect its raw output.
entry_titles = subprocess.run(["/home/data/Schrodinger2018_3/utilities/proplister",
                      "-p",
                      "s_m_title",
                      testfile], text=True, capture_output=True)
# In[28]:
type(entry_titles.stdout)
# In[29]:
entry_titles.stdout
# In[31]:
# The ligand title lives on the fourth line of the proplister output.
entry_titles.stdout.rstrip("\n").split('\n')[3].strip()
# In[ ]:
# NOTE(review): leftover scratch line — 'self' is undefined at module level and
# CompletedProcess has no rstrip(), so this cell fails if executed. It looks
# like a draft of the line inside Model.__init__ (entry_titles.stdout...);
# kept for the record, do not run.
self.ligand = entry_titles.rstrip("\n").split('\n')[3].strip()
# In[ ]:
# In[3]:
class Grid:
    """Glide grid generation for a ligand-free protein structure.

    The binding site is located from user-supplied boundary residues rather
    than from a bound ligand (the input PDB has no ligand).
    """
    def __init__(self, filename):
        """
        Load a ligand-free protein structure for grid generation.

        In some cases, protein structure files may be generated without
        any bound or docked ligands. Generating a grid from these structures
        requires identifying a binding site independently of any bound
        ligand. This is done by specifying a set of residues defining the
        borders of the site and defining an equidistant center
        (see find_site_center()).

        Parameters
        ----------
        filename : str
            PDB file containing a single protein without any ligands.
        """
        self.input_file = os.path.realpath(filename)
        self.name = os.path.splitext(self.input_file)[0].split('/')[-1]
        self.output_dir = os.path.dirname(self.input_file) + "/"
        # NOTE(review): hard-coded MODELLER prefix for this project's homology
        # models (UniProt Q9NYV8 / TAS2R14) — confirm before reusing elsewhere.
        self.maename = self.name.split('Q9NYV8.B99990')[-1]
        try:
            self.t = md.load(self.input_file)
        except IOError:
            print("Could not load file: " + self.input_file)
    def find_site_center(self, selected_residues):
        """
        Identify the center of the provided residues.

        Populates self.residue_centers (per-residue centroids, nm) and
        self.site_center (their mean, converted to Angstrom).

        Parameters
        ----------
        selected_residues : list
            Collection of residue sequence numbers (resSeq) drawn from
            the topology of the file.
        """
        # FIX: the original computed the centers in a list comprehension and
        # then referenced the comprehension variable 'i' inside the except
        # handler. Comprehension variables do not leak scope in Python 3, so
        # the handler itself raised NameError. An explicit loop keeps the
        # failing residue number available for the error message.
        try:
            self.residue_centers = []
            for i in selected_residues:
                self.residue_centers.append(np.mean(self.t.xyz[0][self.t.topology.select("resSeq "+str(i))], axis=0))
            self.site_center = np.mean(np.asarray(self.residue_centers), axis=0) * 10
        except IndexError:
            print("Unable to select residue "+str(i)+". Site not found.")
    def convert_to_mae(self):
        """
        Convert the input PDB file to a MAE format file (written next to it).
        """
        convert = subprocess.call(["/home/data/Schrodinger2019_1/utilities/structconvert",
                                   "-ipdb",
                                   self.input_file,
                                   "-omae",
                                   self.output_dir + self.name + ".mae"])
    def generate_grid(self):
        """
        Write the grid.in file for running the grid generation job.

        Returns
        -------
        str
            Path to the written grid input file.
        """
        options = ["GRID_CENTER\t", str(self.site_center[0]), ",", str(self.site_center[1]), ",", str(self.site_center[2]), ",",
                   "\nGRIDFILE\t", self.name + "-grid.zip",
                   "\nINNERBOX\t10, 10, 10",
                   "\nOUTERBOX\t25, 25, 25",
                   "\nRECEP_FILE\t", "out_" + str(self.maename) + ".mae",
                   "\n"]
        grid_file = self.output_dir + self.name + "-grid.in"
        with open(grid_file, 'w') as f:
            f.write(''.join(options))
        return grid_file
# In[177]:
# --- Notebook cells: grid generation for the TAS2R14 homology models ---
g = Grid("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/models/multi_align_2/Q9NYV8.B99990001.pdb")
# In[178]:
g.find_site_center([69, 89, 175])
# In[179]:
g.residue_centers
# In[180]:
g.site_center
# In[87]:
g.convert_to_mae()
# In[88]:
g.generate_grid()
# In[181]:
models = glob.glob("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/models/multi_align_2/*pdb")
# In[182]:
# Generate a grid input file for every homology model in the directory.
for m in models:
    g = Grid(m)
    g.find_site_center([89, 69, 175])
    g.generate_grid()
# In[92]:
gridjobs = glob.glob("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/screening/multi_align_2/*grid*in")
# In[171]:
t = md.load("/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/models/multi_align_2/Q9NYV8.B99990001.pdb")
# In[175]:
# Sanity check: list the residues picked up by the site selection.
# FIX: this was a Python 2 print statement (a SyntaxError under Python 3,
# which the rest of the file requires, e.g. subprocess.run(text=True)).
for i in t.topology.select("resSeq 89 69 175"):
    print(t.topology.atom(i).residue)
# In[ ]:
# In[3]:
class IFDProcessor:
    """Converts Induced Fit Docking output into a Maestro pose-viewer file."""
    def __init__(self, filename):
        """Given a Maestro file containing the results of an Induced Fit Docking
        calculation (i.e., multiple entries each containing a protein and
        ligand), prepare to produce a "docking output" style of Maestro file
        (i.e., containing a single protein molecule as the first entry,
        followed by another entry for the ligand).

        Parameters
        ----------
        filename : str
            Filename for the IFD results file. The file must be a Maestro
            format .maegz file of protein-ligand complex entries.
        """
        self.input_file = os.path.realpath(filename)
        # Sentinel until run() has produced the converted file.
        self.output_file = "No output yet"
    def run(self):
        """Execute the conversion of the IFD docking results file into a
        Maestro pose-viewer file. On return, self.output_file holds the path
        of the converted file.
        """
        # Split each IFD entry into protein and ligand. Popen (rather than
        # call) is used so the working directory can be set to the input
        # file's directory.
        split = subprocess.Popen(["/home/data/Schrodinger2019_1/run",
                                  "pv_convert.py",
                                  "-mode",
                                  "split_pv",
                                  self.input_file,
                                  "-lig_last_mol"],
                                 cwd=os.path.dirname(self.input_file))
        # FIX: block until pv_convert.py finishes so that self.output_file
        # refers to a file that actually exists when run() returns (matches
        # the fetchligands.wait() pattern used in Model.load()).
        split.wait()
        # pv_convert.py appends "-out_pv" before the extension.
        output_name = os.path.splitext(os.path.basename(self.input_file))[0] + "-out_pv" + os.path.splitext(os.path.basename(self.input_file))[1]
        self.output_file = os.path.dirname(self.input_file) + "/" + output_name
# In[12]:
# --- Notebook cells: batch-convert all IFD result files to pose-viewer format ---
ifds = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*mae")
# In[13]:
len(ifds)
# In[18]:
# Trial run on a single file before processing the whole batch.
x = IFDProcessor(ifds[0])
# In[19]:
x.run()
# In[20]:
x.output_file
# In[21]:
# Convert the full batch.
for i in ifds:
    x = IFDProcessor(i)
    x.run()
# In[ ]:
# In[4]:
class ScreenConstructor:
    """Builds the Glide input files (grid generation and docking) for one screen."""

    def __init__(self, model_path, ligand, library_path):
        """Set up the constructor for a single screen.

        Creates the underlying Model from the docking output file and records
        where the retrospective screening library lives. Input files are
        written next to the model file.

        Parameters
        ----------
        model_path : str
            Docking output .maegz file: a single-molecule protein as the first
            entry, followed by at least one single-molecule ligand entry.
        ligand : str
            Ligand title handed to Model() for loading the complex.
        library_path : str
            Path to the retrospective screening library (actives + decoys).
        """
        self.model = Model(model_path, ligand)
        self.library_path = os.path.realpath(library_path)
        self.output_dir = os.path.dirname(os.path.realpath(model_path)) + "/"

    def gridgen(self):
        """Write the Glide grid-generation input file and return its path.

        The grid is centered on the centroid of the model's ligand; loading
        the model (if not already loaded) also verifies the ligand is present.
        """
        if not self.model.load_status:
            self.model.load()
        cx, cy, cz = self.model.get_ligand_centroid()
        content_lines = [
            "GRID_CENTER\t" + str(cx) + "," + str(cy) + "," + str(cz) + ",",
            "GRIDFILE\t" + self.model.name + "-grid.zip",
            "INNERBOX\t10, 10, 10",
            "OUTERBOX\t25, 25, 25",
            "RECEP_FILE\t" + os.path.basename(self.model.protfile),
            "",
        ]
        grid_file = self.output_dir + self.model.name + "-grid.in"
        with open(grid_file, 'w') as handle:
            handle.write("\n".join(content_lines))
        return grid_file

    def dock(self):
        """Write the Glide SP docking input file and return its path.

        The job references the grid produced by gridgen() and the screening
        library file supplied at construction.
        """
        content_lines = [
            "GRIDFILE\t" + self.model.name + "-grid.zip",
            "LIGANDFILE\t" + os.path.basename(self.library_path),
            "PRECISION\tSP",
            "",
        ]
        dock_file = self.output_dir + self.model.name + "-dock.in"
        with open(dock_file, 'w') as handle:
            handle.write("\n".join(content_lines))
        return dock_file
# In[5]:
class ScreenEvaluator:
    # Computes enrichment metrics (AUC, EF, precision-recall statistics) and
    # ROC/PR plots for a single retrospective screening result.
    def __init__(self, model_path, ligand):
        """Initialise the ScreenEvaluator with the screening results file. This populates
        the self.results object. The Model object is not populated upon initialization, to
        allow rapid inspection of ScreenEvaluator objects.
        The self.results object contains the hitlist from the screening experiment as a
        Pandas dataframe, specifying the Maestro Title, Docking Score, and Decoy status
        of each hit.
        Parameters
        ----------
        model_path : str
            Name of the docking output .maegz file, beginning with an entry
            consisting of a single molecule protein and followed by at least one
            entry consisting of a single molecule ligand
        ligand : str
            Name of the ligand used to load the model. This is passed directly to
            the Model() class method. Options include any string corresponding
            to a ligand title, and 'noligand', which triggers automatic identification
            of the ligand. Alternatively, to bypass model loading, can pass 'nomodel'
        """
        # Extract the project table results from the docking output file
        self.input_file = os.path.realpath(model_path)
        self.name = os.path.splitext(os.path.basename(self.input_file))[0]
        self.output_dir = os.path.dirname(self.input_file) + "/"
        # Model loading is a slow step, and not required if a hitlist
        # has already been written (usually)
        if not (ligand == 'nomodel'):
            self.model = Model(model_path, ligand)
        # self.results is later replaced by a DataFrame in calc_enrichment().
        self.results = ''
        # Metric name -> value, filled by calc_enrichment()/precision_recall().
        self.metrics = {}
    def write_hitlist(self):
        """Write the docking hitlist to disk. The hitlist is saved as a .csv
        with the following fields:
        s_m_title : The Maestro title field for each entry.
        r_i_docking_score : Docking score for each hit.
        s_user_RetroScreenRole : type of compound (active/decoy)
        s_user_CompoundSet : source compound set label.
        """
        # proplister -c emits CSV; its stdout is redirected straight to the file.
        with open(self.output_dir + self.name + "-results.csv", 'w') as f:
            extractprotein = subprocess.call(["/home/data/Schrodinger2019_1/utilities/proplister",
                                              "-c",
                                              "-p",
                                              "s_m_title",
                                              "-p",
                                              "r_i_docking_score",
                                              "-p",
                                              "s_user_RetroScreenRole",
                                              "-p",
                                              "s_user_CompoundSet",
                                              self.input_file],
                                             stdout = f)
    def compare_score(self, active_score, decoy_score):
        """Function used in AUC calculation to build a score tally based on
        the relationship between an active score and a decoy score.
        Currently, this function is set to assume that lower scores are better.
        Accordingly, the AUC is increased when an active has a lower score
        than a decoy.
        Parameters
        ----------
        active_score : float
            A docking score for an active compound.
        decoy_score : float
            A docking score for a decoy compound.
        """
        # NOTE(review): returns None if either score is NaN (no branch fires) —
        # confirm upstream scores are always comparable numbers.
        if active_score < decoy_score:
            return float(1)
        elif active_score > decoy_score:
            return float(0)
        elif active_score == decoy_score:
            return float(0.5)
    def calc_auc(self, actives_scores, decoys_scores, n_actives, n_decoys):
        """Calculate the ROC AUC for a set of actives and decoys.
        This routine scales the AUC number depending on how many of the actives
        and decoys present in the input are present in the score lists.
        The user provides the number of actives and decoys originally used
        as input to the screen, and the number of items in the actives_scores
        and decoys_scores lists are used to determine the scaling of the AUC.
        Parameters
        ----------
        actives_scores : list
            List of docking scores for actives
        decoys_scores : list
            List of docking scores for decoys
        n_actives : int
            Number of active compounds used as input to this screen
        n_decoys : int
            Number of decoy compounds used as input to this screen
        Returns
        -------
        auc : float
            A scaled AUC that reflects the probability that an active will
            score better than a decoy.
        """
        n_a_found = float(len(actives_scores))
        n_d_found = float(len(decoys_scores))
        n_a_input = float(n_actives)
        n_d_input = float(n_decoys)
        # Pairwise comparison of every active against every decoy
        # (Mann-Whitney style counting; O(actives * decoys)).
        score_compare_tally = float(0)
        for a in actives_scores:
            for d in decoys_scores:
                score_compare_tally += self.compare_score(a, d)
        auc_raw = float(score_compare_tally / (n_a_found * n_d_found))
        # Scale by the retrieval fractions: undocked actives count as misses,
        # undocked decoys as (favourable) non-hits.
        auc_scaled = float((auc_raw * (n_a_found / n_a_input) * (n_d_found / n_d_input)) + ((1 - (n_d_found/n_d_input)) * (n_a_found/n_a_input)))
        return auc_scaled
    def calc_ef_and_sd(self, threshold, n_actives, n_decoys):
        """Calculate the enrichment factor at the selected threshold.
        The EF is calculated as the ratio between an arbitrarily-selected
        fraction of inactives and the fraction of actives retrieved at that
        fraction of inactives. This definition also corresponds to the
        gradient of a line constructed from the origin to the ROC-curve value
        coordinate at the selected inactive fraction.
        Parameters
        ----------
        threshold : float
            The fraction of inactives at which to determine the EF.
            I.e., EF1% corresponds to a threshold of 0.01.
        n_actives : int
            Number of actives used as input to the screen
        n_decoys : int
            Number of decoys used as input to the screen.
        Returns
        -------
        ef : float
            The enrichment factor.
        sd : float
            The analytical error of the enrichment factor
        """
        # Walk the ROC curve until the inactive fraction crosses the threshold.
        # NOTE(review): if no point exceeds the threshold, 'a' and 'i' are
        # never bound and the lines below raise NameError — confirm inputs
        # always span the threshold.
        for (i, a) in zip(self.roclist_x, self.roclist_y):
            if i > threshold:
                ef = float(a/i)
                break
        # Analytical error propagation for the EF at this ROC point.
        s = (a*np.log(a)) / (i*np.log(i))
        sd = (((a*(1-a))/float(n_actives)) + ((s**2)*i*(1-i))/n_decoys) / (i**2)
        return (ef, sd)
    def calc_ci95(self, variance):
        """Calculates a 95% confidence interval for any metric, with the
        assumption that the distribution is normal.
        Caveat from the original paper:
        "This will break down for systems with very large errors (generally
        due to having very few actives or decoys) ... When large errors do
        arise the exact magnitude of the error is generally less relevant than
        the fact that the error is large"
        Parameters
        ----------
        variance : float
            The variance of any metric
        Returns
        -------
        ci95 : float
            The 95% confidence interval. Adding and subtracting this value
            from the mean value of the metric will give the values that define
            the borders of 95% confidence for that value.
        """
        # 1.96 standard deviations covers 95% of a normal distribution.
        ci95 = 1.96 * variance**0.5
        return ci95
    def precision_recall(self, hitlist):
        """Calculate the precision-recall profile and associated summary statistics
        (F1 score, Matthews correlation quality, Youden's J, PR-AUC).
        The summary statistics are defined for every point along the precision-recall
        curve, so will report the maximal value.
        Parameters
        ----------
        hitlist : list
            A list containing 'active' and 'decoy' entries. Can be generated from the self.results.role
            Pandas sheet column.
        """
        self.precision = []
        self.recall = []
        self.f1 = []
        self.mcc = []
        self.youdenj = []
        # Move the threshold through the hitlist: entries above index i are
        # treated as predicted positives, the rest as predicted negatives.
        for i in range(1, len(hitlist)):
            toplist = hitlist[0:i]
            bottomlist = hitlist[i:]
            tp = 0
            fp = 0
            tn = 0
            fn = 0
            # At each step, calculate the precision and recall
            for x in toplist:
                if x == 'active':
                    tp += 1
                elif x == 'decoy':
                    fp += 1
            for x in bottomlist:
                if x == 'decoy':
                    tn += 1
                elif x == 'active':
                    fn += 1
            precision = float(tp) / float(tp + fp)
            recall = float(tp) / float(tp + fn)
            self.precision.append(precision)
            self.recall.append(recall)
            # Calculate the summary statistics
            # F1 as the harmonic mean of precision and recall.
            f1 = np.reciprocal(((np.reciprocal(recall)) + (np.reciprocal(precision)))/2)
            self.f1.append(f1)
            # NOTE(review): MCC denominator is zero when any marginal count is
            # zero (e.g. hitlist entirely actives or decoys at a cut) —
            # ZeroDivisionError; confirm inputs always contain both classes.
            mcc = float((tp * tn) - (fp * fn)) / float(math.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
            self.mcc.append(mcc)
            youdenj = (float(tp)/float(tp+fn)) + (float(tn)/float(tn+fp)) - 1
            self.youdenj.append(youdenj)
        # Calculate PRAUC
        self.metrics['prauc'] = metrics.auc(self.recall, self.precision)
        # Store the summary statistics
        self.metrics['f1'] = max(self.f1)
        self.metrics['mcc'] = max(self.mcc)
        self.metrics['youden_j'] = max(self.youdenj)
        # Store the score cutoff associated with the maximum Youden's J index
        # NOTE(review): self.results may have a non-contiguous index after
        # drop_duplicates(), while youdenj.index() is positional — verify the
        # label lookup here matches the intended row.
        self.metrics['youden_j_score'] = self.results['score'][self.youdenj.index(max(self.youdenj))]
    def calc_enrichment(self, n_actives, n_decoys):
        """Calculate enrichment metrics for this screen. The metrics are stored in the
        self.metrics dictionary object.
        Parameters
        ----------
        n_actives : int
            The number of actives in the input library used to perform the screen.
        n_decoys : int
            The number of decoys in the input library used to perform the screen.
        """
        # Load hitlist
        try:
            self.results = pd.read_csv(self.output_dir + self.name + "-results.csv", names = ['title', 'score', 'role', 'compoundset'], skiprows = [0,1])
        except IOError:
            print("File not found: " + self.output_dir + self.name + "-results.csv")
            return
        # Remove duplicate items from the hitlist (keep the best-ranked pose).
        self.results.drop_duplicates(subset='title', keep="first", inplace=True)
        # Extract score lists for actives and decoys
        self.actives_scores = self.results[self.results["role"] == 'active'].score.tolist()
        self.decoys_scores = self.results[self.results["role"] == 'decoy'].score.tolist()
        self.roclist = self.results.role.tolist()
        # Prepare ROC lists: cumulative counts of decoys (x) and actives (y)
        # walking down the ranked hitlist.
        x = [0]
        y = [0]
        d_count = 0
        a_count = 0
        for i in self.roclist:
            if i == 'decoy':
                d_count += 1
            elif i == 'active':
                a_count += 1
            x.append(d_count)
            y.append(a_count)
        #Scale the coordinates by the total numbers
        self.roclist_x = np.asarray(x, dtype=float)/float(d_count)
        self.roclist_y = np.asarray(y, dtype=float)/float(a_count)
        # Populate the self.enrichment object with the metrics
        self.metrics['AUC'] = self.calc_auc(self.actives_scores, self.decoys_scores, n_actives, n_decoys)
        # Hanley-McNeil style variance terms for the AUC.
        # NOTE(review): despite the '_SD' key names, these values are variances
        # (calc_ci95 takes their square root) — confirm naming downstream.
        q1 = self.metrics['AUC'] / float(2-self.metrics['AUC'])
        q2 = (2*(self.metrics['AUC'])**2) / (1 + self.metrics['AUC'])
        self.metrics['AUC_SD'] = (self.metrics['AUC']*(1-self.metrics['AUC']) + ((n_actives-1)*(q1-(self.metrics['AUC'])**2)) + ((n_decoys-1)*(q2-(self.metrics['AUC'])**2))) / float(n_actives * n_decoys)
        self.metrics['EF1'], self.metrics['EF1_SD'] = self.calc_ef_and_sd(0.01, n_actives, n_decoys)
        self.metrics['EF2'], self.metrics['EF2_SD'] = self.calc_ef_and_sd(0.02, n_actives, n_decoys)
        self.metrics['EF5'], self.metrics['EF5_SD'] = self.calc_ef_and_sd(0.05, n_actives, n_decoys)
        self.metrics['EF10'], self.metrics['EF10_SD'] = self.calc_ef_and_sd(0.10, n_actives, n_decoys)
        self.metrics['AUC_CI95'] = self.calc_ci95(self.metrics['AUC_SD'])
        self.metrics['EF1_CI95'] = self.calc_ci95(self.metrics['EF1_SD'])
        self.metrics['EF2_CI95'] = self.calc_ci95(self.metrics['EF2_SD'])
        self.metrics['EF5_CI95'] = self.calc_ci95(self.metrics['EF5_SD'])
        self.metrics['EF10_CI95'] = self.calc_ci95(self.metrics['EF10_SD'])
    def plot_roc(self, color):
        """Plot the ROC curve and save to a file named <self.name>_roc.png.

        Parameters
        ----------
        color : str
            An xkcd colour name understood by seaborn (sns.xkcd_rgb).
        """
        # Rebuild the cumulative active/decoy counts from the hitlist.
        x = [0]
        y = [0]
        d_count = 0
        a_count = 0
        for i in self.roclist:
            if i == 'decoy':
                d_count += 1
            elif i == 'active':
                a_count += 1
            x.append(d_count)
            y.append(a_count)
        #Scale the coordinates by the total numbers
        x_scale = np.asarray(x, dtype=float)/float(d_count)
        y_scale = np.asarray(y, dtype=float)/float(a_count)
        plt.figure(figsize=(3,3))
        sns.set(context="notebook", style="ticks")
        # Diagonal: the expected curve for random ranking.
        plt.plot([0,1], [0,1], color=sns.xkcd_rgb["grey"], linestyle='--')
        plt.plot(x_scale, y_scale, sns.xkcd_rgb[color])
        plt.ylim(0,1)
        plt.ylabel("Actives")
        plt.xlim(0,1)
        plt.xlabel("Inactives")
        plt.savefig(self.output_dir + self.name + "_roc.png", dpi=300, format="png", bbox_inches="tight", transparent=True)
    def plot_roc_marked(self, color, watch_compound, watch_role):
        """Plot the ROC curve and add text labels to compounds
        specified.
        Parameters
        ----------
        color : str
            The colour to plot the ROC curve in.
        watch_compound : str
            Text label to match against the "compoundset" property for
            selecting the marked compounds.
        watch_role : str
            Text label to match against the "role" property for selecting
            the marked compounds.
        """
        x = [0]
        y = [0]
        marked_y = []
        marked_x = []
        marked_labels = []
        d_count = 0
        a_count = 0
        # Walk the ranked hitlist, recording the ROC coordinate of every
        # compound matching both the watched set and role.
        for i in range(0, len(self.results)):
            if (self.results.iloc[i]['role'] == 'decoy'):
                d_count += 1
            elif (self.results.iloc[i]['role'] == 'active'):
                a_count += 1
            if (self.results.iloc[i]['compoundset'] == watch_compound) and (self.results.iloc[i]['role'] == watch_role):
                marked_y.append(a_count)
                marked_x.append(d_count)
                marked_labels.append(self.results.iloc[i]['title'])
            x.append(d_count)
            y.append(a_count)
        #Scale the coordinates by the total numbers
        x_scale = np.asarray(x, dtype=float)/float(d_count)
        y_scale = np.asarray(y, dtype=float)/float(a_count)
        marked_x_scale = np.asarray(marked_x, dtype=float)/float(d_count)
        marked_y_scale = np.asarray(marked_y, dtype=float)/float(a_count)
        plt.figure(figsize=(6,6))
        sns.set(context="notebook", style="ticks")
        plt.plot([0,1], [0,1], color=sns.xkcd_rgb["grey"], linestyle='--')
        plt.plot(x_scale, y_scale, sns.xkcd_rgb[color])
        plt.scatter(marked_x_scale, marked_y_scale, color='k', marker='x', s=50)
        texts = []
        # Alternate the label side to reduce overlap before adjust_text runs.
        align = 'right'
        for index,(i,j) in enumerate(zip(marked_x_scale, marked_y_scale)):
            if (align == 'right'):
                texts.append(plt.text(i+.02, j-0.01, marked_labels[index]))
                align = 'left'
            elif (align == 'left'):
                texts.append(plt.text(i-0.02*len(marked_labels[index]), j-0.01, marked_labels[index]))
                align = 'right'
        plt.ylim(0,1)
        plt.ylabel("Actives")
        plt.xlim(0,1)
        plt.xlabel("Inactives")
        adjust_text(texts)
        plt.savefig(self.output_dir + self.name + "_roc_" + watch_role + "_" + watch_compound + ".png", dpi=300, format="png", bbox_inches="tight", transparent=True)
        plt.close()
    def plot_pr(self, color):
        """Plot the Precision-Recall curve and save it to <self.name>_pr.png.

        Parameters
        ----------
        color : str
            An xkcd colour name understood by seaborn (sns.xkcd_rgb).
        """
        plt.figure(figsize=(7,7))
        sns.set(context="notebook", style="ticks")
        # Draw iso-F1 reference curves in the background.
        f_scores = np.linspace(0.2, 0.8, num=4)
        for f_score in f_scores:
            x = np.linspace(0.01, 1)
            y = f_score * x / (2 * x - f_score)
            l, = plt.plot(x[y >= 0], y[y>=0], color='gray', alpha=0.2)
            plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
        plt.plot(self.recall, self.precision, sns.xkcd_rgb[color])
        plt.ylim(0,1)
        plt.ylabel("Precision")
        plt.xlim(0,1)
        plt.xlabel("Recall")
        plt.savefig(self.output_dir + self.name + "_pr.png", dpi=300, format="png", bbox_inches="tight", transparent=True)
        plt.close()
# In[41]:
# --- Notebook cells: evaluate one screen's enrichment and export the metrics ---
x = "/home/data/mashas/tamir/T2R14_gmeiner/library3/ifd_1743-out_pv-dock_pv.maegz"
# In[42]:
# 'nomodel' skips the (slow) Model construction; only the hitlist is needed.
s = ScreenEvaluator(x, "nomodel")
# In[43]:
# Composition of the retrospective screening library used for this screen.
n_actives = 193
n_decoys = 731
# In[44]:
s.calc_enrichment(n_actives, n_decoys)
s.precision_recall(s.roclist)
df = pd.DataFrame(s.metrics, index=[0])
df['screen'] = x
# In[45]:
# Docking-score cutoff at the maximal Youden's J index.
s.results['score'][s.youdenj.index(max(s.youdenj))]
# In[46]:
max(s.youdenj)
# In[48]:
df.to_csv("/home/data/mashas/tamir/T2R14_gmeiner/fastrocs/top1-1743-data.csv")
# # Putting it all together
# A generation object can be a wrapper for all of the functions that need to be run on all the screens in a generation. These are:
# 1. Make receptor grids
# 2. Dock the library
# 3. Save enrichment metrics
# 4. Minimize receptors in complex with convergence ligands
#
# One important thing to include is a way to check which steps finish successfully for each member. When there's an error, it would be convenient to have a simple way to re-run a step only on the ones that failed.
#
# Also, we need a better way to schedule jobs, with a "check-in, check-out" system. At the moment, there's an arbitrarily coded delay time to make sure the license server has time to update its count of licenses currently in use, so that we don't submit a job when there are no licenses available. However, this doesn't allow for fast and accurate submission, so the load-balancing is inefficient (especially for minimization jobs). A system which keeps track of which jobs have started, are running, and have finished would be able to more finely manage this process.
# In[59]:
class Generation:
    """
    The Generation object indexes a collection of screens and provides a wrapper for functions
    that are run on all the screens.
    """

    def __init__(self, n_processes, members, library, n_actives, n_decoys):
        """
        Initialise a Generation object with the provided models. These can be docking output files
        (.maegz) or pregenerated grid (.zip) files.

        Parameters
        ----------
        n_processes : int
            Number of simultaneous processes through which to run.
        members : list
            List of screen filepaths.
        library : str
            Filepath for the screening library.
        n_actives : int
            Number of actives.
        n_decoys : int
            Number of decoys.
        """
        self.n_processes = n_processes
        self.members = [os.path.realpath(x) for x in members]
        # All job submission and file manipulation happens in the directory
        # of the first member.
        self.workdir = os.path.dirname(self.members[0]) + "/"
        self.library = os.path.realpath(library)
        self.n_actives = n_actives
        self.n_decoys = n_decoys
        self.pool = pp.ProcessPool(self.n_processes)

    def pool_min(self, min_input):
        """
        Parallel utility to write the minimization job input.

        Parameters
        ----------
        min_input : tuple
            (filepath, ligand title) pair forwarded to Model.

        Returns
        -------
        str or None
            Name of the written minimization job, or None when the model
            could not be loaded or the ligand was not matched.
        """
        (x, ligand) = min_input
        m = Model(x, ligand)
        # If the model can't load, there is no pose for the specified ligand.
        # If the ligand isn't found in the file, then no pose for the ligand.
        # In these cases, don't want to run the minimization job, so don't
        # return any filename.
        m.load()
        if (m.load_status & m.ligandmatch):
            min_name = m.minimize()
            return min_name

    def write_minimize(self, ligand):
        """
        Write the minimization run files for the generation members.

        Parameters
        ----------
        ligand : str
            Title for the ligand to pass to Model.
        """
        min_input = [(x, ligand) for x in self.members]
        if not (os.path.splitext(self.members[0])[-1] == '.zip'):
            self.pool.clear()
            self.pool.restart()
            output = self.pool.map(self.pool_min, min_input)
            self.pool.close()
            self.mins = output
        else:
            print("The loaded models are pregenerated grids (.zips) - No minimization files written.")

    def prepare_next_generation(self, pattern, name):
        """
        Rename the minimization run files to specify the names for the next generation of model files
        that minimization produces. If the minimization output file already exists, or if
        the required input file is not present, the minimization run file is not renamed but rather
        stored in self.min_exists or self.min_unprepared, respectively.

        Parameters
        ----------
        pattern : str
            Substring on which the minimization filenames are split when
            constructing the next-generation names.
        name : str
            Label to insert into the filename
        """
        self.min_exists = []
        self.min_unprepared = []
        self.next_gen = []
        for x in self.mins:
            # pool_min returns None for members without a valid pose.
            if x is not None:
                ext = os.path.splitext(x)[-1]
                rename = os.path.basename(x).split(pattern)[0]+name+"-min"+ext
                output = os.path.basename(x).split(pattern)[0]+name+"-min-out.maegz"
                # Check if the minimization output file already exists
                if (len(glob.glob(self.workdir+output)) == 1):
                    self.min_exists.append(self.workdir+rename)
                # Check if the required input file is present
                elif (len(glob.glob(self.workdir+os.path.basename(x).split(pattern)[0]+"-*complex.mae")) == 0):
                    self.min_unprepared.append(x)
                else:
                    shutil.copy2(x, self.workdir+rename)
                    self.next_gen.append(self.workdir+rename)
        self.mins = self.next_gen

    def run_minimize(self):
        """
        Perform the minimization runs.
        """
        jobcodes = []
        if self.mins:
            for f in self.mins:
                submitted = False
                while not submitted:
                    # Throttle submission on the number of occupied Prime licenses.
                    if (self.get_used_licenses('Users of PSP_PLOP') < 210):
                        jobid = subprocess.run(["/home/data/Schrodinger2019_1/prime",
                                                "-HOST",
                                                "dragon_serial_mashaq",
                                                f], cwd = self.workdir, text=True, capture_output=True)
                        jobcodes.append(jobid.stdout.split()[-1])
                        submitted = True
                        # Give the license server time to register the new job.
                        time.sleep(5)
                    else:
                        time.sleep(10)
        self.mins_jobcodes = jobcodes

    def pool_split_by_mol(self, x):
        """
        Parallel worker function to split the provided minimization output file
        by molecule.
        Splits the filename by 'min', such that X-min-out.maegz is written
        as a multi-entry file to X-min.maegz.
        NOTE: This process RENAMES the ligand entry in the minimization output
        file, to the same name as the receptor. So, the ligand in these split
        files CANNOT be directly selected for grid generation.

        Parameters
        ----------
        x : str
            Filepath to output of minimization run. Must contain the characters
            'min' for filename manipulations.
        """
        subprocess.run(["/home/data/Schrodinger2019_1/run",
                        "/home/data/mashas/tamir/T2R14_gmeiner/automation/split_by_mol.py",
                        self.workdir+os.path.basename(x).split('min')[0]+'min-out.maegz',
                        self.workdir+os.path.basename(x).split('min')[0]+'min.maegz'],
                       cwd=self.workdir)

    def mins_to_members(self):
        """
        Split the minimization job output files into protein & ligand entries.
        Then transfer the list of split files to self.members and test whether
        each output file can be globbed.
        """
        self.pool.clear()
        self.pool.restart()
        output = self.pool.map(self.pool_split_by_mol, self.mins)
        self.pool.close()
        self.members = [self.workdir+os.path.basename(x).split('min')[0]+"min.maegz" for x in self.mins]
        # Drop any expected output that was not actually written.
        # NOTE: glob.glob returns a list (never ''), so the previous
        # `glob.glob(x) == ''` test could never match; additionally, removing
        # from a list while iterating over it skips elements. Filter into a
        # new list instead.
        self.members = [x for x in self.members if glob.glob(x)]
        print("Loaded "+str(len(self.members))+" screens from minimization.")

    def pool_sc(self, func_input):
        """
        Parallel worker function to run ScreenConstructor methods.

        Parameters
        ----------
        func_input : list
            Input parameters for the ScreenConstructor method:
            Element 0: File path to a .maegz for which the screen will be constructed.
            Element 1: String to identify the ligand for model loading. Can also be "noligand".
            Element 2: File path to screening library file.
        """
        sc = ScreenConstructor(func_input[0], func_input[1], func_input[2])
        grid_command = sc.gridgen()
        dock_command = sc.dock()
        return [grid_command, dock_command]

    def dock_from_grids(self, func_input):
        """
        A parallel utility to write dock.in files using pregenerated grid .zip files instead of
        .maegz docking output files.

        Parameters
        ----------
        func_input : list
            Input parameters for the utility:
            Element 0: File path to a grid .zip for which the screen will be constructed.
            Element 1: String to identify the ligand for model loading. Can also be "noligand".
            Element 2: File path to screening library file.
        """
        options = ["GRIDFILE\t", "./" + os.path.basename(func_input[0]),
                   "\nLIGANDFILE\t", "./" + os.path.basename(func_input[2]),
                   "\nPRECISION\tSP",
                   "\n"]
        dock_name = self.workdir + os.path.basename(func_input[0]).split('grid')[0] + "dock.in"
        with open(dock_name, 'w') as f:
            f.write(''.join(options))
        return [func_input[0], dock_name]

    def construct_screen(self, ligand):
        """
        Runs the ScreenConstructor.gridgen() and .dock() methods on each member of the generation.
        Alternatively, if the first member of self.members is a .zip file, this method will
        write dock.in files using the members as pre-generated grids.
        NOTE: If using split files from self.mins_to_members, the ligand titles will be renamed.
        Must use 'noligand' to allow automatic selection of the ligand.

        Parameters
        ----------
        ligand : str
            Title of the ligand to use for grid centering.
        """
        func_input = [[x, ligand, self.library] for x in self.members]
        if (os.path.splitext(self.members[0])[-1] == '.zip'):
            self.pool.clear()
            self.pool.restart()
            output = self.pool.map(self.dock_from_grids, func_input)
            self.pool.close()
        else:
            self.pool.clear()
            self.pool.restart()
            output = self.pool.map(self.pool_sc, func_input)
            self.pool.close()
        self.grids = [x[0] for x in output]
        self.docks = [x[1] for x in output]

    def get_used_licenses(self, label):
        """
        Query the Schrodinger license server to get the number of currently
        occupied licenses for the given license category.

        Parameters
        ----------
        label : str
            License category line to match, including the 'Users of' prefix
            (e.g. 'Users of GLIDE_SUITE_17JUN2019').

        Returns
        -------
        int
            Number of licenses currently in use; 0 if the label was not found
            in the server report.
        """
        checklic = subprocess.run(["/home/data/Schrodinger2019_1/licadmin", "STAT"], text=True, capture_output=True)
        for l in checklic.stdout.splitlines():
            if label in l:
                print(l)
                used_licenses = l.split('licenses')[1].split()[-1]
                return int(used_licenses)
        # Previously this fell through and implicitly returned None, which
        # made the `< threshold` comparisons in the callers raise TypeError.
        return 0

    def run_glide(self, input_files):
        """
        Submit the provided grid.in or dock.in files to the Schrodinger job control
        queueing system.
        Configured to use the "dragon_serial_mashaq" queue. Will submit jobs if fewer
        than 350 of the "GLIDE_SUITE_17JUN2019" licenses are in use.

        Parameters
        ----------
        input_files : list
            File paths for grid.in or dock.in files

        Returns
        -------
        list
            Job codes reported by Schrodinger job control for the submitted jobs.
        """
        jobcodes = []
        # Note that the label must include the 'Users of' prefix
        # to avoid matching subsequent entries for the same category
        glide_label = 'Users of GLIDE_SUITE_17JUN2019'
        #glide_label = 'Users of GLIDE_SUITE_22JUN2017'
        #glide_label = 'Users of GLIDE_SUITE_05JUN2019'
        for f in input_files:
            submitted = False
            while not submitted:
                if (self.get_used_licenses(glide_label) < 350):
                    jobid = subprocess.run(["/home/data/Schrodinger2019_1/glide",
                                            "-HOST",
                                            "dragon_serial_mashaq",
                                            f], cwd = self.workdir, text=True, capture_output=True)
                    jobcodes.append(jobid.stdout.split()[-1])
                    submitted = True
                    # Give the license server time to register the new job.
                    time.sleep(5)
                else:
                    #print("Number of submitted jobcodes: " + str(len(jobcodes)))
                    time.sleep(1*60) # 1min sleep
        return jobcodes

    def run_grids(self):
        """
        Perform the grid generation runs.
        """
        if self.grids:
            self.grids_jobcodes = self.run_glide(self.grids)
        else:
            print("No known grid.in files - no grids generated.")

    def run_docking(self):
        """
        Perform the docking runs.
        """
        if self.docks:
            self.docks_jobcodes = self.run_glide(self.docks)
        else:
            print("No known dock.in files - no docking runs started.")

    def load_docking_results(self):
        """
        Generate the filepaths for the docking results, and test if each result file actually exists.
        """
        self.screens = [x.split('dock')[0]+"dock_pv.maegz" for x in self.docks]
        # Keep only result files that actually exist on disk (see
        # mins_to_members for why this is a list filter rather than an
        # in-place removal during iteration).
        self.screens = [x for x in self.screens if glob.glob(x)]
        print("Loaded "+str(len(self.screens))+" screens from docking.")

    def pool_hitlist_writer(self, x):
        """
        A parallel utility to write the hitlist file for the provided screen.

        Parameters
        ----------
        x : str
            Filepath for a docking result file.
        """
        se = ScreenEvaluator(x, 'noligand')
        se.write_hitlist()

    def analyse_enrichment(self, name):
        """
        Analyse the enrichment metrics for the provided screens, and save the collected
        results to the provided filename.

        Parameters
        ----------
        name : str
            Filename for the enrichment metrics CSV file.
        """
        data = []
        # First, write hitlists in parallel
        self.pool.clear()
        self.pool.restart()
        output = self.pool.map(self.pool_hitlist_writer, self.screens)
        self.pool.close()
        for s in self.screens:
            se = ScreenEvaluator(s, 'nomodel')
            se.calc_enrichment(self.n_actives, self.n_decoys)
            se.precision_recall(se.roclist)
            df = pd.DataFrame(se.metrics, index=[0])
            df['screen'] = s
            data.append(df)
        self.metrics = pd.concat(data)
        self.metrics.to_csv(self.workdir+name)

    def load_reference_screens(self, refs):
        """
        Load the enrichment metric data for the given screens.
        NOTE: self.refs is replaced by the collected metrics dataframe once
        loading completes.

        Parameters
        ----------
        refs : list
            Filepaths for reference screens to load.
        """
        self.refs = refs
        data = []
        # # First, write hitlists in parallel
        # self.pool.clear()
        # self.pool.restart()
        # output = self.pool.map(self.pool_hitlist_writer, self.screens)
        # self.pool.close()
        for s in self.refs:
            se = ScreenEvaluator(s, 'nomodel')
            se.write_hitlist()
            se.calc_enrichment(self.n_actives, self.n_decoys)
            df = pd.DataFrame(se.metrics, index=[0])
            df['screen'] = s
            data.append(df)
        self.refs = pd.concat(data)

    def metric_distance(self, row):
        """
        Calculate the distance metric from maximum enrichment.
        The distance is the sum of the relative shortfalls of the row's AUC
        and EF10 from the generation maxima; lower is better.
        """
        auc_dist = (self.metrics['AUC'].max() - row['AUC'])/self.metrics['AUC'].max()
        ef10_dist = (self.metrics['EF10'].max() - row['EF10'])/self.metrics['EF10'].max()
        return auc_dist + ef10_dist

    def fetch_top_screens(self, n):
        """
        Return the names of the n-top enriching screens, as measured
        by the distance metric calculated by "self.metric_distance".

        Parameters
        ----------
        n : int
            The number of top screens to fetch.
        """
        self.metrics['metric_distance'] = self.metrics.apply(lambda row: self.metric_distance(row), axis=1)
        self.topmodels = self.metrics.nsmallest(n, 'metric_distance')
        return self.topmodels['screen'].tolist()

    def save_convergence_poses(self, name, screens, ligands):
        """
        Saves a file containing the receptors and specified ligands from the given screens.
        The output file is titled 'name_convergence.maegz'.

        Parameters
        ----------
        name : str
            A label to be prefixed to the saved convergence pose file.
        screens : list
            List of screens to merge and filter.
        ligands : list
            List of title for ligands selected to save out poses for.
        """
        # 'with' closes the file on exit; no explicit close() needed.
        with open(self.workdir+"screens.merge", 'w') as f:
            for s in screens:
                f.write(s+"\n")
        subprocess.run(["/home/data/Schrodinger2019_1/utilities/glide_merge",
                        "-o",
                        "merge.maegz",
                        "-epv",
                        "-f",
                        "screens.merge"],
                       cwd = self.workdir)
        subprocess.run(["/home/data/Schrodinger2019_1/utilities/proplister",
                        "-c",
                        "-p",
                        "s_m_title",
                        "merge.maegz",
                        "-o",
                        "filter_titles.csv"],
                       cwd = self.workdir)
        df = pd.read_csv(self.workdir+"filter_titles.csv")
        # Detect receptor entries - once minimized, these are set to '0'
        #receptors = [x for x in df['s_m_title'].tolist() if ':' in x]
        receptors = ['0']
        retrieve = receptors+ligands
        with open(self.workdir+"filter.titles", 'w') as f:
            for t in retrieve:
                f.write(t+"\n")
        subprocess.run(["/home/data/Schrodinger2019_1/utilities/maesubset",
                        "-title_list_file",
                        "filter.titles",
                        "merge.maegz",
                        "-o",
                        name+"convergence.maegz"],
                       cwd = self.workdir)
# In[60]:

# Gather the starting receptor models: IFD pose-viewer outputs, one per model.
initial_members = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*out_pv.mae*")

# In[61]:

len(initial_members)

# In[62]:

# First generation: 20 worker processes; the BitterDB/DUD-E library contains
# 45 actives and 408 decoys.
g1 = Generation(20, initial_members, "/home/data/mashas/tamir/T2R10_cucurbitacins/ligands/bitterdb_tp_tn_dude_decoys.mae", 45, 408)

# In[63]:

#g1.construct_screen('noligand')

# In[64]:

# Reuse grid.in files already written in a previous session instead of
# calling construct_screen again.
grids = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*grid.in")

# In[65]:

g1.grids = grids

# In[56]:

#g1.run_grids()

# In[66]:

# Pregenerated grid archives produced by the grid-generation jobs.
grids = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*grid.zip")

# In[67]:

len(grids)

# In[68]:

# Assemble self.docks
docks = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*dock.in")

# In[69]:

docks[0]

# In[70]:

len(docks)

# In[71]:

# Spot-check that a specific model id appears in the first dock.in path.
'139' in docks[0]
# In[72]:

# Remove dock.in entries whose docking runs have already produced output.
# NOTE: the previous version called docks.remove(x) while iterating over
# docks, which skips elements after each removal and raises ValueError when a
# model name matches more than one completed file. Build a filtered list
# instead; `name` is still bound for the inspection cell below.
completed = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*dock*maegz")
print(len(completed))
remaining = []
for x in docks:
    # Model identifier, e.g. '_139-' from '...icb_139-out_pv-dock.in'.
    name = x.split('icb')[-1].split('out_pv')[0]
    if not any(name in i for i in completed):
        remaining.append(x)
docks = remaining
# In[73]:

len(docks)

# In[74]:

# Last model identifier examined by the filtering loop (notebook display).
name

# In[75]:

g1.docks = docks

# In[76]:

len(g1.docks)

# In[77]:

# Submit the remaining docking jobs to the Schrodinger queue.
g1.run_docking()

# In[78]:

# Collect all completed docking pose-viewer files.
docks = glob.glob("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/*dock_pv.maegz")

# In[79]:

len(docks)

# In[80]:

# Remove from

# In[81]:

g1.docks = docks

# In[82]:

g1.load_docking_results()

# In[83]:

# Calculate enrichment metrics for every screen and save them to CSV.
g1.analyse_enrichment('g1_allmetrics.csv')

# In[ ]:

# Reload the saved metrics (allows resuming analysis without re-docking).
g1.metrics = pd.read_csv(g1.workdir+'g1_allmetrics.csv')

# In[ ]:

g1.workdir

# In[ ]:

# Rank the screens and display the ten best enrichers; this also adds the
# 'metric_distance' column used for the ranking.
g1.fetch_top_screens(10)

# In[ ]:

g1.metrics.to_csv(g1.workdir+'g1_allmetrics_with_rank.csv')

# In[ ]:
# Scatter plot of ROC AUC vs precision-recall AUC for all generation-1
# screens, saved as a 300-dpi PNG.
sns.set(context="notebook", style="ticks")
plt.figure(figsize=(9,6))
sns.scatterplot(x='AUC',
                y='prauc',
                marker='.',
                s=60,
                alpha=1,
                #hue="dataset",
                #style="dataset",
                edgecolor='',
                #palette=cmap,
                color=sns.xkcd_rgb['navy'],
                data=g1.metrics)
#plt.xlim(0.4, 0.8)
plt.xlabel("ROC AUC")
#plt.ylim(0, 6.5)
plt.ylabel("Precision-Recall AUC")
plt.savefig("/home/data/mashas/tamir/T2R10_cucurbitacins/receptor_models/ifd_cluster_bitterdb/individuals/g1_enrichment.png", format='png', dpi=300, bbox_inches='tight')
plt.show()
plt.close()
# In[ ]:
# In[74]:
def plot_pr_multi(screens, color, name, n_actives=218, n_decoys=1121, baseline=10.0/84.0):
    """
    Plot the Precision-Recall curves for multiple provided datasets.

    Parameters
    ----------
    screens : list
        Filepaths of docking pose-viewer files to evaluate.
    color : list
        One seaborn xkcd colour name per screen.
    name : str
        Output path for the saved PNG figure.
    n_actives : int, optional
        Number of actives in the screening library. Default 218 (previously
        hard-coded inside the loop).
    n_decoys : int, optional
        Number of decoys in the screening library. Default 1121 (previously
        hard-coded inside the loop).
    baseline : float, optional
        Precision of a random classifier, drawn as a dashed horizontal line.
        Default 10/84 kept for backward compatibility; pass
        n_actives/(n_actives+n_decoys) for a baseline consistent with the
        library composition.
    """
    precision = []
    recall = []
    for s in screens:
        se = ScreenEvaluator(s, "nomodel")
        #se.write_hitlist()
        se.calc_enrichment(n_actives, n_decoys)
        se.precision_recall(se.roclist)
        precision.append(se.precision)
        recall.append(se.recall)
    # Plot
    plt.figure(figsize=(8,8))
    sns.set(context="poster", style="ticks")
    # Draw iso-F1 reference curves in the background.
    f_scores = np.linspace(0.2, 0.8, num=4)
    for f_score in f_scores:
        x = np.linspace(0.01, 1)
        y = f_score * x / (2 * x - f_score)
        l, = plt.plot(x[y >= 0], y[y>=0], color='gray', alpha=0.2)
        plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.8, y[45] + 0.02))
    for (p, r, c) in zip(precision, recall, color):
        plt.plot(r, p, sns.xkcd_rgb[c])
    plt.ylim(0,1)
    plt.ylabel("Precision")
    plt.xlim(0,1)
    plt.xlabel("Recall")
    plt.hlines([baseline], 0, 1, linestyles='dashed')
    plt.savefig(name, dpi=300, format="png", bbox_inches="tight", transparent=True)
    plt.show()
    plt.close()
# In[75]:

import random

# In[76]:

n = 10
# random.sample requires a sequence in Python 3: materialise the colour names
# before sampling (dict_keys raises TypeError).
plot_pr_multi(g1.fetch_top_screens(n), random.sample(list(sns.xkcd_rgb), n), "/home/data/mashas/tamir/T2R14_gmeiner/antagonists/inactive_state_homology/screening/multi_align_2_ifd/library4_g1_top10_pr-multi-curve.png")

# ### Report PRAUC and MCC

# In[24]:

n = 10
for s in g1.fetch_top_screens(n):
    se = ScreenEvaluator(s, "nomodel")
    #se.write_hitlist()
    n_actives = 10
    n_decoys = 74
    se.calc_enrichment(n_actives, n_decoys)
    se.precision_recall(se.roclist)
    # Python 3 print function (the original line was a Python 2 print
    # statement, which is a SyntaxError under Python 3).
    print(se.metrics['AUC'], se.metrics['AUC_SD'], se.metrics['prauc'], se.metrics['mcc'], se.metrics['f1'])

# In[167]:

se.metrics

# In[ ]:
| en | 0.823641 | #!/usr/bin/env python # coding: utf-8 # In[1]: # # Notes # This notebook will contain a class and methods that enable the automatic refinement of homology models for virtual screening. The process will require the following: # 1. A retrospective screening library containing known actives and decoys. # 2. Selection criteria for the pose to converge upon, consisting of a ligand identity and the number of top-ranked models required to converge. When this pose is formed by the requested number of the top-ranked models, the refinement ends. # 3. Binding site location. # # The following methods will be defined: # 1. Screening - to conduct the screen of the specified library at a model in the selected binding site location. # 2. Minimization - to minimize the binding site residues around the specified convergence pose. # 3. Enrichment - to calculate the enrichment metrics for a completed screen. # 4. Convergence - to detect whether the top-ranked models from a generation have converged. # 5. Refine - a wrapper method to perform the minimization and screening methods at the generation level, since the entire generation is refined simultaneously. # # The classes used to structure this process: # # "Generation" - a collection of "Screens". The Convergence method operates at the "Generation" level. # # "Screen" - a single screening result, consisting of a protein with a list of hits retrieved by the protein. Each screening result will have its enrichment metrics associated with it, so that the top-ranked models can be easily selected. The "Screening" and "Enrichment" methods operate at the "Screen" level. In particular, the "Screening" method populates a "Screen" by running a virtual screening calculation on the "Screen" object's protein model. # # "Model" - contains a single protein conformation. This object will have the "Minimization" method as well as methods for exporting the protein coordinates. 
# # # Thoughts # Do I need to build a job monitor to make this work? Some kind of process that can schedule jobs and report the results back when the jobs finish? Otherwise, the whole script could be stuck waiting for a single docking job to finish. # # Definitely need a way to schedule multiple jobs simultaneously. Could do this with a mix of manual and automated - for example, could write out a big file of Schrodinger terminal commands and then just manually give that to the Schrodinger server in Bash. Then, could re-run this notebook on the output of those jobs when they finish - that way, the notebook does the heavy lifting of checking for convergence and writing job submission files, but doesn't have to be aware of when the jobs finish. An added bonus, the notebook won't be able to automatically submit jobs so things won't get out of hand by accident. # # This means that a Screen object should be able to be created from a .maegz file, for easy input of the completed jobs when they finish. Simple way to do this is to have two kinds of Screen object - a ScreenConstructor, and a ScreenEvaluator # I'd like to be able to do the convergence check within MDTraj, without needing to run the RMSD calculations through Maestro. But, this might cause problems with the binding site location when starting a new generation, because MDTraj and Maestro might shift around coordinate spaces / residue numbering. Watch out for this. So far the coordinate space seems to remain consistent. # File Nomenclature: # # Models are named by Generation, Screen, and Model: "0-1-2.pdb" # # Screens are named by Generation and Screen: "0-1_{grid/dock}.maegz" # In[ ]: # In[2]: The protein-ligand complex coordinates, with a minimize method. Attributes ---------- pose : mdtraj.Trajectory object Initialise the Model with an MDTraj Trajectory object created from the given Maestro file. 
The input file must contain multiple entries, with the first entry containing just the protein and subsequent entries containing ligands. This is the default structure for docking output .maegz files. The protein-only entry is extracted using 'maesubset', and is used in grid generation. The MDTraj Trajectory object, however, must contain a ligand to enable centroid position calculations. So, the complex must be assembled before loading into MDTraj - this is accomplished by 'structcat'. To do this, the desired ligand file has to be extracted from the docking output .maegz. This file is identified by the Maestro Title string, extracted using 'maesubset' and merged. Currently, the ligand chosen is just the second entry. Parameters ---------- filename : str Filename for the ligand-protein coordinates. The file must be a Maestro format .maegz file containing a single protein molecule as the first entry, and any number of ligand molecules in subsequent entries. ligand : str Title of the ligand to be used for assembling the complex. This ligand will feature in the model minimization, and so is an important choice as it will guide the model architecture during refinement. The string must correspond to the Maestro title of the ligand entry in the input pose-viewer file. This field is termed 's_m_title' in Maestro's internal referencing available with the proplister tool. If the string 'noligand' is provided, the model will be setup with the first ligand listed in the docking input file. # Cast the provided filename as an absolute path to enable processing in # the same directory # The Model is useless without a ligand. So, check if the specified ligand # is available within the input. # However, when assembling a new model (i.e., not one from docking output) # a complex with the desired ligand may not be available. In this case, # use the 'noligand' title as a manual override of sorts to force # model loading. 
# If the self.ligand attribute is set to 'noligand', then this model # does not come from a docking run containing the desired convergence # ligand. Therefore, all we really need from the ligand is the know # the binding site location. The first ligand in the file will do. # Use the proplister function to get this information, and update # the self.ligand attribute to have the new ligand name. # checkligand_p = subprocess.Popen(["/home/data/Schrodinger2018_3/utilities/maesubset", # "-title", # self.ligand, # self.input_file], # stdout = checkligand, # cwd = os.path.dirname(self.input_file)) # checkligand_p.wait() Run the Model's file-conversion routines and load the MDTraj object. This will perform multiple operations: 1. The protein structure is extracted to <self.name>-protein.mae 2. All entries matching the provided ligand name are saved to <self.name>-matchedligands.mae. 3. The ligand structure is extracted to <self.name>-ligand.mae 4. The protein-ligand complex is assembled to <self.name>-merge.mae. This file contains two entries in the Maestro file. 5. The protein-ligand complex file is merged to a single Maestro entry, saved to <self.name>-complex.mae. 6. The merged complex is saved to PDB to allow reading by MDTraj, saved to <self.name>-complex.pdb. The resulting MDTraj Trajectory object is stored in self.pose. # Extract the protein from the pose-viewer format multi-entry .maegz # ASSUMPTION: This step assumes that the first entry in the pose-viewer file # is the receptor. Usually, this will be the case. # With no delay, the script doesn't seem able to read the -matchedligands.mae file. # Subsequent execution with the above block commented out works as normal. # Perhaps it just takes a little bit of time to write the file? # Try using a 5 sec delay. Works. 1 second also works, but sometimes fails. #time.sleep(3) # Alternatively, try waiting for the process to terminate Use the MDTraj Trajectory object to automatically identify the ligand residue name. 
This process relies on the ligand consisting of a single molecule possessing a single residue name. Single-atom molecules are explicitely ignored for this step, as some protein hydrogen atoms are not always correctly identified as protein. The ligand residue is then selected as the residue possessing the smallest number of atoms. Returns ------- str A string containing the residue name of the ligand Return the molecules detected by MDTraj. Returns ------- list The length of the list corresponds to the number of molecules detected. Each element of the list is the number of atoms in each molecule Use the MDTraj Trajectory object to find the centroid of the ligand. Returns ------- list A three-element list containing the X, Y, and Z coordinates of the position corresponding to the centroid of the ligand atoms. Write the Schrodinger command to minimize the ligand-protein complex contained in the model object. This method tries to automatically identify the ligand. For this, the PDB-export version of the original Maestro file is used, as MDTraj cannot read Maestro formats. Parameters ---------- name : str The name of the Maestro cordinate file to be used, excluding the file path and extension. The minimization command will use this file, and the options file is also named using this string. Returns ------- string A shell command to use the Schrodinger suit to minimize the model. This command can be executed any time, using the coordinate file written. The command references an options file, which is written to the disk using the name parameter # Write the minimization job input file # This can be executed using the following in the terminal: # >>$SCHRODINGER/prime jobname # Identify the ligand #command = "/home/data/Schrodinger2018_3/prime " + jobname # In[35]: # In[36]: # In[27]: # In[28]: # In[29]: # In[31]: # In[ ]: # In[ ]: # In[3]: In some cases, protein structure files may be generated without any bound or docked ligands. 
Generating a grid from these structures requires identifying a binding site independantly of any bound ligand. This can be done by specifying a set of residues defining the borders of the site and defining an equidistant center. Parameters ---------- filename : str PDB file containing a single protein without any ligands. Identify the center of the provided residues. Parameters ---------- selected_residues : lst Collection of residue index numbers drawn from the topology of the file. Convert the input file to a MAE format file. Write the grid.in file for running the grid generation job. # In[177]: # In[178]: # In[179]: # In[180]: # In[87]: # In[88]: # In[181]: # In[182]: # In[92]: # In[171]: # In[175]: # In[ ]: # In[3]: Given a Maestro file containing the results of an Induced Fit Docking calculation (i.e., multiple entries each containing a protein and ligand), produce a "docking output" style of Maestro file (i.e., containing a single protein molecule as the first entry, followed by another entry for the ligand). Parameters ---------- name : str Filename for the IFD results file. The file must be a Maestro format .maegz file containing a single entry, consisting of a ligand-protein complex. Execute the conversion of the IFD docking results file into a Maestro Pose Viewer file. # Split the IFD file into protein and ligand # Use subprocess.Popen to allow specifying of the directory # In[12]: # In[13]: # In[18]: # In[19]: # In[20]: # In[21]: # In[ ]: # In[4]: Initialise the ScreenConstructor with the paths to the ligand-protein model and retrospective screening library files. The model path is used to add a Model object. Parameters ---------- model_name : str Name of the docking output .maegz file, beginning with an entry consisting of a single molecule protein and followed by at least one entry consisting of a single molecule ligand ligand : str Label for the ligand used to load the model. 
library_path : str Path to the retrospective screening library, containing the actives and decoys to be screened. Write the Schrodinger input files and return the name of the grid.in file. # Load the Model's coordinates - includes a check for ligand presence # Identify the ligand centroid # Grid generation options Write the Schrodinger input files and return the execution command to perform screening, using the created grid and the specified library file. # In[5]: Initialise the ScreenEvaluator with the screening results file. This populates the self.results object. The Model object is not populated upon initialization, to allow rapid inspection of ScreenEvaluator objects. The self.results object contains the hitlist from the screening experiment as a Pandas dataframe, specifying the Maestro Title, Docking Score, and Decoy status of each hit. Parameters ---------- model_path : str Name of the docking output .maegz file, beginning with an entry consisting of a single molecule protein and followed by at least one entry consisting of a single molecule ligand ligand : str Name of the ligand used to load the model. This is passed directly to the Model() class method. Options include any string corresponding to a ligand title, and 'noligand', which triggers automatic identification of the ligand. Alternatively, to bypass model loading, can pass 'nomodel' # Extract the project table results from the docking output file # Model loading is a slow step, and not required if a hitlist # has already been written (usually) Write the docking hitlist to disk. The hitlist is saved as a .csv with the following fields: s_m_title : The Maestro title field for each entry. r_i_docking_score : Docking score for each hit. s_user_RetroScreenRole : type of compound (active/decoy) This data is loaded into self.results as a Pandas dataframe. Function used in AUC calculation to build a score tally based on the relationship between an active score and a decoy score. 
Currently, this function is set to assume that lower scores are better. Accordingly, the AUC is increased when an active has a lower score than a decoy. Parameters ---------- active_score : float A docking score for an active compound. decoy_score : float A docking score for a decoy compound. Calculate the ROC AUC for a set of actives and decoys. This routine scales the AUC number depending on how many of the actives and decoys present in the input are present in the score lists. The user provides the number of actives and decoys originally used as input to the screen, and the number of items in the actives_scores and decoys_scores lists are used to determine the scaling of the AUC. Parameters ---------- actives_scores : list List of docking scores for actives decoys_scores : list List of docking scores for decoys n_actives : int Number of active compounds used as input to this screen n_decoys : int Number of decoy compounds used as input to this screen Returns ------- auc : float A scaled AUC that reflects the probability that an active will score better than a decoy. Calculate the enrichment factor at the selected threshold. The EF is calculated as the ratio between an arbitrarily-selected fraction of inactives and the fraction of actives retrieved at that fraction of inactives. This definition also corresponds to the gradient of a line constructured from the origin to the ROC-curve value coordinate at the selected inactive fraction. Parameters ---------- threshold : float The fraction of inactives at which to determine the EF. I.e., EF1% corresponds to a threshold of 0.01. n_actives : int Number of actives used as input to the screen n_decoys : int Number of decoys used as input to the screen. Returns ------- ef : float The enrichment factor. sd : float The analytical error of the enrichment factor Calculates a 95% confidence interval for any metric, with the assumption that the distribution is normal. 
Caveat from the original paper: "This will break down for systems with very large errors (generally due to having very few actives or decoys) ... When large errors do arise the exact magnitude of the error is generally less relevant than the fact that the error is large" Parameters ---------- variance : float The variance of any metric Returns ------- ci95 : float The 95% confidence interval. Adding and subtracting this value from the mean value of the metric will give the values that define the borders of 95% confidence for that value. Calculate the precision-recall profile and associated summary statistics (F1 score, Matthews correlation quality, Youden's J, PR-AUC). The summary statistics are defined for every point along the precision-recall curve, so will report the maximal value. Parameters ---------- hitlist : list A list containing 'active' and 'decoy' entries. Can be generated from the self.results.role Pandas sheet column. # Move the threshold through the hitlist # At each step, calculate the precision and recall #print len(toplist), len(bottomlist), "TP: ", tp, "FP: ", fp, "TN: ", tn, "FN: ", fn # Calculate the summary statistics # Calculate PRAUC # Store the summary statistics # Store the score cutoff associated with the maximum Youden's J index Calculate enrichment metrics for this screen. The metrics are stored in the self.metrics dictionary object. Parameters ---------- n_actives : int The number of actives in the input library used to perform the screen. n_decoys : int The number of decoys in the input library used to perform the screen. # Load hitlist # Remove duplicate items from the hitlist. # Extract score lists for actives and decoys # Prepare ROC lists #Scale the coordinates by the total numbers # Populate the self.enrichment object with the metrics Plot the ROC curve and save to a file named <self.name>_roc.png. #Scale the coordinates by the total numbers Plot the ROC curve and add text labels to compounds specified. 
Parameters ---------- color : str The colour to plot the ROC curve in. watch_compound : str Text label to match against the "compoundset" property for selecting the marked compounds. watch_role : str Text label to match against the "role" property for selecting the marked compounds. #Scale the coordinates by the total numbers Plot the Precision-Recall curve. # In[41]: # In[42]: # In[43]: # In[44]: # In[45]: # In[46]: # In[48]: # # Putting it all together # A generation object can be a wrapper for all of the functions that need to be run on all the screens in a generation. These are: # 1. Make receptor grids # 2. Dock the library # 3. Save enrichment metrics # 4. Minimize receptors in complex with convergence ligands # # One important thing to include is a way to check which steps finish successfully for each member. When there's an error, it would be convenient to have a simple way to re-run a step only on the ones that failed. # # Also, a better way to schedule jobs with a "check-in, check-out" system. At the moment, there's an arbitrarily coded delay time to make sure the license server has time to update the number of jobs licenses currently using a license to ensure that we don't submit a job when there are no licenses available. However, this doesn't allow for fast and accurate submission, so the load-balancing is inefficient (especially for minimization jobs). A system which keeps track of which jobs have started, are running, and have finished would be able to more finely manage this process. # In[59]: The Generation object indexes a collection of screens and provides a wrapper for functions that are run on all the screens. Initialise a Generation object with the provided models. These can be docking output files (.maegz) or pregenerated grid (.zip) files. Parameters ---------- n_processes : int Number of simultaneous processes through which to run. members : list List of screen filepaths. library : str Filepath for the screening library. 
n_actives : int Number of actives. n_decoys : int Number of decoys. Parallel utility to write the minimization job input. # If the model can't load, there is no pose for the specified ligand. # If the ligand isn't found in the file, then no pose for the ligand. # In these cases, don't want to run the minimization job, so don't # return any filename. Write the minimization run files for the generation members. Parameters ---------- ligand : str Title for the ligand to pass to Model. Rename the minimization run files to specify the names for the next generation of model files that minimization produces. If the minimization output file already exists, or if the required input file is not present, the minimization run file is not renamed but rather stored in self.min_exists or self.min_unprepared, respectively. Parameters ---------- name : str Label to insert into the filename # Check if the minimization parameter file already exists # Check if the required input file is present Perform the minimization runs. Parallel worker function to split the provided minimization output file by molecule. Splits the filename by 'min', such that X-min-out.maegz is written as a multi-entry file to X-min.maegz. NOTE: This process RENAMES the ligand entry in the minimization output file, to the same name as the receptor. So, the ligand in these split files CANNOT be directly selected for grid generation. Parameters ---------- x : str Filepath to output of minimization run. Must contain the characters 'min' for filename manipulations. Split the minimization job output files into protein & ligand entires. Then transfer the list of split files to self.members and tests whether each output file can be globbed. Parallel worker function to run ScreenConstructor methods. Parameters ---------- func_input : list Input parameters for the ScreenConstructor method: Element 0: File path to a .maegz for which the screen will be constructed. Element 1: String to identify the ligand for model loading. 
Can also be "noligand". Element 2: File path to screening library file. A parallel utility to write dock.in files using pregenerated grid .zip files instead of .maegz docking output files. Parameters ---------- func_input : list Input parameters for the utility: Element 0: File path to a .maegz for which the screen will be constructed. Element 1: String to identify the ligand for model loading. Can also be "noligand". Element 2: File path to screening library file. Runs the ScreenConstructor.gridgen() and .dock() methods on each member of the generation. Alternatively, if the first member of self.members is a .zip file, this method will write dock.in files using the members as pre-generated grids. NOTE: If using split files from self.mins_to_members, the ligand titles will be renamed. Must use 'noligand' to allow automatic selection of the ligand. Parameters ---------- ligand : str Title of the ligand to use for grid centering. Query the Schrodinger license server to get the number of currently occupied Glide licenses. Submit the provided grid.in or dock.in files to the Schrodinger job control queueing system. Configured to use the "dragon_serial_mashaq" queue. Will submit jobs if fewer than 100 of the "GLIDE_SUITE_22JUN2017" licenses are in use. Parameters ---------- dockuns : list File paths for grid.in or dock.in files # Note that the label must include the 'Users of' prefix # to avoid matching subsequent entries for the same category #glide_label = 'Users of GLIDE_SUITE_22JUN2017' #glide_label = 'Users of GLIDE_SUITE_05JUN2019' #print("Number of submitted jobcodes: " + str(len(jobcodes))) # 1min sleep Perform the grid generation runs. Perform the docking runs. Generate the filepaths for the docking results, and test if each result file actually exists. A parallel utility to write the hitlist file for the provided screen. Parameters ---------- screen : str Filepath for a docking result file. 
Analyse the enrichment metrics for the provided screens, and save the collected results to the provided filename. Parameters ---------- name : str Filename for the enrichment metrics CSV file. # First, write hitlists in parallel Load the enrichment metric data for the given screens. Parameters ---------- refs : list Filepaths for reference screens to load. # # First, write hitlists in parallel # self.pool.clear() # self.pool.restart() # output = self.pool.map(self.pool_hitlist_writer, self.screens) # self.pool.close() Calculate the distance metric from maximum enrichment. Return the names of the n-top enriching screens, as measured by the distance metric calculated by "self.metric_distance". Parameters ---------- n : int The number of top screens to fetch. Saves a file containing the receptors and specified ligands from the given screens. The output file is titled 'name_convergence.maegz'. Parameters ---------- name : str A label to be prefixed to the saved convergence pose file. screens : list List of screens to merge and filter. ligands : list List of title for ligands selected to save out poses for. # Detect receptor entries - once minimized, these are set to '0' #receptors = [x for x in df['s_m_title'].tolist() if ':' in x] # In[60]: # In[61]: # In[62]: # In[63]: #g1.construct_screen('noligand') # In[64]: # In[65]: # In[56]: #g1.run_grids() # In[66]: # In[67]: # In[68]: # Assemble self.docks # In[69]: # In[70]: # In[71]: # In[72]: # Remove completed runs # In[73]: # In[74]: # In[75]: # In[76]: # In[77]: # In[78]: # In[79]: # In[80]: # Remove from # In[81]: # In[82]: # In[83]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]: #hue="dataset", #style="dataset", #palette=cmap, #plt.xlim(0.4, 0.8) #plt.ylim(0, 6.5) # In[ ]: # In[74]: Plot the Precision-Recall curves for multiple provided datasets. #se.write_hitlist() # Plot # In[75]: # In[76]: # ### Report PRAUC and MCC # In[24]: #se.write_hitlist() # In[167]: # In[ ]: | 2.370671 | 2 |
GOL.py | pirate-505/python-pattern-draw | 0 | 6618082 | <reponame>pirate-505/python-pattern-draw<filename>GOL.py
import patterner as pt
from random import random
import time
# Global pause flag, toggled by the left-mouse-button callback below.
pause = False
# create randomized 60*60 matrix
field = pt.rnd_matrix(60)
'''
# create planer for classic GOL rule
field = pt.filled_matrix(50, 0)
field[9][8] = 1
field[9][9] = 1
field[9][10] = 1
field[8][10] = 1
field[7][7] = 1
'''
# Renderer for the board; 10 is presumably the cell size in pixels — TODO confirm
# against the patterner library.
p = pt.Patterner(field, 10)
# Board side length; the rest of the script assumes a square grid.
l = len(field)
# 'maze' ruleset
# Format is "number of cells: probability".
# The "probability" thing adds some random
# Not really a finite-state machine now, just added this for fun.
# To make it behave as normal Game of Life, make all the "probabilities" equal to 1
alive_conditions = {'1': 0.95, '2': 0.95, '3': 0.95, '4': 0.95, '5': 0.95}
born_conditions = {'3': 0.3}
#'maze' ruleset
# alive = [1, 2, 3, 4, 5]
# born = [3]
print("%s x %s field generated" % (len(field), len(field[0])))
def tor(c):
    '''Wrap index `c` onto the toroidal (closed) surface of the board.

    Python's modulo already maps any integer — including negatives — into
    [0, l), so the original `c + l if c < 0` branch was redundant and would
    even return a wrong (still negative) index for c < -l.
    '''
    return c % l
def gol_step(y, near):
    """Return the next state (0 or 1) of a cell with state `y` and `near` live neighbours."""
    neighbours = str(near)
    # Birth: a dead cell comes alive with the configured probability.
    if y == 0 and neighbours in born_conditions:
        return 1 if born_conditions[neighbours] > random() else 0
    # Survival: a live cell stays alive with the configured probability.
    if y == 1 and neighbours in alive_conditions:
        return 1 if alive_conditions[neighbours] > random() else 0
    # Any other combination means the cell dies (or stays dead).
    return 0
def sum_near(m, i, j):
    '''Calculate the sum of the eight neighbour cells of (i, j), wrapping toroidally.'''
    total = 0
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                # Skip the centre cell itself.
                continue
            total += m[tor(i + di)][tor(j + dj)]
    return total
def iterate(m):
    """Compute one generation of the automaton and return the new board.

    All neighbour counts are read from the old board `m` while writes go to
    the fresh copy `f2`, so the update is simultaneous for every cell.
    """
    # New state matrix (deep copy of the rows).
    f2 = [row[:] for row in m]
    for i in range(l):
        for j in range(l):
            near = sum_near(m, i, j)
            # i and j are already in [0, l), so the original tor() wrapping
            # of the loop indices was a no-op and has been dropped.
            f2[i][j] = gol_step(m[i][j], near)
    return f2
def key1_callback(event):
    """LMB handler: flip the global pause flag and report the new state."""
    global pause
    pause = not pause
    print("pause: ", pause)
def key3_callback(event):
    """RMB handler: save the current board as a PNG image and report the filename."""
    saved_path = p.save_image()
    print("Saved as ", saved_path)
p.bind("<Button-1>", key1_callback) # press LMB to pause/resume
p.bind("<Button-3>", key3_callback) # press RMB to save current state as png image
iteration = 0
time_counter = time.time()
# Basic main loop: keep the Tk window responsive and advance the simulation
# roughly every 0.1 s while not paused.
while (True):
    p.tk_update()
    if pause:
        # Paused: keep polling UI events cheaply without advancing the field.
        time.sleep(0.05)
        continue
    if time.time() - time_counter >= 0.1:
        # Time for the next generation: redraw the current field, then step it.
        time_counter = time.time()
        p.clean()
        p.set_matrix(field)
        p.draw()
        field = iterate(field)
        iteration += 1
        print('Iteration %d' % iteration)
    time.sleep(0.05)
input()
| import patterner as pt
from random import random
import time
pause = False
# create randomized 60*60 matrix
field = pt.rnd_matrix(60)
'''
# create planer for classic GOL rule
field = pt.filled_matrix(50, 0)
field[9][8] = 1
field[9][9] = 1
field[9][10] = 1
field[8][10] = 1
field[7][7] = 1
'''
p = pt.Patterner(field, 10)
l = len(field)
# 'maze' ruleset
# Format is "number of cells: probability".
# The "probability" thing adds some random
# Not really a finite-state machine now, just added this for fun.
# To make it behave as normal Game of Life, make all the "probabilities" equal to 1
alive_conditions = {'1': 0.95, '2': 0.95, '3': 0.95, '4': 0.95, '5': 0.95}
born_conditions = {'3': 0.3}
#'maze' ruleset
# alive = [1, 2, 3, 4, 5]
# born = [3]
print("%s x %s field generated" % (len(field), len(field[0])))
def tor(c):
'''Toroidal closed surface'''
return c + l if c < 0 else c % l
def gol_step(y, near):
# cell birth condition
if (y == 0 and str(near) in born_conditions):
return 1 if born_conditions[str(near)] > random() else 0
# cell survives condition
if (y == 1 and str(near) in alive_conditions):
return 1 if alive_conditions[str(near)] > random() else 0
# else die
return 0
def sum_near(m, i, j):
'''Calculate the sum of all neighbour cells'''
s = 0
for ii in range(-1, 2):
for jj in range(-1, 2):
s += m[tor(i + ii)][tor(j + jj)]
s -= m[i][j]
return s
def iterate(m):
# New state matrix
f2 = [row[:] for row in m]
for i in range(l):
for j in range(l):
near = sum_near(m, i, j)
f2[tor(i)][tor(j)] = gol_step(m[tor(i)][tor(j)], near)
return f2
def key1_callback(event):
global pause
pause = not pause
print("pause: ", pause)
def key3_callback(event):
print("Saved as ", p.save_image())
p.bind("<Button-1>", key1_callback) # press LMB to pause/resume
p.bind("<Button-3>", key3_callback) # press RMB to save current state as png image
iteration = 0
time_counter = time.time()
# basic main loop
while (True):
p.tk_update()
if pause:
time.sleep(0.05)
continue
if time.time() - time_counter >= 0.1:
time_counter = time.time()
p.clean()
p.set_matrix(field)
p.draw()
field = iterate(field)
iteration += 1
print('Iteration %d' % iteration)
time.sleep(0.05)
input() | en | 0.836582 | # create randomized 60*60 matrix # create planer for classic GOL rule field = pt.filled_matrix(50, 0) field[9][8] = 1 field[9][9] = 1 field[9][10] = 1 field[8][10] = 1 field[7][7] = 1 # 'maze' ruleset # Format is "number of cells: probability". # The "probability" thing adds some random # Not really a finite-state machine now, just added this for fun. # To make it behave as normal Game of Life, make all the "probabilities" equal to 1 #'maze' ruleset # alive = [1, 2, 3, 4, 5] # born = [3] Toroidal closed surface # cell birth condition # cell survives condition # else die Calculate the sum of all neighbour cells # New state matrix # press LMB to pause/resume # press RMB to save current state as png image # basic main loop | 2.972952 | 3 |
if_else.py | oddotter/Python_Learning_history | 0 | 6618083 | list_of_computers = []
# Computers available in the lab.
list_of_computers = ["comp1", "comp2", "comp3"]

# Computers currently checked out by someone.
# (The dead `= []` re-assignments and the bare-expression lines of the
# original were no-op REPL residue and have been removed.)
list_of_used_computers = ["comp136", "comp187"]

# Full inventory: available plus in-use machines.
all_computers = list_of_computers + list_of_used_computers

# Report whether a specific machine is currently in use.
if "comp136" in list_of_used_computers:
    print("Computer is in use")
else:
    print("Computer is not in use")
| list_of_computers = []
list_of_computers = ["comp1", "comp2", "comp3"]
list_of_computers
list_of_used_computers = []
list_of_used_computers = ["comp136", "comp187"]
list_of_used_computers
all_computers = list_of_computers + list_of_used_computers
all_computers
if "comp136" in list_of_used_computers:
print("Computer is in use")
else:
print("Computer is not in use")
| none | 1 | 3.839917 | 4 | |
addp/device.py | VENULLLC/addp | 0 | 6618084 | #!/usr/bin/env python
"""
Emulate a device running the ADDP service
Device is discoverable by the "Digi Device Discovery" program
"""
import sys
import struct
import socket
from addp import parse_frame, build_response
if __name__ == "__main__":
HOST, PORT = "172.16.31.10", 2362
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except AttributeError:
pass
mreq = struct.pack("4sI", socket.inet_aton(HOST),
socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP, mreq)
sock.bind(("", PORT))
sys.stderr.write('Listening for ADDP requests on port {0}, Ctrl-C to stop.\n'.format(PORT))
while True:
try:
data, addr = sock.recvfrom(2048)
except KeyboardInterrupt:
print('Exiting...')
sys.exit(0)
if data is None or data == '':
break
info = parse_frame(data)
if info:
print('Responing to discover from', addr)
ret = build_response(info)
if ret and ret != "":
sock.sendto(ret, addr)
| #!/usr/bin/env python
"""
Emulate a device running the ADDP service
Device is discoverable by the "Digi Device Discovery" program
"""
import sys
import struct
import socket
from addp import parse_frame, build_response
if __name__ == "__main__":
HOST, PORT = "172.16.31.10", 2362
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except AttributeError:
pass
mreq = struct.pack("4sI", socket.inet_aton(HOST),
socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP, mreq)
sock.bind(("", PORT))
sys.stderr.write('Listening for ADDP requests on port {0}, Ctrl-C to stop.\n'.format(PORT))
while True:
try:
data, addr = sock.recvfrom(2048)
except KeyboardInterrupt:
print('Exiting...')
sys.exit(0)
if data is None or data == '':
break
info = parse_frame(data)
if info:
print('Responing to discover from', addr)
ret = build_response(info)
if ret and ret != "":
sock.sendto(ret, addr)
| en | 0.852213 | #!/usr/bin/env python Emulate a device running the ADDP service
Device is discoverable by the "Digi Device Discovery" program | 2.606719 | 3 |
Unet_v2/segmentation_mapping-master/get_train_test_kfold.py | healthinnovation/aerial-image-analysis | 0 | 6618085 | '''
Split the data into k folds
'''
from pathlib import Path
from sklearn.model_selection import KFold
import numpy as np
def get_split_out(data_path, name_file, fold, num_splits=5):
    """Split the .npy images under ``data_path/name_file/images`` into train/val lists.

    Parameters
    ----------
    data_path : pathlib.Path
        Root data directory.
    name_file : str
        Dataset sub-directory containing an ``images`` folder.
    fold : int
        Which of the k folds to return; -1 returns the full file list for
        both train and val (no split).
    num_splits : int
        Number of folds (default 5).
    """
    train_path = data_path / name_file / 'images'
    train_file_names = np.array(sorted(train_path.glob('*npy')))
    # Early return: no point computing the KFold split when the caller
    # asked for the full list (the original built all folds first).
    if fold == -1:
        return train_file_names, train_file_names
    kf = KFold(n_splits=num_splits, random_state=2019, shuffle=True)
    train_ids, val_ids = list(kf.split(train_file_names))[fold]
    return train_file_names[train_ids], train_file_names[val_ids]
#if __name__ == '__main__':
# train_file_names,val_file_names = get_split('data_HR','data',1)
def percent_split(train_val_100percent, percent=1):
    """Deterministically split an indexable array into (extra, subset).

    ``subset`` holds ``percent`` (a fraction in [0, 1]) of the items chosen
    by a seeded shuffle; ``extra`` holds the remainder. The input must
    support fancy indexing with an integer list (e.g. a numpy array).
    """
    dataset_size = len(train_val_100percent)
    indices = list(range(dataset_size))
    # Number of items to keep; the original shadowed `percent` here, which
    # made the parameter unreadable after this line.
    n_keep = int(np.floor(percent * dataset_size))
    # Seeded shuffle so the split is reproducible across runs. (The original
    # wrapped this in a dead `if 1:` guard.)
    np.random.seed(2019)
    np.random.shuffle(indices)
    extra_indices, train_indices_split = indices[n_keep:], indices[:n_keep]
    print(dataset_size, len(train_indices_split), len(extra_indices))
    return train_val_100percent[extra_indices], train_val_100percent[train_indices_split]
def get_split_in(train_file_names, fold, num_splits=5):
    """K-fold split of an already-loaded file-name array.

    ``fold == -1`` returns the full array for both train and val; otherwise
    the ``fold``-th of ``num_splits`` shuffled folds (seed 2019) is returned
    as ``(train, val)``.
    """
    # Early return: skip the KFold work entirely when no split is requested
    # (the original computed every fold first and then discarded them).
    if fold == -1:
        return train_file_names, train_file_names
    kf = KFold(n_splits=num_splits, random_state=2019, shuffle=True)
    train_ids, val_ids = list(kf.split(train_file_names))[fold]
    return train_file_names[train_ids], train_file_names[val_ids]
#if __name__ == '__main__':
# train_file_names_2,val_file_names_2 = get_split_in(train_file_names,0) | '''
Splitthe data in kfold
'''
from pathlib import Path
from sklearn.model_selection import KFold
import numpy as np
def get_split_out(data_path,name_file, fold, num_splits=5):
#data_path=Path(path)
train_path = data_path / name_file / 'images'
train_file_names = np.array(sorted(list(train_path.glob('*npy'))))
kf = KFold(n_splits=num_splits, random_state=2019,shuffle=True)
ids = list(kf.split(train_file_names))
train_ids, val_ids = ids[fold]
if fold == -1:
return train_file_names, train_file_names
else:
return train_file_names[train_ids], train_file_names[val_ids]
#if __name__ == '__main__':
# train_file_names,val_file_names = get_split('data_HR','data',1)
def percent_split(train_val_100percent, percent = 1):
fpath_list = train_val_100percent
dataset_size = len(fpath_list)
indices = list(range(dataset_size))
percent = int(np.floor(percent * dataset_size))
if 1 :
np.random.seed(2019)
np.random.shuffle(indices)
extra_indices, train_indices_split = indices[percent:], indices[:percent]
print(dataset_size,len(train_indices_split), len(extra_indices))
return train_val_100percent[extra_indices],train_val_100percent[train_indices_split] #
def get_split_in(train_file_names, fold, num_splits=5):
kf = KFold(n_splits=num_splits, random_state=2019,shuffle=True)
#kf = KFold(n_splits=num_splits, random_state=20018)
ids = list(kf.split(train_file_names))
train_ids, val_ids = ids[fold]
if fold == -1:
return train_file_names, train_file_names
else:
return train_file_names[train_ids], train_file_names[val_ids]
#if __name__ == '__main__':
# train_file_names_2,val_file_names_2 = get_split_in(train_file_names,0) | en | 0.570925 | Splitthe data in kfold #data_path=Path(path) #if __name__ == '__main__': # train_file_names,val_file_names = get_split('data_HR','data',1) # #kf = KFold(n_splits=num_splits, random_state=20018) #if __name__ == '__main__': # train_file_names_2,val_file_names_2 = get_split_in(train_file_names,0) | 2.691529 | 3 |
cotas.py | hlruffo/Graphic-interface | 0 | 6618086 | import requests
import json
from PySimpleGUI import PySimpleGUI as sg
import time
#extrair para cota dolar
# lógica que quero implementar no output-> se marcado no checkbox = TRUE chamar função
def dolar():
    """Fetch the latest quotes from AwesomeAPI and print the USD→BRL rate."""
    response = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
    quotes = response.json()
    cota_dolar = quotes['USDBRL']['bid']
    print(f'A cotação do Dólar é {cota_dolar} Reais.\n')
#extrair para cota euro
def euro():
    """Fetch the latest quotes from AwesomeAPI and print the EUR→BRL rate."""
    response = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
    quotes = response.json()
    cota_euro = quotes['EURBRL']['bid']
    print(f'A cotação do Euro é {cota_euro} Reais.\n')
#extrair para cota bitcoin
def bitcoin():
    """Fetch the latest quotes from AwesomeAPI and print the BTC→BRL rate."""
    response = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
    quotes = response.json()
    cota_bitcoin = quotes['BTCBRL']['bid']
    print(f'A cotação do Bitcoin é :{cota_bitcoin} Reais.\n')
class TelaPython:
    """PySimpleGUI window that greets the user and prints the selected currency quotes."""

    def __init__(self):
        # Window layout: name input, one checkbox per currency, a submit
        # button, and an Output element that captures the print() calls below.
        layout = [
            [sg.Text('Nome', size=(5, 0)), sg.Input(size=(25, 0), key='nome')],
            [sg.Text('Selecione a cotação desejada:')],
            [sg.Checkbox('Dólar', key='dolar'), sg.Checkbox('Euro', key='euro'), sg.Checkbox('Bitcoin', key='bitcoin')],
            [sg.Button('Receber informação')],
            [sg.Output(size=(40, 20))]
        ]
        self.janela = sg.Window("Cotações",).layout(layout)

    def Iniciar(self):
        """Run the event loop until the window is closed."""
        while True:
            # Blocks until the button is pressed or the window is closed.
            self.button, self.values = self.janela.Read()
            if self.button is None:
                # Window closed: Read() returns a None event and None values;
                # the original crashed here with TypeError on self.values['nome'].
                break
            nome = self.values['nome']
            informar_dolar = self.values['dolar']
            informar_euro = self.values['euro']
            informar_bitcoin = self.values['bitcoin']
            hora = time.strftime('%H:%M:%S', time.localtime())
            dia = time.strftime('%d-%m-%y', time.localtime())
            print(f'Olá {nome},\n às {hora} de {dia} ')
            if informar_dolar:
                dolar()
            if informar_euro:
                euro()
            if informar_bitcoin:
                bitcoin()
tela = TelaPython()
tela.Iniciar()
"""
""" | import requests
import json
from PySimpleGUI import PySimpleGUI as sg
import time
#extrair para cota dolar
# lógica que quero implementar no output-> se marcado no checkbox = TRUE chamar função
def dolar():
cotas = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
cotas = cotas.json()
cota_dolar = cotas['USDBRL']['bid']
print(f'A cotação do Dólar é {cota_dolar} Reais.\n')
#extrair para cota euro
def euro():
cotas = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
cotas = cotas.json()
cota_euro = cotas['EURBRL']['bid']
print(f'A cotação do Euro é {cota_euro} Reais.\n')
#extrair para cota bitcoin
def bitcoin():
cotas = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
cotas = cotas.json()
cota_bitcoin = cotas['BTCBRL']['bid']
print(f'A cotação do Bitcoin é :{cota_bitcoin} Reais.\n')
class TelaPython:
def __init__(self):
#layout
layout = [
[sg.Text('Nome', size=(5,0)),sg.Input(size=(25,0),key='nome')],
[sg.Text('Selecione a cotação desejada:')],
[sg.Checkbox('Dólar', key='dolar'),sg.Checkbox('Euro', key='euro'),sg.Checkbox('Bitcoin', key='bitcoin')],
[sg.Button('Receber informação')],
[sg.Output(size=(40,20))]
]
#janela
self.janela = sg.Window("Cotações",).layout(layout)
def Iniciar(self):
while True:
#extrair dados da tela
self.button, self.values = self.janela.Read()
nome = self.values['nome']
informar_dolar = self.values['dolar']
informar_euro = self.values['euro']
informar_bitcoin = self.values['bitcoin']
A = time.strftime('%H:%M:%S', time.localtime())
B = time.strftime('%d-%m-%y', time.localtime())
print(f'Olá {nome},\n às {A} de {B} ')
if informar_dolar == True:
dolar()
if informar_euro == True:
euro()
if informar_bitcoin == True:
bitcoin()
tela = TelaPython()
tela.Iniciar()
"""
""" | pt | 0.624475 | #extrair para cota dolar # lógica que quero implementar no output-> se marcado no checkbox = TRUE chamar função #extrair para cota euro #extrair para cota bitcoin #layout #janela #extrair dados da tela | 3.216596 | 3 |
Q034.py | Linchin/python_leetcode_git | 0 | 6618087 | <reponame>Linchin/python_leetcode_git<filename>Q034.py
"""
34
medium
find first and last position of element in sorted array
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
If target is not found in the array, return [-1, -1].
You must write an algorithm with O(log n) runtime complexity.
"""
from typing import List
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Return [first, last] indices of target in sorted nums, or [-1, -1].

        Runs in O(log n) via two binary searches: the lower bound of
        ``target`` and the lower bound of ``target + 1``. The original
        widened linearly outward from the first hit, which degrades to O(n)
        when the target dominates the array — violating the problem's
        required O(log n) complexity.
        """

        def lower_bound(value: int) -> int:
            # Smallest index i such that nums[i] >= value (== len(nums) if none).
            lo, hi = 0, len(nums)
            while lo < hi:
                mid = (lo + hi) // 2
                if nums[mid] < value:
                    lo = mid + 1
                else:
                    hi = mid
            return lo

        first = lower_bound(target)
        if first == len(nums) or nums[first] != target:
            return [-1, -1]
        last = lower_bound(target + 1) - 1
        return [first, last]
sol = Solution()
nums = []
target = 6
print(sol.searchRange(nums, target)) | """
34
medium
find first and last position of element in sorted array
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
If target is not found in the array, return [-1, -1].
You must write an algorithm with O(log n) runtime complexity.
"""
from typing import List
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
if not nums:
return [-1, -1]
# search for the target
left, right = 0, len(nums) - 1
while left <= right:
mid = (left + right) // 2
if nums[mid] == target:
break
elif nums[mid] > target:
right = mid - 1
else:
left = mid + 1
# find range
if nums[mid] != target:
return [-1, -1]
else:
left, right = mid, mid
while left >= 0 and nums[left] == target:
left -= 1
while right <= len(nums) - 1 and nums[right] == target:
right += 1
return [left + 1, right - 1]
sol = Solution()
nums = []
target = 6
print(sol.searchRange(nums, target)) | en | 0.753559 | 34 medium find first and last position of element in sorted array Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value. If target is not found in the array, return [-1, -1]. You must write an algorithm with O(log n) runtime complexity. # search for the target # find range | 3.888621 | 4 |
metrics/views/metricsCpu.py | BrianWaganerSTL/RocketDBaas_minion | 0 | 6618088 | <gh_stars>0
import json
from sys import platform

import psutil
from django.http import HttpResponse
from django.utils import timezone
def CpuList(request):
    """Return the current CPU time percentages as a JSON HttpResponse.

    Only GET is handled; any other method falls through and returns None,
    matching the original view's behaviour.
    """
    if request.method == 'GET':
        a = psutil.cpu_times_percent()
        # psutil's namedtuple fields vary by platform and version
        # (user/nice/system/idle/iowait/...), so serialise whatever fields
        # are present instead of hand-formatting per platform. The original
        # raised NameError on unexpected field counts and truncated the
        # float percentages with %d.
        payload = {'created_dttm': str(timezone.now().replace(microsecond=0))}
        payload.update(a._asdict())
        myJson = json.dumps(payload)
        print('myJson=' + myJson)
        return HttpResponse(myJson)
| from sys import platform
import psutil
from django.utils import timezone
from django.http import HttpResponse
def CpuList(request):
if request.method == 'GET':
a = psutil.cpu_times_percent()
if platform == "linux" or platform == "linux2":
if len(a) == 8:
cpuDetails = '"user":%d,"nice":%s,"system":%d,"idle":%d,"iowait":%d,"irq":%d,"softirq":%d,"steal":%d}' % a
elif len(a) == 9:
cpuDetails = '"user":%d,"nice":%s,"system":%d,"idle":%d,"iowait":%d,"irq":%d,"softirq":%d,"steal":%d,"guest":%d}' % a
elif len(a) == 10:
cpuDetails = '"user":%d,"nice":%s,"system":%d,"idle":%d,"iowait":%d,"irq":%d,"softirq":%d,"steal":%d,"guest":%d,"guest_nice":%d}' % a
elif platform == "darwin": # OS X
cpuDetails = '"user":%d,"nice":%s,"system":%d,"idle":%d}' % a
elif platform == "win32":
cpuDetails = '"user":%d,"system":%d,"idle":%d,"interrupt":%d,"dpc":%d}' % a
myJson = '{"created_dttm":"%s", %s' % (str(timezone.now().replace(microsecond=0)), cpuDetails)
print('myJson=' + myJson)
return HttpResponse(myJson) | none | 1 | 2.272171 | 2 | |
wagtaildemo/settings/docker.py | kaedroho/wagtailapidemo | 3 | 6618089 | <reponame>kaedroho/wagtailapidemo<gh_stars>1-10
from .base import *

# Production hardening for the Docker deployment: never run with debug on.
DEBUG = False
TEMPLATE_DEBUG = False

# Hosts this deployment is allowed to serve.
ALLOWED_HOSTS = ['localhost', 'demo.wagtail.io']

# Pre-compress static assets at deploy time instead of per-request.
COMPRESS_OFFLINE = True

# WhiteNoise serves hashed, gzip-compressed static files directly from Django.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'

# Local SQLite database; presumably fine for a demo site — TODO confirm this
# is intended for the containerised deployment rather than Postgres.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'wagtaildemo.sqlite3',
    }
}

# Use the cached template loader so templates are compiled once per process.
TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)
| from .base import *
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['localhost', 'demo.wagtail.io']
COMPRESS_OFFLINE = True
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'wagtaildemo.sqlite3',
}
}
# Use the cached template loader
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
) | en | 0.340415 | # Use the cached template loader | 1.505112 | 2 |
module1-introduction-to-sql/buddymove_holidayiq.py | DavidVollendroff/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 6618090 | import sqlite3
import pandas as pd

# BuddyMove dataset: per-user review counts across six travel categories.
raw_csv_url = 'https://raw.githubusercontent.com/DavidVollendroff/DS-Unit-3-Sprint-2-' \
              'SQL-and-Databases/master/module1-introduction-to-sql/buddymove_holidayiq.csv'
df = pd.read_csv(raw_csv_url)

conn = sqlite3.connect('buddymove_holidayiq.sqlite3')
# if_exists='replace' makes the script re-runnable; without it to_sql raises
# ValueError when the table already exists from a previous run.
df.to_sql('buddymove_holidayiq', conn, if_exists='replace')
curs = conn.cursor()

query = "SELECT * FROM buddymove_holidayiq"
result = curs.execute(query).fetchall()
print(len(result), 'rows present')

query = "SELECT * FROM buddymove_holidayiq WHERE Nature > 100 and Shopping > 100;"
result = curs.execute(query).fetchall()
print(len(result), 'users have reviewed both 100 Nature and 100 shopping.')

# Release the database handle; the original leaked the connection.
conn.close()
import pandas as pd
raw_csv_url = 'https://raw.githubusercontent.com/DavidVollendroff/DS-Unit-3-Sprint-2-' \
'SQL-and-Databases/master/module1-introduction-to-sql/buddymove_holidayiq.csv'
df = pd.read_csv(raw_csv_url)
conn = sqlite3.connect('buddymove_holidayiq.sqlite3')
df.to_sql('buddymove_holidayiq', conn)
curs = conn.cursor()
query = "SELECT * FROM buddymove_holidayiq"
result = curs.execute(query).fetchall()
print(len(result), 'rows present')
query = "SELECT * FROM buddymove_holidayiq WHERE Nature > 100 and Shopping > 100;"
result = curs.execute(query).fetchall()
print(len(result), 'users have reviewed both 100 Nature and 100 shopping.') | none | 1 | 3.786377 | 4 | |
src/walutomatpy/wrapped.py | b1r3k/walutomatpy | 0 | 6618091 | <filename>src/walutomatpy/wrapped.py
import uuid
from decimal import Decimal
from typing import List, Tuple
from .models.enums import Offer
from .models.order import WalutomatOrder
from .models.account import AccountBalances
from . import WalutomatClient
class WrappedWalutomatClient(WalutomatClient):
def get_account_balances(self) -> AccountBalances:
result = super().get_account_balances()
return AccountBalances(result)
def get_p2p_best_offers_detailed(self, currency_pair, item_limit=10) -> Tuple[List, List]:
result = super().get_p2p_best_offers_detailed(currency_pair, item_limit)
bids = (Offer(offer['price'], offer['volume']) for offer in result.get('bids', []))
sorted_bids = sorted(bids, key=lambda o: o.price, reverse=True)
asks = (Offer(offer['price'], offer['volume']) for offer in result.get('asks', []))
sorted_asks = sorted(asks, key=lambda o: o.price)
return sorted_bids, sorted_asks
def get_p2p_active_orders(self, item_limit=10):
for result in super().get_p2p_active_orders(item_limit):
yield WalutomatOrder(**result)
def get_p2p_order_by_id(self, order_id) -> List[WalutomatOrder]:
result = super().get_p2p_order_by_id(order_id)
return list(WalutomatOrder(**raw_order) for raw_order in result)
def submit_p2p_order(self, order_id, currency_pair, buy_sell, volume, volume_currency, limit_price, dry=False):
result = super().submit_p2p_order(order_id, currency_pair, buy_sell, volume, volume_currency, limit_price, dry)
return result['orderId']
| <filename>src/walutomatpy/wrapped.py
import uuid
from decimal import Decimal
from typing import List, Tuple
from .models.enums import Offer
from .models.order import WalutomatOrder
from .models.account import AccountBalances
from . import WalutomatClient
class WrappedWalutomatClient(WalutomatClient):
def get_account_balances(self) -> AccountBalances:
result = super().get_account_balances()
return AccountBalances(result)
def get_p2p_best_offers_detailed(self, currency_pair, item_limit=10) -> Tuple[List, List]:
result = super().get_p2p_best_offers_detailed(currency_pair, item_limit)
bids = (Offer(offer['price'], offer['volume']) for offer in result.get('bids', []))
sorted_bids = sorted(bids, key=lambda o: o.price, reverse=True)
asks = (Offer(offer['price'], offer['volume']) for offer in result.get('asks', []))
sorted_asks = sorted(asks, key=lambda o: o.price)
return sorted_bids, sorted_asks
def get_p2p_active_orders(self, item_limit=10):
for result in super().get_p2p_active_orders(item_limit):
yield WalutomatOrder(**result)
def get_p2p_order_by_id(self, order_id) -> List[WalutomatOrder]:
result = super().get_p2p_order_by_id(order_id)
return list(WalutomatOrder(**raw_order) for raw_order in result)
def submit_p2p_order(self, order_id, currency_pair, buy_sell, volume, volume_currency, limit_price, dry=False):
result = super().submit_p2p_order(order_id, currency_pair, buy_sell, volume, volume_currency, limit_price, dry)
return result['orderId']
| none | 1 | 2.275172 | 2 | |
setup.py | dgabbe/bind_json_error_handlers | 0 | 6618092 | <gh_stars>0
from setuptools import setup, find_packages
with open("README.rst", "r") as fh:
long_description = fh.read()
VERSION = '1.1.2'
setup(name='bind_json_error_handlers',
version=VERSION,
description='make app return json errors',
long_description=long_description,
keywords='',
url='https://github.com/eric-s-s/bind_json_error_handlers',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False)
| from setuptools import setup, find_packages
with open("README.rst", "r") as fh:
long_description = fh.read()
VERSION = '1.1.2'
setup(name='bind_json_error_handlers',
version=VERSION,
description='make app return json errors',
long_description=long_description,
keywords='',
url='https://github.com/eric-s-s/bind_json_error_handlers',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False) | none | 1 | 1.349307 | 1 | |
Utils_Intersect.py | ZerethjiN/SweetNightmare | 0 | 6618093 | <filename>Utils_Intersect.py
import Utils_Position as Pos
import Utils_Size as Siz
def intersectXY(pos1, col1, pos2, col2):
x1 = ((pos2.x + col2.x) <= (pos1.x + col1.w))
x2 = ((pos2.x + col2.w) >= (pos1.x + col1.x))
y1 = ((pos2.y + col2.y) <= (pos1.y + col1.h))
y2 = ((pos2.y + col2.h) >= (pos1.y + col1.y))
return (x1 and x2 and y1 and y2) | <filename>Utils_Intersect.py
import Utils_Position as Pos
import Utils_Size as Siz
def intersectXY(pos1, col1, pos2, col2):
x1 = ((pos2.x + col2.x) <= (pos1.x + col1.w))
x2 = ((pos2.x + col2.w) >= (pos1.x + col1.x))
y1 = ((pos2.y + col2.y) <= (pos1.y + col1.h))
y2 = ((pos2.y + col2.h) >= (pos1.y + col1.y))
return (x1 and x2 and y1 and y2) | none | 1 | 2.720407 | 3 | |
nanobrok/ext/restapi.py | retr0-13/nanobroK | 142 | 6618094 | from flask import Blueprint, jsonify
from flask_restplus import Api
from werkzeug.exceptions import HTTPException
from nanobrok.ext.csrf_protect import csrf
# This file is part of the Nanobrok Open Source Project.
# nanobrok is licensed under the Apache 2.0.
# Copyright 2021 p0cL4bs Team - <NAME> (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ExtendedAPI(Api):
"""This class overrides 'handle_error' method of 'Api' class ,
to extend global exception handing functionality of 'flask-restful'.
"""
def handle_error(self, err):
"""It helps preventing writing unnecessary
try/except block though out the application
"""
print(err) # log every exception raised in the application
# Handle HTTPExceptions
if isinstance(err, HTTPException):
return jsonify({"message": err.msg, "code": err.code}), err.code
# If msg attribute is not set,
# consider it as Python core exception and
# hide sensitive error info from end user
if not getattr(err, "message", None):
return jsonify({"message": "Server has encountered some error"}), 500
# Handle application specific custom exceptions
return jsonify(**err.kwargs), err.http_status_code
bp_mob = Blueprint("mobile_restapi", __name__, url_prefix="/api/v1/mobile")
bp_web = Blueprint("web_restapi", __name__, url_prefix="/api/v1/web")
api_mob = ExtendedAPI(bp_mob, doc="mobile/documentation")
api_web = ExtendedAPI(bp_web, doc="web/documentation")
# exlcude routes mobile for csrf token validation
csrf.exempt(bp_mob)
# namespace for all web Controllers
ns_auth = api_web.namespace("authenticate", description="AuthControllers")
ns_location = api_web.namespace("getCurrentLocation", description="LocationControllers")
ns_commands = api_web.namespace("commands", description="CommandsControllers")
ns_transfer = api_web.namespace("transfer", description="TransferControllers")
# namespace for all mobile Controllers
ns_events = api_mob.namespace("events", description="EventControllers")
ns_user = api_mob.namespace("users", description="UserControllers")
def init_app(app):
app.register_blueprint(bp_mob)
app.register_blueprint(bp_web)
| from flask import Blueprint, jsonify
from flask_restplus import Api
from werkzeug.exceptions import HTTPException
from nanobrok.ext.csrf_protect import csrf
# This file is part of the Nanobrok Open Source Project.
# nanobrok is licensed under the Apache 2.0.
# Copyright 2021 p0cL4bs Team - <NAME> (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ExtendedAPI(Api):
"""This class overrides 'handle_error' method of 'Api' class ,
to extend global exception handing functionality of 'flask-restful'.
"""
def handle_error(self, err):
"""It helps preventing writing unnecessary
try/except block though out the application
"""
print(err) # log every exception raised in the application
# Handle HTTPExceptions
if isinstance(err, HTTPException):
return jsonify({"message": err.msg, "code": err.code}), err.code
# If msg attribute is not set,
# consider it as Python core exception and
# hide sensitive error info from end user
if not getattr(err, "message", None):
return jsonify({"message": "Server has encountered some error"}), 500
# Handle application specific custom exceptions
return jsonify(**err.kwargs), err.http_status_code
bp_mob = Blueprint("mobile_restapi", __name__, url_prefix="/api/v1/mobile")
bp_web = Blueprint("web_restapi", __name__, url_prefix="/api/v1/web")
api_mob = ExtendedAPI(bp_mob, doc="mobile/documentation")
api_web = ExtendedAPI(bp_web, doc="web/documentation")
# exlcude routes mobile for csrf token validation
csrf.exempt(bp_mob)
# namespace for all web Controllers
ns_auth = api_web.namespace("authenticate", description="AuthControllers")
ns_location = api_web.namespace("getCurrentLocation", description="LocationControllers")
ns_commands = api_web.namespace("commands", description="CommandsControllers")
ns_transfer = api_web.namespace("transfer", description="TransferControllers")
# namespace for all mobile Controllers
ns_events = api_mob.namespace("events", description="EventControllers")
ns_user = api_mob.namespace("users", description="UserControllers")
def init_app(app):
app.register_blueprint(bp_mob)
app.register_blueprint(bp_web)
| en | 0.800343 | # This file is part of the Nanobrok Open Source Project. # nanobrok is licensed under the Apache 2.0. # Copyright 2021 p0cL4bs Team - <NAME> (mh4x0f) # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This class overrides 'handle_error' method of 'Api' class , to extend global exception handing functionality of 'flask-restful'. It helps preventing writing unnecessary try/except block though out the application # log every exception raised in the application # Handle HTTPExceptions # If msg attribute is not set, # consider it as Python core exception and # hide sensitive error info from end user # Handle application specific custom exceptions # exlcude routes mobile for csrf token validation # namespace for all web Controllers # namespace for all mobile Controllers | 2.335898 | 2 |
orkg/client/papers.py | Reddine/orkg-pypi | 2 | 6618095 | <reponame>Reddine/orkg-pypi
from orkg.utils import NamespacedClient, query_params, dict_to_url_params
from orkg.out import OrkgResponse
class PapersClient(NamespacedClient):
def add(self, params=None):
"""
Create a new paper in the ORKG instance
:param params: paper Object
:return: an OrkgResponse object containing the newly created paper ressource
"""
self.client.backend._append_slash = True
response = self.client.backend.papers.POST(json=params)
return OrkgResponse(response)
| from orkg.utils import NamespacedClient, query_params, dict_to_url_params
from orkg.out import OrkgResponse
class PapersClient(NamespacedClient):
def add(self, params=None):
"""
Create a new paper in the ORKG instance
:param params: paper Object
:return: an OrkgResponse object containing the newly created paper ressource
"""
self.client.backend._append_slash = True
response = self.client.backend.papers.POST(json=params)
return OrkgResponse(response) | en | 0.78757 | Create a new paper in the ORKG instance :param params: paper Object :return: an OrkgResponse object containing the newly created paper ressource | 2.707815 | 3 |
challenge.py | JaimeEV/challenge-python-05 | 0 | 6618096 | import math
def square_area(side):
"""Returns the area of a square"""
# You have to code here
# REMEMBER: Tests first!!!
return side**2
def rectangle_area(base, height):
"""Returns the area of a rectangle"""
# You have to code here
# REMEMBER: Tests first!!!
return base * height
def triangle_area(base, height):
"""Returns the area of a triangle"""
# You have to code here
# REMEMBER: Tests first!!!
return (base * height)/2
def rhombus_area(diagonal_1, diagonal_2):
"""Returns the area of a rhombus"""
# You have to code here
# REMEMBER: Tests first!!!
return (diagonal_1 * diagonal_2)/2
def trapezoid_area(base_minor, base_major, height):
"""Returns the area of a trapezoid"""
# You have to code here
# REMEMBER: Tests first!!!
return ((base_minor + base_major)*height)/2
def regular_polygon_area(perimeter, apothem):
"""Returns the area of a regular polygon"""
# You have to code here
# REMEMBER: Tests first!!!
return (perimeter * apothem)/2
def circumference_area(radius):
"""Returns the area of a circumference"""
# You have to code here
# REMEMBER: Tests first!!!
# Use math.pi for π value
area = math.pi * (radius**2)
return round(area,2)
if __name__ == '__main__':
import unittest
class GeometrySuite(unittest.TestCase):
def setUp(self):
# Initialize the needed values for the tests
global side, base, height, diagonal_1, diagonal_2,base_minor, base_major, perimeter, apothem, radius
side = 2
base = 3
height = 4
diagonal_1 = 8
diagonal_2 = 6
base_minor = 20
base_major = 40
perimeter = 30
apothem = 4
radius = 5
def test_square_area(self):
# Make this test first...
result = square_area(side)
self.assertEqual(result, 4)
def test_rectangle_area(self):
# Make this test first...
result = rectangle_area(base, height)
self.assertEqual(result, 12)
def test_triangle_area(self):
# Make this test first...
result = triangle_area(base , height)
self.assertEqual(result, 6)
def test_rhombus_area(self):
# Make this test first...
result = rhombus_area(diagonal_1, diagonal_2)
self.assertEqual(result, 24)
def test_trapezoid_area(self):
# Make this test first...
result = trapezoid_area(base_minor, base_major, height)
self.assertEqual(result, 120)
def test_regular_polygon_area(self):
# Make this test first...
result = regular_polygon_area(perimeter, apothem)
self.assertEqual(result, 60)
def test_circumference_area(self):
# Make this test first...
result = circumference_area(radius)
self.assertEqual(result, 78.54)
def tearDown(self):
# Delete the needed values for the tests
pass
unittest.main()
| import math
def square_area(side):
"""Returns the area of a square"""
# You have to code here
# REMEMBER: Tests first!!!
return side**2
def rectangle_area(base, height):
"""Returns the area of a rectangle"""
# You have to code here
# REMEMBER: Tests first!!!
return base * height
def triangle_area(base, height):
"""Returns the area of a triangle"""
# You have to code here
# REMEMBER: Tests first!!!
return (base * height)/2
def rhombus_area(diagonal_1, diagonal_2):
"""Returns the area of a rhombus"""
# You have to code here
# REMEMBER: Tests first!!!
return (diagonal_1 * diagonal_2)/2
def trapezoid_area(base_minor, base_major, height):
"""Returns the area of a trapezoid"""
# You have to code here
# REMEMBER: Tests first!!!
return ((base_minor + base_major)*height)/2
def regular_polygon_area(perimeter, apothem):
"""Returns the area of a regular polygon"""
# You have to code here
# REMEMBER: Tests first!!!
return (perimeter * apothem)/2
def circumference_area(radius):
"""Returns the area of a circumference"""
# You have to code here
# REMEMBER: Tests first!!!
# Use math.pi for π value
area = math.pi * (radius**2)
return round(area,2)
if __name__ == '__main__':
import unittest
class GeometrySuite(unittest.TestCase):
def setUp(self):
# Initialize the needed values for the tests
global side, base, height, diagonal_1, diagonal_2,base_minor, base_major, perimeter, apothem, radius
side = 2
base = 3
height = 4
diagonal_1 = 8
diagonal_2 = 6
base_minor = 20
base_major = 40
perimeter = 30
apothem = 4
radius = 5
def test_square_area(self):
# Make this test first...
result = square_area(side)
self.assertEqual(result, 4)
def test_rectangle_area(self):
# Make this test first...
result = rectangle_area(base, height)
self.assertEqual(result, 12)
def test_triangle_area(self):
# Make this test first...
result = triangle_area(base , height)
self.assertEqual(result, 6)
def test_rhombus_area(self):
# Make this test first...
result = rhombus_area(diagonal_1, diagonal_2)
self.assertEqual(result, 24)
def test_trapezoid_area(self):
# Make this test first...
result = trapezoid_area(base_minor, base_major, height)
self.assertEqual(result, 120)
def test_regular_polygon_area(self):
# Make this test first...
result = regular_polygon_area(perimeter, apothem)
self.assertEqual(result, 60)
def test_circumference_area(self):
# Make this test first...
result = circumference_area(radius)
self.assertEqual(result, 78.54)
def tearDown(self):
# Delete the needed values for the tests
pass
unittest.main()
| en | 0.734555 | Returns the area of a square # You have to code here # REMEMBER: Tests first!!! Returns the area of a rectangle # You have to code here # REMEMBER: Tests first!!! Returns the area of a triangle # You have to code here # REMEMBER: Tests first!!! Returns the area of a rhombus # You have to code here # REMEMBER: Tests first!!! Returns the area of a trapezoid # You have to code here # REMEMBER: Tests first!!! Returns the area of a regular polygon # You have to code here # REMEMBER: Tests first!!! Returns the area of a circumference # You have to code here # REMEMBER: Tests first!!! # Use math.pi for π value # Initialize the needed values for the tests # Make this test first... # Make this test first... # Make this test first... # Make this test first... # Make this test first... # Make this test first... # Make this test first... # Delete the needed values for the tests | 4.206992 | 4 |
data_facts/scripts/data_metrics.py | demetriusHal/sigmod2018 | 2 | 6618097 | <reponame>demetriusHal/sigmod2018
#!/usr/bin/env python
# CREATED BY ORESTIS
# CONTINUED BY YORGOS
import matplotlib.pyplot as plt
import argparse
import sys
import pandas as pd
from decimal import *
# parse command line arguments
parser = argparse.ArgumentParser(description='Give me path to directory of .tbl files. Give me number of .tbl files. I will start from 0 and try to reach your given number minus 1. Also, give me an order.')
parser.add_argument('path', metavar='P', type=str, nargs='+',
help='path to the directory of .tbl files')
parser.add_argument('number', metavar='N', type=int, nargs='+',
help='number of .tbl files')
parser.add_argument('order', metavar='O', type=str, nargs='+',
help='order')
args = parser.parse_args()
if len(sys.argv) > 4:
sys.exit(1)
# path to directory of .tbl files
path = sys.argv[1]
if path[len(path)-1] != '\n':
path += '/'
# Number of relations
relations = int(sys.argv[2])
# is order ascending
is_ascending = False
if sys.argv[3].lower() == "asc" or sys.argv[3].lower() == "ascending" or sys.argv[3].lower() == "a":
is_ascending = True
# Relations' numbers
relations_numbers = [i for i in range(relations)]
# Count the columns of every relation
columns_counter = [0] * relations
# Count the rows of every relation
rows_counter = [0] * relations
# Count the sorted columns of every relation
sorted_columns_counter = [0] * relations
# Dictionary that maps the sorted columns of every relation
# to their respective relation
sorted_columns_dict = {}
# Dictionary that maps the number of unique elements per column
# to their respective table
unique_elements_dict = {}
for i in range(relations):
relation_file = "r" + str(i) + ".tbl"
with open(path + relation_file, "r") as file:
# the latest element read from each column
latest_element = []
# can a column be sorted from what we have seen so far?
is_sorted = []
# an array of sets that hold the (unique) elements of each column
column_elements_set = []
for line in file:
# count columns
if columns_counter[i] == 0:
columns_counter[i] = line.count('|')
latest_element = [-1] * columns_counter[i]
is_sorted = [True] * columns_counter[i]
column_elements_set = [set() for _ in xrange(columns_counter[i])]
# Remove the trailing "|" + newline characters
line = line.split("|\n")[0];
columns = line.split("|")
columns = map(int, columns)
index = 0
for column in columns:
# if is sorted so far, go ahead and try to prove otherwise
if is_sorted[index]:
if latest_element[index] != -1:
# we want ascending order
if is_ascending:
if latest_element[index] > column:
is_sorted[index] = False
# we want descending order
else:
if latest_element[index] < column:
is_sorted[index] = False
latest_element[index] = column
column_elements_set[index].add(column)
index += 1
# END for column
# count rows
rows_counter[i] += 1
# END for line
if i in unique_elements_dict:
for index in range(0, columns_counter[i]):
unique_elements_dict[i][index] = len(column_elements_set[index])
else:
unique_elements_dict[i] = [-1] * columns_counter[i]
for index in range(0, columns_counter[i]):
unique_elements_dict[i][index] = len(column_elements_set[index])
sorted_columns_dict[i] = []
index = 0
for s in is_sorted:
# print(s)
# print("*")
if s:
# print(s)
sorted_columns_counter[i] += 1
sorted_columns_dict[i].append(index)
index += 1
# END for s
# END with file open
# close table's file
file.close()
# Plot the columns data
plt.bar(range(len(columns_counter)), columns_counter, align='center')
plt.xticks(range(len(relations_numbers)), relations_numbers)
plt.title("Number of columns per relation")
plt.xlabel("Relations")
plt.ylabel("Number of columns")
plt.show()
# Plot the rows data
plt.bar(range(len(rows_counter)), rows_counter, align='center')
plt.xticks(range(len(relations_numbers)), relations_numbers)
plt.title("Number of rows per relation")
plt.xlabel("Relations")
plt.ylabel("Number of rows")
plt.show()
# Plot the sorted columns data
plt.bar(range(len(sorted_columns_counter)), sorted_columns_counter, align='center')
plt.xticks(range(len(relations_numbers)), relations_numbers)
plt.title("Number of sorted columns per relation")
plt.xlabel("Relations")
plt.ylabel("Number of sorted columns")
plt.show()
# More details regarding sorted columns
for table in sorted_columns_dict:
if len(sorted_columns_dict[table]) == 0:
print("Table " + str(table) + " has no sorted columns.")
else:
print("Table " + str(table) + " has the column(s) ")
for i in sorted_columns_dict[table]:
print(str(i) + " ")
print("sorted.")
# Details regarding unique elements
getcontext().prec = 3 # set precision
for table in unique_elements_dict:
print("Table " + str(table) + " has " + str(rows_counter[table]) + " rows and:")
index = 0
for column in unique_elements_dict[table]:
print("Column " + str(index) + " with " + str(column) + " (Fq = " + str(Decimal(column)/ Decimal(rows_counter[table])) + ") unique elements.")
index += 1
| #!/usr/bin/env python
# CREATED BY ORESTIS
# CONTINUED BY YORGOS
import matplotlib.pyplot as plt
import argparse
import sys
import pandas as pd
from decimal import *
# parse command line arguments
parser = argparse.ArgumentParser(description='Give me path to directory of .tbl files. Give me number of .tbl files. I will start from 0 and try to reach your given number minus 1. Also, give me an order.')
parser.add_argument('path', metavar='P', type=str, nargs='+',
help='path to the directory of .tbl files')
parser.add_argument('number', metavar='N', type=int, nargs='+',
help='number of .tbl files')
parser.add_argument('order', metavar='O', type=str, nargs='+',
help='order')
args = parser.parse_args()
if len(sys.argv) > 4:
sys.exit(1)
# path to directory of .tbl files
path = sys.argv[1]
if path[len(path)-1] != '\n':
path += '/'
# Number of relations
relations = int(sys.argv[2])
# is order ascending
is_ascending = False
if sys.argv[3].lower() == "asc" or sys.argv[3].lower() == "ascending" or sys.argv[3].lower() == "a":
is_ascending = True
# Relations' numbers
relations_numbers = [i for i in range(relations)]
# Count the columns of every relation
columns_counter = [0] * relations
# Count the rows of every relation
rows_counter = [0] * relations
# Count the sorted columns of every relation
sorted_columns_counter = [0] * relations
# Dictionary that maps the sorted columns of every relation
# to their respective relation
sorted_columns_dict = {}
# Dictionary that maps the number of unique elements per column
# to their respective table
unique_elements_dict = {}
for i in range(relations):
relation_file = "r" + str(i) + ".tbl"
with open(path + relation_file, "r") as file:
# the latest element read from each column
latest_element = []
# can a column be sorted from what we have seen so far?
is_sorted = []
# an array of sets that hold the (unique) elements of each column
column_elements_set = []
for line in file:
# count columns
if columns_counter[i] == 0:
columns_counter[i] = line.count('|')
latest_element = [-1] * columns_counter[i]
is_sorted = [True] * columns_counter[i]
column_elements_set = [set() for _ in xrange(columns_counter[i])]
# Remove the trailing "|" + newline characters
line = line.split("|\n")[0];
columns = line.split("|")
columns = map(int, columns)
index = 0
for column in columns:
# if is sorted so far, go ahead and try to prove otherwise
if is_sorted[index]:
if latest_element[index] != -1:
# we want ascending order
if is_ascending:
if latest_element[index] > column:
is_sorted[index] = False
# we want descending order
else:
if latest_element[index] < column:
is_sorted[index] = False
latest_element[index] = column
column_elements_set[index].add(column)
index += 1
# END for column
# count rows
rows_counter[i] += 1
# END for line
if i in unique_elements_dict:
for index in range(0, columns_counter[i]):
unique_elements_dict[i][index] = len(column_elements_set[index])
else:
unique_elements_dict[i] = [-1] * columns_counter[i]
for index in range(0, columns_counter[i]):
unique_elements_dict[i][index] = len(column_elements_set[index])
sorted_columns_dict[i] = []
index = 0
for s in is_sorted:
# print(s)
# print("*")
if s:
# print(s)
sorted_columns_counter[i] += 1
sorted_columns_dict[i].append(index)
index += 1
# END for s
# END with file open
# close table's file
file.close()
# Plot the columns data
plt.bar(range(len(columns_counter)), columns_counter, align='center')
plt.xticks(range(len(relations_numbers)), relations_numbers)
plt.title("Number of columns per relation")
plt.xlabel("Relations")
plt.ylabel("Number of columns")
plt.show()
# Plot the rows data
plt.bar(range(len(rows_counter)), rows_counter, align='center')
plt.xticks(range(len(relations_numbers)), relations_numbers)
plt.title("Number of rows per relation")
plt.xlabel("Relations")
plt.ylabel("Number of rows")
plt.show()
# Plot the sorted columns data
plt.bar(range(len(sorted_columns_counter)), sorted_columns_counter, align='center')
plt.xticks(range(len(relations_numbers)), relations_numbers)
plt.title("Number of sorted columns per relation")
plt.xlabel("Relations")
plt.ylabel("Number of sorted columns")
plt.show()
# More details regarding sorted columns
for table in sorted_columns_dict:
if len(sorted_columns_dict[table]) == 0:
print("Table " + str(table) + " has no sorted columns.")
else:
print("Table " + str(table) + " has the column(s) ")
for i in sorted_columns_dict[table]:
print(str(i) + " ")
print("sorted.")
# Details regarding unique elements
getcontext().prec = 3 # set precision
for table in unique_elements_dict:
print("Table " + str(table) + " has " + str(rows_counter[table]) + " rows and:")
index = 0
for column in unique_elements_dict[table]:
print("Column " + str(index) + " with " + str(column) + " (Fq = " + str(Decimal(column)/ Decimal(rows_counter[table])) + ") unique elements.")
index += 1 | en | 0.730079 | #!/usr/bin/env python # CREATED BY ORESTIS # CONTINUED BY YORGOS # parse command line arguments # path to directory of .tbl files # Number of relations # is order ascending # Relations' numbers # Count the columns of every relation # Count the rows of every relation # Count the sorted columns of every relation # Dictionary that maps the sorted columns of every relation # to their respective relation # Dictionary that maps the number of unique elements per column # to their respective table # the latest element read from each column # can a column be sorted from what we have seen so far? # an array of sets that hold the (unique) elements of each column # count columns # Remove the trailing "|" + newline characters # if is sorted so far, go ahead and try to prove otherwise # we want ascending order # we want descending order # END for column # count rows # END for line # print(s) # print("*") # print(s) # END for s # END with file open # close table's file # Plot the columns data # Plot the rows data # Plot the sorted columns data # More details regarding sorted columns # Details regarding unique elements # set precision | 3.135775 | 3 |
mochila_ed.py | joaovcaetano/Algoritmos-Bioinspirados | 0 | 6618098 | <gh_stars>0
from random import randint
from random import uniform
import math
import matplotlib.pyplot as plt
import random
tamanho_populacao = 200
numero_individuos = 8
cr = 0.9
f = 1.0 #0.6
geracoes = 100
melhores = []
individuo = []
def mochila():
produtos = []
produtos.append([11,1])
produtos.append([21,11])
produtos.append([31,21])
produtos.append([33,23])
produtos.append([43,33])
produtos.append([53,43])
produtos.append([55,45])
produtos.append([65,55])
return produtos
def popInicial():
for i in range(0,tamanho_populacao):
individuo.append([])
for j in range(0,numero_individuos):
if(random.random()< 0.5):
individuo[i].append(0)
else:
individuo[i].append(1)
return individuo
def geraPeso(individuo):
peso_individuo = 0
utilidade_mochila = 0
for i in range(0,len(individuo)):
if(individuo[i] == 1):
peso_individuo = peso_individuo + mochila[i][0]
utilidade_mochila = utilidade_mochila + mochila[i][1]
mochila_individuo = []
mochila_individuo.append(peso_individuo)
mochila_individuo.append(utilidade_mochila)
return mochila_individuo
def fitness(individuo):
fit_ind = []
peso_uti = geraPeso(individuo)
if(peso_uti[0] > 100):
if(peso_uti[0] >= 200):
peso_uti[0] = 199
extrapola = peso_uti[0] - 100
porcentagem_extrapolada = float(extrapola) / float(100)
reduz_uti = porcentagem_extrapolada * float(peso_uti[1])
peso_uti[0] = 100
peso_uti[1] = peso_uti[1] - reduz_uti
fit_ind.append(peso_uti)
else:
fit_ind.append(peso_uti)
return fit_ind
def mutacao(individuo):
x1 = 0
x2 = 1
x3 = 2
while((x1 != x3) and (x1 != x2) and (x2 != x3)):
x1 = uniform(0.0, float(tamanho_populacao))
x2 = uniform(0.0, float(tamanho_populacao))
x3 = uniform(0.0, float(tamanho_populacao))
x1 = int(x1)
x2 = int(x2)
x3 = int(x3)
ind1 = individuo[x1]
ind2 = individuo[x2]
ind3 = individuo[x3]
for i in range(0,numero_individuos):
ind1[i] = ind1[i] +(ind3[i] * ind2[i])
if ind1[i] > 1:
ind1[i] = 1
return ind1
def cruzamento(individuo, pop_inter):
filhos = []
for i in range(0,tamanho_populacao):
filho = []
for j in range(0, numero_individuos):
random = uniform(0.0, 1.0)
if(random < cr):
filho.append(individuo[i][j])
else:
filho.append(pop_inter[i][j])
filhos.append(filho)
return filhos
def prox_geracao(individuo, filhos, fit_pai):
novo_ind = []
for i in range(0, len(individuo)):
fit_filho = fitness(filhos[i])
if (fit_pai[0][0][1] > fit_filho[0][1]):
novo_ind.append(individuo[i])
else:
novo_ind.append(filhos[i])
return novo_ind
def fit_geral(populacao):
fit_geral = [0] * tamanho_populacao
for i in range(0, len(populacao)):
fit_i = fitness(populacao[i])
fit_geral[i] = fit_i
return fit_geral
mochila = mochila()
individuo = popInicial()
k = 0
melhor = []
fit_melhor = fit_geral(individuo)
while(k<geracoes):
pop_int = []
for i in range(0,tamanho_populacao):
ind_int = mutacao(individuo)
pop_int.append(ind_int)
filhos = cruzamento(individuo, pop_int)
novo_ind = prox_geracao(individuo, filhos, fit_melhor)
util = []
for i in range(0,len(fit_melhor)):
util.append(fit_melhor[i][0][1])
index = util.index(max(util))
melhor.append(max(util))
novo_ind[0] = individuo[index]
k = k + 1
individuo = novo_ind
print melhor | from random import randint
from random import uniform
import math
import matplotlib.pyplot as plt
import random
tamanho_populacao = 200
numero_individuos = 8
cr = 0.9
f = 1.0 #0.6
geracoes = 100
melhores = []
individuo = []
def mochila():
produtos = []
produtos.append([11,1])
produtos.append([21,11])
produtos.append([31,21])
produtos.append([33,23])
produtos.append([43,33])
produtos.append([53,43])
produtos.append([55,45])
produtos.append([65,55])
return produtos
def popInicial():
for i in range(0,tamanho_populacao):
individuo.append([])
for j in range(0,numero_individuos):
if(random.random()< 0.5):
individuo[i].append(0)
else:
individuo[i].append(1)
return individuo
def geraPeso(individuo):
peso_individuo = 0
utilidade_mochila = 0
for i in range(0,len(individuo)):
if(individuo[i] == 1):
peso_individuo = peso_individuo + mochila[i][0]
utilidade_mochila = utilidade_mochila + mochila[i][1]
mochila_individuo = []
mochila_individuo.append(peso_individuo)
mochila_individuo.append(utilidade_mochila)
return mochila_individuo
def fitness(individuo):
fit_ind = []
peso_uti = geraPeso(individuo)
if(peso_uti[0] > 100):
if(peso_uti[0] >= 200):
peso_uti[0] = 199
extrapola = peso_uti[0] - 100
porcentagem_extrapolada = float(extrapola) / float(100)
reduz_uti = porcentagem_extrapolada * float(peso_uti[1])
peso_uti[0] = 100
peso_uti[1] = peso_uti[1] - reduz_uti
fit_ind.append(peso_uti)
else:
fit_ind.append(peso_uti)
return fit_ind
def mutacao(individuo):
x1 = 0
x2 = 1
x3 = 2
while((x1 != x3) and (x1 != x2) and (x2 != x3)):
x1 = uniform(0.0, float(tamanho_populacao))
x2 = uniform(0.0, float(tamanho_populacao))
x3 = uniform(0.0, float(tamanho_populacao))
x1 = int(x1)
x2 = int(x2)
x3 = int(x3)
ind1 = individuo[x1]
ind2 = individuo[x2]
ind3 = individuo[x3]
for i in range(0,numero_individuos):
ind1[i] = ind1[i] +(ind3[i] * ind2[i])
if ind1[i] > 1:
ind1[i] = 1
return ind1
def cruzamento(individuo, pop_inter):
filhos = []
for i in range(0,tamanho_populacao):
filho = []
for j in range(0, numero_individuos):
random = uniform(0.0, 1.0)
if(random < cr):
filho.append(individuo[i][j])
else:
filho.append(pop_inter[i][j])
filhos.append(filho)
return filhos
def prox_geracao(individuo, filhos, fit_pai):
novo_ind = []
for i in range(0, len(individuo)):
fit_filho = fitness(filhos[i])
if (fit_pai[0][0][1] > fit_filho[0][1]):
novo_ind.append(individuo[i])
else:
novo_ind.append(filhos[i])
return novo_ind
def fit_geral(populacao):
fit_geral = [0] * tamanho_populacao
for i in range(0, len(populacao)):
fit_i = fitness(populacao[i])
fit_geral[i] = fit_i
return fit_geral
mochila = mochila()
individuo = popInicial()
k = 0
melhor = []
fit_melhor = fit_geral(individuo)
while(k<geracoes):
pop_int = []
for i in range(0,tamanho_populacao):
ind_int = mutacao(individuo)
pop_int.append(ind_int)
filhos = cruzamento(individuo, pop_int)
novo_ind = prox_geracao(individuo, filhos, fit_melhor)
util = []
for i in range(0,len(fit_melhor)):
util.append(fit_melhor[i][0][1])
index = util.index(max(util))
melhor.append(max(util))
novo_ind[0] = individuo[index]
k = k + 1
individuo = novo_ind
print melhor | none | 1 | 2.965318 | 3 | |
tests/test_sign_cmd.py | ixje/app-neo3 | 0 | 6618099 | <reponame>ixje/app-neo3<gh_stars>0
import struct
from hashlib import sha256
from ecdsa.curves import NIST256p
from ecdsa.keys import VerifyingKey
from ecdsa.util import sigdecode_der
from neo3.network import node, payloads
from neo3.core import types, serialization
from neo3 import contracts, wallet, vm
def test_sign_tx(cmd, button):
"""
In order to debug this test locally while being able to see what's happening in the app
run Speculos as follows:
./speculos.py --sdk 2.0 --ontop --button-port 42000 ../app-neo3/bin/app.elf
Then in PyCharm add "--headless" as Additional Argument for the testcase.
This will make sure it selects TCPButton instead of a fake button that does nothing.
"""
bip44_path: str = "m/44'/888'/0'/0/0"
pub_key = cmd.get_public_key(
bip44_path=bip44_path,
display=False
) # type: bytes
pk: VerifyingKey = VerifyingKey.from_string(
pub_key,
curve=NIST256p,
hashfunc=sha256
)
signer = payloads.Signer(account=types.UInt160.from_string("d7678dd97c000be3f33e9362e673101bac4ca654"),
scope=payloads.WitnessScope.CALLED_BY_ENTRY)
witness = payloads.Witness(invocation_script=b'', verification_script=b'\x55')
magic = 860833102
# build a NEO transfer script
from_account = wallet.Account.address_to_script_hash("NSiVJYZej4XsxG5CUpdwn7VRQk8iiiDMPM").to_array()
to_account = wallet.Account.address_to_script_hash("NU5unwNcWLqPM21cNCRP1LPuhxsTpYvNTf").to_array()
amount = 11 * contracts.NeoToken().factor
data = None
sb = vm.ScriptBuilder()
sb.emit_dynamic_call_with_args(contracts.NeoToken().hash, "transfer", [from_account, to_account, amount, data])
tx = payloads.Transaction(version=0,
nonce=123,
system_fee=456,
network_fee=789,
valid_until_block=1,
attributes=[],
signers=[signer],
script=sb.to_array(),
witnesses=[witness])
der_sig = cmd.sign_tx(bip44_path=bip44_path,
transaction=tx,
network_magic=magic,
button=button)
with serialization.BinaryWriter() as writer:
tx.serialize_unsigned(writer)
tx_data: bytes = writer.to_array()
assert pk.verify(signature=der_sig,
data=struct.pack("I", magic) + sha256(tx_data).digest(),
hashfunc=sha256,
sigdecode=sigdecode_der) is True
| import struct
from hashlib import sha256
from ecdsa.curves import NIST256p
from ecdsa.keys import VerifyingKey
from ecdsa.util import sigdecode_der
from neo3.network import node, payloads
from neo3.core import types, serialization
from neo3 import contracts, wallet, vm
def test_sign_tx(cmd, button):
"""
In order to debug this test locally while being able to see what's happening in the app
run Speculos as follows:
./speculos.py --sdk 2.0 --ontop --button-port 42000 ../app-neo3/bin/app.elf
Then in PyCharm add "--headless" as Additional Argument for the testcase.
This will make sure it selects TCPButton instead of a fake button that does nothing.
"""
bip44_path: str = "m/44'/888'/0'/0/0"
pub_key = cmd.get_public_key(
bip44_path=bip44_path,
display=False
) # type: bytes
pk: VerifyingKey = VerifyingKey.from_string(
pub_key,
curve=NIST256p,
hashfunc=sha256
)
signer = payloads.Signer(account=types.UInt160.from_string("d7678dd97c000be3f33e9362e673101bac4ca654"),
scope=payloads.WitnessScope.CALLED_BY_ENTRY)
witness = payloads.Witness(invocation_script=b'', verification_script=b'\x55')
magic = 860833102
# build a NEO transfer script
from_account = wallet.Account.address_to_script_hash("NSiVJYZej4XsxG5CUpdwn7VRQk8iiiDMPM").to_array()
to_account = wallet.Account.address_to_script_hash("NU5unwNcWLqPM21cNCRP1LPuhxsTpYvNTf").to_array()
amount = 11 * contracts.NeoToken().factor
data = None
sb = vm.ScriptBuilder()
sb.emit_dynamic_call_with_args(contracts.NeoToken().hash, "transfer", [from_account, to_account, amount, data])
tx = payloads.Transaction(version=0,
nonce=123,
system_fee=456,
network_fee=789,
valid_until_block=1,
attributes=[],
signers=[signer],
script=sb.to_array(),
witnesses=[witness])
der_sig = cmd.sign_tx(bip44_path=bip44_path,
transaction=tx,
network_magic=magic,
button=button)
with serialization.BinaryWriter() as writer:
tx.serialize_unsigned(writer)
tx_data: bytes = writer.to_array()
assert pk.verify(signature=der_sig,
data=struct.pack("I", magic) + sha256(tx_data).digest(),
hashfunc=sha256,
sigdecode=sigdecode_der) is True | en | 0.82319 | In order to debug this test locally while being able to see what's happening in the app run Speculos as follows: ./speculos.py --sdk 2.0 --ontop --button-port 42000 ../app-neo3/bin/app.elf Then in PyCharm add "--headless" as Additional Argument for the testcase. This will make sure it selects TCPButton instead of a fake button that does nothing. # type: bytes # build a NEO transfer script | 2.042184 | 2 |
biothings_explorer/call_apis/api_response_transform/transformers/reasoner_transformer.py | Carolina1396/biothings_explorer | 21 | 6618100 | from collections import defaultdict
from .base_transformer import BaseTransformer
from ....config_new import ALWAYS_PREFIXED
class ReasonerTransformer(BaseTransformer):
def wrap(self, res):
result = defaultdict(list)
if not res.get("knowledge_graph"):
return res
if not res["knowledge_graph"].get("edges"):
return res
for edge in res["knowledge_graph"]["edges"]:
if "target_id" in edge:
prefix = edge["target_id"].split(":")[0]
if prefix in ALWAYS_PREFIXED:
result[prefix].append(edge["target_id"])
else:
result[prefix].append(edge["target_id"].split(":")[-1])
return {self.edge["association"]["predicate"]: result}
def jsonTransform(self, res):
return res
| from collections import defaultdict
from .base_transformer import BaseTransformer
from ....config_new import ALWAYS_PREFIXED
class ReasonerTransformer(BaseTransformer):
def wrap(self, res):
result = defaultdict(list)
if not res.get("knowledge_graph"):
return res
if not res["knowledge_graph"].get("edges"):
return res
for edge in res["knowledge_graph"]["edges"]:
if "target_id" in edge:
prefix = edge["target_id"].split(":")[0]
if prefix in ALWAYS_PREFIXED:
result[prefix].append(edge["target_id"])
else:
result[prefix].append(edge["target_id"].split(":")[-1])
return {self.edge["association"]["predicate"]: result}
def jsonTransform(self, res):
return res
| none | 1 | 2.620142 | 3 | |
scoring_utils.py | luvalenz/time-series-variability-tree | 1 | 6618101 | import numpy as np
import time
def relevance(retrieved_labels, relevant_label):
return np.array(retrieved_labels) == relevant_label
def dcg(relevance):
index = np.arange(len(relevance))
discount = np.log2(index + 2)
term = (2**relevance-1)/discount
return np.cumsum(term)
def map(retrieved, relevant_label):
relevants = relevance(retrieved, relevant_label)
cumulative_relevants = np.cumsum(relevants)
precision = cumulative_relevants/np.arange(1, len(cumulative_relevants) + 1)
map_score = np.sum(precision*relevants)/np.sum(relevants)
return map_score
def precision(retrieved, relevant_label, n):
relevants = relevance(retrieved, relevant_label)
cumulative_relevants = np.cumsum(relevants)
precision_score = cumulative_relevants/np.arange(1, len(cumulative_relevants) + 1)
length = len(precision_score)
if n <= length:
map_score = precision_score[:n]
else :
padding = n - length
print(padding)
map_score = np.concatenate((precision_score, -1*np.ones(padding)))
return map_score
def ndcg(retrieved, relevant_label, n):
rel_true = relevance(retrieved, relevant_label)
rel_ideal = np.sort(rel_true)[::-1]
dcg_score = dcg(rel_true)
idcg_score = dcg(rel_ideal)
ndcg_score = dcg_score/idcg_score
length = len(ndcg_score)
if n <= length:
ndcg_score = ndcg_score[:n]
else :
padding = n - length
print(padding)
ndcg_score = np.concatenate((ndcg_score, -1*np.ones(padding)))
return ndcg_score
class Timer:
def __init__(self):
self.elapsed_times = []
self.current_start = None
def start(self):
self.current_start = time.time()
def stop(self):
current = time.time()
elapsed = current - self.current_start
self.elapsed_times.append(elapsed)
self.print()
def print(self):
print(self.elapsed_times)
| import numpy as np
import time
def relevance(retrieved_labels, relevant_label):
return np.array(retrieved_labels) == relevant_label
def dcg(relevance):
index = np.arange(len(relevance))
discount = np.log2(index + 2)
term = (2**relevance-1)/discount
return np.cumsum(term)
def map(retrieved, relevant_label):
relevants = relevance(retrieved, relevant_label)
cumulative_relevants = np.cumsum(relevants)
precision = cumulative_relevants/np.arange(1, len(cumulative_relevants) + 1)
map_score = np.sum(precision*relevants)/np.sum(relevants)
return map_score
def precision(retrieved, relevant_label, n):
relevants = relevance(retrieved, relevant_label)
cumulative_relevants = np.cumsum(relevants)
precision_score = cumulative_relevants/np.arange(1, len(cumulative_relevants) + 1)
length = len(precision_score)
if n <= length:
map_score = precision_score[:n]
else :
padding = n - length
print(padding)
map_score = np.concatenate((precision_score, -1*np.ones(padding)))
return map_score
def ndcg(retrieved, relevant_label, n):
rel_true = relevance(retrieved, relevant_label)
rel_ideal = np.sort(rel_true)[::-1]
dcg_score = dcg(rel_true)
idcg_score = dcg(rel_ideal)
ndcg_score = dcg_score/idcg_score
length = len(ndcg_score)
if n <= length:
ndcg_score = ndcg_score[:n]
else :
padding = n - length
print(padding)
ndcg_score = np.concatenate((ndcg_score, -1*np.ones(padding)))
return ndcg_score
class Timer:
def __init__(self):
self.elapsed_times = []
self.current_start = None
def start(self):
self.current_start = time.time()
def stop(self):
current = time.time()
elapsed = current - self.current_start
self.elapsed_times.append(elapsed)
self.print()
def print(self):
print(self.elapsed_times)
| none | 1 | 2.813785 | 3 | |
SPOJ/Random/IWGBS/a.py | VastoLorde95/Competitive-Programming | 170 | 6618102 | <filename>SPOJ/Random/IWGBS/a.py
n = input()
if n == 0:
print 0
elif n == 1:
print 2
elif n == 2:
print 3
else:
a = 2
b = 3
c = 0
for i in xrange(2,n+1):
c = a+b
a = b
b = c
print a
| <filename>SPOJ/Random/IWGBS/a.py
n = input()
if n == 0:
print 0
elif n == 1:
print 2
elif n == 2:
print 3
else:
a = 2
b = 3
c = 0
for i in xrange(2,n+1):
c = a+b
a = b
b = c
print a
| none | 1 | 3.12529 | 3 | |
main.py | ymgan/ANTARXXVII-Leg1 | 0 | 6618103 | <gh_stars>0
import time
import requests
import csv
import datetime
corrected_records = []
def convert_extension_case(url):
"""
Some records could not resolved because of case sensitivity, where jpg should be JPG or vice versa.
The function converts uppercase extension to lowercase extension and vice versa.
:param url: url link to the multimedia file, should ends with file extension.
:return: url with lower case extension converted to uppercase or vice versa.
"""
# split at last "." gives ['https://zenodo.org/record/4942307/files/St24_BVV_A1', 'JPG']
split_url = url.rsplit('.', 1)
slug = split_url[0]
extension = split_url[1]
uppercase_extension = extension.upper()
lowercase_extension = extension.lower()
if split_url[1] == uppercase_extension:
new_url = '{}.{}'.format(slug, lowercase_extension)
else:
new_url = '{}.{}'.format(slug, uppercase_extension)
return new_url
def fix_url(file_path):
"""
A function to fix url using `convert_extension_case` function above. Will write the records into new file under
data/processed/ directory.
:param file_path: Path to multimedia file
:return:
"""
today = datetime.datetime.now().date()
with open(file_path) as f:
tsv_file = csv.reader(f, delimiter="\t")
for line in tsv_file:
time.sleep(0.1)
url = '{}'.format(line[3])
r = requests.get(url)
print(url, r.status_code)
if r.status_code == 200:
corrected_records.append(line)
else:
new_url = convert_extension_case(url)
corrected_records.append([line[0], line[1], line[2], new_url, line[4], line[5], line[6], line[7], line[8]])
fixed_url_file = './data/processed/{}_ANTAR-XXVII-multimedia.tsv'.format(today)
with open(fixed_url_file, "w+") as outfile:
tsv_writer = csv.writer(outfile, delimiter="\t")
for line in corrected_records:
tsv_writer.writerow(line)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
fix_url("data/interim/2021-06-23_ANTAR-XXVII.xlsx-multimedia.tsv")
| import time
import requests
import csv
import datetime
corrected_records = []
def convert_extension_case(url):
"""
Some records could not resolved because of case sensitivity, where jpg should be JPG or vice versa.
The function converts uppercase extension to lowercase extension and vice versa.
:param url: url link to the multimedia file, should ends with file extension.
:return: url with lower case extension converted to uppercase or vice versa.
"""
# split at last "." gives ['https://zenodo.org/record/4942307/files/St24_BVV_A1', 'JPG']
split_url = url.rsplit('.', 1)
slug = split_url[0]
extension = split_url[1]
uppercase_extension = extension.upper()
lowercase_extension = extension.lower()
if split_url[1] == uppercase_extension:
new_url = '{}.{}'.format(slug, lowercase_extension)
else:
new_url = '{}.{}'.format(slug, uppercase_extension)
return new_url
def fix_url(file_path):
"""
A function to fix url using `convert_extension_case` function above. Will write the records into new file under
data/processed/ directory.
:param file_path: Path to multimedia file
:return:
"""
today = datetime.datetime.now().date()
with open(file_path) as f:
tsv_file = csv.reader(f, delimiter="\t")
for line in tsv_file:
time.sleep(0.1)
url = '{}'.format(line[3])
r = requests.get(url)
print(url, r.status_code)
if r.status_code == 200:
corrected_records.append(line)
else:
new_url = convert_extension_case(url)
corrected_records.append([line[0], line[1], line[2], new_url, line[4], line[5], line[6], line[7], line[8]])
fixed_url_file = './data/processed/{}_ANTAR-XXVII-multimedia.tsv'.format(today)
with open(fixed_url_file, "w+") as outfile:
tsv_writer = csv.writer(outfile, delimiter="\t")
for line in corrected_records:
tsv_writer.writerow(line)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
fix_url("data/interim/2021-06-23_ANTAR-XXVII.xlsx-multimedia.tsv") | en | 0.689585 | Some records could not resolved because of case sensitivity, where jpg should be JPG or vice versa. The function converts uppercase extension to lowercase extension and vice versa. :param url: url link to the multimedia file, should ends with file extension. :return: url with lower case extension converted to uppercase or vice versa. # split at last "." gives ['https://zenodo.org/record/4942307/files/St24_BVV_A1', 'JPG'] A function to fix url using `convert_extension_case` function above. Will write the records into new file under data/processed/ directory. :param file_path: Path to multimedia file :return: # Press the green button in the gutter to run the script. | 3.761173 | 4 |
Code4ObjectDetection/Pre_process/dataset_format/wider2coco/convert_face_to_coco.py | MeepoAII/python | 0 | 6618104 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import re
import fnmatch
import datetime
from PIL import Image
import numpy as np
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = os.path.dirname(__file__)
add_path(this_dir)
# print(this_dir)
add_path(os.path.join(this_dir, '..', '..'))
import utils
import utils.boxes as bboxs_util
import utils.face_utils as face_util
# INFO = {
# "description": "WIDER Face Dataset",
# "url": "http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/",
# "version": "0.1.0",
# "year": 2018,
# "contributor": "umass vision",
# "date_created": datetime.datetime.utcnow().isoformat(' ')
# }
# LICENSES = [
# {
# "id": 1,
# "name": "placeholder",
# "url": "placeholder"
# }
# ]
# CATEGORIES = [
# {
# 'id': 1,
# 'name': 'face',
# 'supercategory': 'face',
# },
# ]
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
'--dataset', help="wider", default='wider', type=str)
parser.add_argument(
'--outdir', help="output dir for json files",
default='data/WIDER', type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted",
default='data/WIDER', type=str)
parser.add_argument(
'--imdir', help="root directory for loading dataset images",
default='data/WIDER', type=str)
parser.add_argument(
'--annotfile', help="directly specify the annotations file",
default='', type=str)
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
return parser.parse_args()
def convert_wider_annots(data_dir, out_dir, data_set='WIDER'):
"""Convert from WIDER FDDB-style format to COCO bounding box"""
json_name = 'wider_face_train_annot_coco_style.json'
img_id = 0
ann_id = 0
cat_id = 1
print('Starting %s' % data_set)
ann_dict = {}
categories = [{"id": 1, "name": 'face'}]
images = []
annotations = []
ann_file = os.path.join(data_dir, 'wider_face_train_annot.txt')
wider_annot_dict = face_util.parse_wider_gt(ann_file) # [im-file] = [[x,y,w,h], ...]
for filename in wider_annot_dict.keys():
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
image = {}
image['id'] = img_id
img_id += 1
im = Image.open(os.path.join(data_dir, filename))
image['width'] = im.height
image['height'] = im.width
image['file_name'] = filename
images.append(image)
for gt_bbox in wider_annot_dict[filename]:
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = []
ann['category_id'] = cat_id # 1:"face" for WIDER
ann['iscrowd'] = 0
ann['area'] = gt_bbox[2] * gt_bbox[3]
ann['bbox'] = gt_bbox
annotations.append(ann)
ann_dict['images'] = images
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile:
outfile.write(json.dumps(ann_dict))
def convert_cs6_annots(ann_file, im_dir, out_dir, data_set='CS6-subset'):
"""Convert from WIDER FDDB-style format to COCO bounding box"""
if data_set == 'CS6-subset':
json_name = 'cs6-subset_face_train_annot_coco_style.json'
# ann_file = os.path.join(data_dir, 'wider_face_train_annot.txt')
else:
raise NotImplementedError
img_id = 0
ann_id = 0
cat_id = 1
print('Starting %s' % data_set)
ann_dict = {}
categories = [{"id": 1, "name": 'face'}]
images = []
annotations = []
wider_annot_dict = face_util.parse_wider_gt(ann_file) # [im-file] = [[x,y,w,h], ...]
for filename in wider_annot_dict.keys():
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
image = {}
image['id'] = img_id
img_id += 1
im = Image.open(os.path.join(im_dir, filename))
image['width'] = im.height
image['height'] = im.width
image['file_name'] = filename
images.append(image)
for gt_bbox in wider_annot_dict[filename]:
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = []
ann['category_id'] = cat_id # 1:"face" for WIDER
ann['iscrowd'] = 0
ann['area'] = gt_bbox[2] * gt_bbox[3]
ann['bbox'] = gt_bbox
annotations.append(ann)
ann_dict['images'] = images
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile:
outfile.write(json.dumps(ann_dict))
if __name__ == '__main__':
args = parse_args()
if args.dataset == "wider":
convert_wider_annots(args.datadir, args.outdir)
if args.dataset == "cs6-subset":
convert_cs6_annots(args.annotfile, args.imdir,
args.outdir, data_set='CS6-subset')
else:
print("Dataset not supported: %s" % args.dataset) | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import re
import fnmatch
import datetime
from PIL import Image
import numpy as np
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = os.path.dirname(__file__)
add_path(this_dir)
# print(this_dir)
add_path(os.path.join(this_dir, '..', '..'))
import utils
import utils.boxes as bboxs_util
import utils.face_utils as face_util
# INFO = {
# "description": "WIDER Face Dataset",
# "url": "http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/",
# "version": "0.1.0",
# "year": 2018,
# "contributor": "umass vision",
# "date_created": datetime.datetime.utcnow().isoformat(' ')
# }
# LICENSES = [
# {
# "id": 1,
# "name": "placeholder",
# "url": "placeholder"
# }
# ]
# CATEGORIES = [
# {
# 'id': 1,
# 'name': 'face',
# 'supercategory': 'face',
# },
# ]
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
'--dataset', help="wider", default='wider', type=str)
parser.add_argument(
'--outdir', help="output dir for json files",
default='data/WIDER', type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted",
default='data/WIDER', type=str)
parser.add_argument(
'--imdir', help="root directory for loading dataset images",
default='data/WIDER', type=str)
parser.add_argument(
'--annotfile', help="directly specify the annotations file",
default='', type=str)
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
return parser.parse_args()
def convert_wider_annots(data_dir, out_dir, data_set='WIDER'):
"""Convert from WIDER FDDB-style format to COCO bounding box"""
json_name = 'wider_face_train_annot_coco_style.json'
img_id = 0
ann_id = 0
cat_id = 1
print('Starting %s' % data_set)
ann_dict = {}
categories = [{"id": 1, "name": 'face'}]
images = []
annotations = []
ann_file = os.path.join(data_dir, 'wider_face_train_annot.txt')
wider_annot_dict = face_util.parse_wider_gt(ann_file) # [im-file] = [[x,y,w,h], ...]
for filename in wider_annot_dict.keys():
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
image = {}
image['id'] = img_id
img_id += 1
im = Image.open(os.path.join(data_dir, filename))
image['width'] = im.height
image['height'] = im.width
image['file_name'] = filename
images.append(image)
for gt_bbox in wider_annot_dict[filename]:
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = []
ann['category_id'] = cat_id # 1:"face" for WIDER
ann['iscrowd'] = 0
ann['area'] = gt_bbox[2] * gt_bbox[3]
ann['bbox'] = gt_bbox
annotations.append(ann)
ann_dict['images'] = images
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile:
outfile.write(json.dumps(ann_dict))
def convert_cs6_annots(ann_file, im_dir, out_dir, data_set='CS6-subset'):
"""Convert from WIDER FDDB-style format to COCO bounding box"""
if data_set == 'CS6-subset':
json_name = 'cs6-subset_face_train_annot_coco_style.json'
# ann_file = os.path.join(data_dir, 'wider_face_train_annot.txt')
else:
raise NotImplementedError
img_id = 0
ann_id = 0
cat_id = 1
print('Starting %s' % data_set)
ann_dict = {}
categories = [{"id": 1, "name": 'face'}]
images = []
annotations = []
wider_annot_dict = face_util.parse_wider_gt(ann_file) # [im-file] = [[x,y,w,h], ...]
for filename in wider_annot_dict.keys():
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
image = {}
image['id'] = img_id
img_id += 1
im = Image.open(os.path.join(im_dir, filename))
image['width'] = im.height
image['height'] = im.width
image['file_name'] = filename
images.append(image)
for gt_bbox in wider_annot_dict[filename]:
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = []
ann['category_id'] = cat_id # 1:"face" for WIDER
ann['iscrowd'] = 0
ann['area'] = gt_bbox[2] * gt_bbox[3]
ann['bbox'] = gt_bbox
annotations.append(ann)
ann_dict['images'] = images
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile:
outfile.write(json.dumps(ann_dict))
if __name__ == '__main__':
args = parse_args()
if args.dataset == "wider":
convert_wider_annots(args.datadir, args.outdir)
if args.dataset == "cs6-subset":
convert_cs6_annots(args.annotfile, args.imdir,
args.outdir, data_set='CS6-subset')
else:
print("Dataset not supported: %s" % args.dataset) | en | 0.291177 | # print(this_dir) # INFO = { # "description": "WIDER Face Dataset", # "url": "http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/", # "version": "0.1.0", # "year": 2018, # "contributor": "umass vision", # "date_created": datetime.datetime.utcnow().isoformat(' ') # } # LICENSES = [ # { # "id": 1, # "name": "placeholder", # "url": "placeholder" # } # ] # CATEGORIES = [ # { # 'id': 1, # 'name': 'face', # 'supercategory': 'face', # }, # ] # if len(sys.argv) == 1: # parser.print_help() # sys.exit(1) Convert from WIDER FDDB-style format to COCO bounding box # [im-file] = [[x,y,w,h], ...] # 1:"face" for WIDER Convert from WIDER FDDB-style format to COCO bounding box # ann_file = os.path.join(data_dir, 'wider_face_train_annot.txt') # [im-file] = [[x,y,w,h], ...] # 1:"face" for WIDER | 2.039484 | 2 |
django_eth_events/migrations/0006_block_timestamp.py | vaporyorg/django-eth-events | 39 | 6618105 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-29 14:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``timestamp`` column to the ``Block`` model.

    Auto-generated by Django 1.11 ``makemigrations``; edit with care.
    """

    # Must be applied after the daemon-status migration of this app.
    dependencies = [
        ('django_eth_events', '0005_daemon_status'),
    ]

    operations = [
        migrations.AddField(
            model_name='block',
            name='timestamp',
            # Existing rows are backfilled with 0 (presumably a Unix epoch
            # timestamp — confirm with the code that writes this field).
            field=models.IntegerField(default=0),
            # The default exists only for this migration; it is not kept
            # on the model field afterwards.
            preserve_default=False,
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-29 14:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``timestamp`` column to the ``Block`` model.

    Auto-generated by Django 1.11 ``makemigrations``; edit with care.
    """

    # Must be applied after the daemon-status migration of this app.
    dependencies = [
        ('django_eth_events', '0005_daemon_status'),
    ]

    operations = [
        migrations.AddField(
            model_name='block',
            name='timestamp',
            # Existing rows are backfilled with 0 (presumably a Unix epoch
            # timestamp — confirm with the code that writes this field).
            field=models.IntegerField(default=0),
            # The default exists only for this migration; it is not kept
            # on the model field afterwards.
            preserve_default=False,
        ),
    ]
| en | 0.759083 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-11-29 14:46 | 1.57434 | 2 |
4/main.py | TTvanWillegen/AdventOfCode2019 | 0 | 6618106 | def main():
part_one()
part_two()
def part_one():
    """Solve part one: count valid passwords in the input range.

    A password is valid when its digits never decrease from left to right
    and at least one pair of adjacent digits is equal (AoC 2019, day 4).
    Reads one ``low-high`` line per row of ``input.txt``, prints every
    valid password, then the total as ``N =  <count>``.
    """
    print("Part One")
    with open("input.txt", "r") as input_file:
        for line in input_file:
            low, high = (int(part) for part in line.rstrip().split("-"))
            counter = 0
            # The puzzle range is inclusive at both ends, so iterate up to
            # and including ``high`` (the original ``range(low, high)``
            # silently dropped the upper bound).
            for candidate in range(low, high + 1):
                digits = str(candidate)
                # Adjacent digit pairs; single-character string comparison
                # orders digits correctly ('0' < '1' < ... < '9').
                pairs = list(zip(digits, digits[1:]))
                never_decreases = all(a <= b for a, b in pairs)
                has_double = any(a == b for a, b in pairs)
                # Print only genuinely valid passwords — the original also
                # printed non-decreasing candidates that had no double.
                if never_decreases and has_double:
                    counter += 1
                    print(candidate)
            print("N = ", counter)
def part_two():
    """Count passwords whose digits never decrease and where some digit
    occurs exactly twice (part-two rule: the double is not part of a
    larger run).

    Prints each match together with its per-digit occurrence table, and
    the final count.
    """
    print("Part Two")
    with open("input.txt", "r") as input_file:
        for line in input_file:
            low, high = line.rstrip().split("-")
            counter = 0
            for candidate in range(int(low), int(high)):
                digits = str(candidate)
                if any(a > b for a, b in zip(digits, digits[1:])):
                    # A decreasing step disqualifies the candidate.
                    continue
                occurrences = [digits.count(str(d)) for d in range(10)]
                if 2 in occurrences:
                    counter += 1
                    print(candidate)
                    print(occurrences)
            print("N = ", counter)
if __name__ == "__main__":
main()
| def main():
part_one()
part_two()
def part_one():
print("Part One")
with open("input.txt", "r") as input_file:
for line in input_file:
password_range = line.rstrip().split("-")
counter = 0
for i in range(int(password_range[0]), int(password_range[1])):
success = True
double_char = False
last_number = None
for number in str(i):
if last_number is None:
last_number = number
continue
if int(last_number) > int(number):
success = False
break
if int(last_number) == int(number):
last_number = number
double_char = True
if int(last_number) < int(number):
last_number = number
if success:
counter += 1 if double_char else 0
print(i)
print("N = ", counter)
def part_two():
print("Part Two")
with open("input.txt", "r") as input_file:
for line in input_file:
password_range = line.rstrip().split("-")
counter = 0
for i in range(int(password_range[0]), int(password_range[1])):
success = True
last_number = None
double_counter = [0 for x in range(0, 10)]
for number in str(i):
double_counter[int(number)] += 1
if last_number is None:
last_number = number
continue
else:
if int(last_number) > int(number):
success = False
break
if int(last_number) == int(number):
last_number = number
if int(last_number) < int(number):
last_number = number
if success:
double_char = False
for x in range(0, 10):
double_char = double_char or double_counter[x] == 2
counter += 1 if double_char else 0
if double_char:
print(i)
print(double_counter)
print("N = ", counter)
if __name__ == "__main__":
main()
| none | 1 | 3.702836 | 4 | |
records/12-01/__init__.py | AaronYang2333/CSCI_570 | 36 | 6618107 | <reponame>AaronYang2333/CSCI_570<filename>records/12-01/__init__.py
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '12/1/2020 7:57 PM' | __author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '12/1/2020 7:57 PM' | none | 1 | 0.956703 | 1 | |
project1/helper/help_functions.py | fabiorodp/IN_STK5000_Adaptive_methods_for_data_based_decision_making | 0 | 6618108 | <filename>project1/helper/help_functions.py
from sklearn.utils import resample
import matplotlib.pyplot as plt
from pandas import read_csv
import seaborn as sns
import numpy as np
import pandas as pd
def import_data():
    """Download the four project CSV files from GitHub.

    Returns
    -------
    tuple of pandas.DataFrame
        ``(observation_features, treatment_features, treatment_action,
        treatment_outcome)`` with human-readable column labels applied.
    """
    # The four files only differ by name; the original code repeated the
    # full URL block four times.
    base_url = (
        "https://raw.githubusercontent.com/fabiorodp/IN_STK5000"
        "_Adaptive_methods_for_data_based_decision_making/main/"
        "project1/data/"
    )

    def fetch(file_name):
        # Every file is shipped without a header row.
        return read_csv(base_url + file_name, header=None)

    observation_features = fetch("observation_features.csv.gz")
    treatment_features = fetch("treatment_features.csv.gz")
    treatment_action = fetch("treatment_actions.csv.gz")
    treatment_outcome = fetch("treatment_outcomes.csv.gz")

    labels = ['Covid-Recovered', 'Covid-Positive', 'No_Taste/Smell',
              'Fever', 'Headache', 'Pneumonia', 'Stomach', 'Myocarditis',
              'Blood-Clots', 'Death', 'Age', 'Gender', 'Income'] + \
             [f'g{i}' for i in range(1, 129)] + \
             ['Asthma', 'Obesity', 'Smoking', 'Diabetes', 'Heart-disease',
              'Hypertension', 'Vaccine1', 'Vaccine2', 'Vaccine3']

    observation_features.columns = labels
    treatment_features.columns = labels
    treatment_action.columns = ['Treatment1', 'Treatment2']
    # The outcome file only carries the first ten (symptom/outcome) columns.
    treatment_outcome.columns = labels[0:10]

    return (observation_features, treatment_features, treatment_action,
            treatment_outcome)
def plot_heatmap_corr(features, title):
    """Render an annotated heatmap of the feature correlation matrix."""
    axes = sns.heatmap(
        features.corr(),
        linewidths=0.1,
        annot=True,
        annot_kws={"fontsize": 8},
        fmt='.2f',
    )
    # Widen the margins so long row/column labels are not clipped.
    axes.figure.subplots_adjust(left=0.2, bottom=0.3)
    plt.title(title)
    plt.show()
def age_analysis(df, plot_box=False, plot_dist=False):
    """Print (and optionally plot) summary statistics of the age column.

    NOTE(review): operates on positional integer columns (``df[10]`` for
    age, ``df[9]`` for the death flag), i.e. a frame loaded with
    ``header=None`` BEFORE the renaming done in ``import_data()`` —
    confirm against the caller. Returns None.
    """
    # How many of death per age?
    df[10].plot.box()
    plt.show() if plot_box is True else None
    # people with > 120 years old?
    # NOTE(review): counts ALL rows with age > 120, not only deaths,
    # despite what the printed message says.
    print(f"Number of deaths with over 120 years old: "
          f"{df[10][df[10] > 120].count()}")
    # age mean of the deaths
    print(f"Age mean of the deaths: "
          f"{df[10][df[9] == 1.0].mean()}")
    # density of the ages given they are dead
    df[10][df[9] == 1.0].plot.density()
    plt.show() if plot_dist is True else None
    # kind of normal distribution
def randomly_balance_data():
    # TODO: unimplemented stub — presumably intended as a randomized
    # counterpart to balance_data(), which keeps the first N majority rows
    # deterministically.
    pass
def balance_data(data, param='Death'):
    """Undersample the majority class so *param* has equal 1.0/0.0 counts,
    then keep only Covid-positive rows and drop that indicator column.
    """
    positives = data[data[param] == 1.0]
    # Deterministic undersampling: take the first len(positives) negatives.
    negatives = data[data[param] == 0.0].iloc[:positives.shape[0], :]
    combined = pd.concat([positives, negatives])
    covid_positive = combined[combined['Covid-Positive'] == 1]
    return covid_positive.drop(['Covid-Positive'], axis=1)
def feature_importance_methodology1(syn_neg_corr, syn_pos_corr):
    """Bar-plot the concatenated negative/positive correlation series
    (last entry excluded) against 'Death'."""
    print('Plotting...')
    combined = pd.concat([syn_neg_corr, syn_pos_corr], axis=0)[:-1]
    axes = sns.barplot(y=combined.values, x=combined.index)
    axes.figure.subplots_adjust(left=0.15, bottom=0.3)
    plt.xticks(rotation=90)
    plt.title("Top negative and positive correlations with 'Death'")
    plt.ylabel('Correlation scores')
    plt.show()
    print('done.')
def feature_importance_methodology3(best_model, topn=5,
                                    return_top_positive=False,
                                    return_top_negative=False):
    """Plot the *topn* most negative and most positive coefficients of a
    fitted linear model; optionally return one of the two groups as
    ``(names, values)``."""
    order = np.argsort(best_model.coef_.ravel())
    sorted_coefficients = best_model.coef_.ravel()[order]
    sorted_names = best_model.feature_names_in_[order]

    positive_values = sorted_coefficients[-topn:]
    positive_names = sorted_names[-topn:]
    negative_values = sorted_coefficients[:topn]
    negative_names = sorted_names[:topn]

    print('Plotting...')
    axes = sns.barplot(
        x=np.hstack((negative_values, positive_values)),
        y=np.hstack((negative_names, positive_names)),
    )
    axes.figure.subplots_adjust(left=0.2, bottom=0.2)
    plt.xticks(rotation=90)
    plt.title(f"Top LR model's selected coefficients")
    plt.xlabel('Coefficients')
    plt.show()
    print('done.')

    if return_top_positive is True:
        return positive_names, positive_values
    elif return_top_negative is True:
        return negative_names, negative_values
def confidence_interval_plot(lr_model, top):
    """Boxplot for confidence intervals.

    NOTE(review): ``lr_model`` must expose ``conf_int()`` returning a
    two-column frame of per-coefficient CI bounds (statsmodels-style) —
    confirm against the caller.
    """
    ci_ = lr_model.conf_int()
    ci_['Features'] = ci_.index
    # Interval midpoint, used only to order the coefficients.
    ci_['Median'] = [np.median(np.array([ci_.iloc[i, 0], ci_.iloc[i, 1]])) for
                     i in range(len(ci_))]
    ci_ = ci_.sort_values(by='Median')
    # Keep the `top` most negative and `top` most positive intervals.
    ci_topn = ci_.iloc[:top, :]
    ci_topp = ci_.iloc[-top:, :]
    ci_concat = pd.concat([ci_topn, ci_topp], axis=0).T
    # After the transpose, the first two rows are the lower/upper bounds.
    sns.boxplot(
        data=ci_concat.iloc[:2, :],
        orient='h'
    ).figure.subplots_adjust(left=0.22, bottom=0.15)
    plt.title(
        "95% confidence interval for top selected explanatory coefficients")
    plt.xlabel("Coefficients' confidence interval")
    plt.show()
| <filename>project1/helper/help_functions.py
from sklearn.utils import resample
import matplotlib.pyplot as plt
from pandas import read_csv
import seaborn as sns
import numpy as np
import pandas as pd
def import_data():
"""Import real data from GitHub."""
observation_features = read_csv(
"https://raw.githubusercontent.com/fabiorodp/IN_STK5000"
"_Adaptive_methods_for_data_based_decision_making/main/"
"project1/data/observation_features.csv.gz",
header=None,
)
treatment_features = read_csv(
"https://raw.githubusercontent.com/fabiorodp/IN_STK5000"
"_Adaptive_methods_for_data_based_decision_making/main/"
"project1/data/treatment_features.csv.gz",
header=None,
)
treatment_action = read_csv(
"https://raw.githubusercontent.com/fabiorodp/IN_STK5000"
"_Adaptive_methods_for_data_based_decision_making/main/"
"project1/data/treatment_actions.csv.gz",
header=None,
)
treatment_outcome = read_csv(
"https://raw.githubusercontent.com/fabiorodp/IN_STK5000"
"_Adaptive_methods_for_data_based_decision_making/main/"
"project1/data/treatment_outcomes.csv.gz",
header=None,
)
labels = ['Covid-Recovered', 'Covid-Positive', 'No_Taste/Smell',
'Fever', 'Headache', 'Pneumonia', 'Stomach', 'Myocarditis',
'Blood-Clots', 'Death', 'Age', 'Gender', 'Income'] + \
[f'g{i}' for i in range(1, 129)] + \
['Asthma', 'Obesity', 'Smoking', 'Diabetes', 'Heart-disease',
'Hypertension', 'Vaccine1', 'Vaccine2', 'Vaccine3']
observation_features.columns = labels
treatment_features.columns = labels
treatment_action.columns = ['Treatment1', 'Treatment2']
treatment_outcome.columns = labels[0:10]
return (observation_features, treatment_features, treatment_action,
treatment_outcome)
def plot_heatmap_corr(features, title):
"""Show a correlation matrix."""
sns.heatmap(
features.corr(),
linewidths=0.1,
annot=True,
annot_kws={"fontsize": 8},
fmt='.2f'
).figure.subplots_adjust(left=0.2, bottom=0.3)
plt.title(title)
plt.show()
def age_analysis(df, plot_box=False, plot_dist=False):
"""Return a age analysis."""
# How many of death per age?
df[10].plot.box()
plt.show() if plot_box is True else None
# people with > 120 years old?
print(f"Number of deaths with over 120 years old: "
f"{df[10][df[10] > 120].count()}")
# age mean of the deaths
print(f"Age mean of the deaths: "
f"{df[10][df[9] == 1.0].mean()}")
# density of the ages given they are dead
df[10][df[9] == 1.0].plot.density()
plt.show() if plot_dist is True else None
# kind of normal distribution
def randomly_balance_data():
pass
def balance_data(data, param='Death'):
"""Balancing targets."""
df_dead = data[data[param] == 1.0]
df_not_dead = data[data[param] == 0.0].iloc[:df_dead.shape[0], :]
df_balanced = pd.concat([df_dead, df_not_dead])
df_balanced = df_balanced[df_balanced['Covid-Positive'] == 1].drop(['Covid-Positive'], axis=1)
return df_balanced
def feature_importance_methodology1(syn_neg_corr, syn_pos_corr):
print('Plotting...')
sns.barplot(
y=pd.concat([syn_neg_corr, syn_pos_corr], axis=0)[:-1].values,
x=pd.concat([syn_neg_corr, syn_pos_corr], axis=0)[:-1].index,
).figure.subplots_adjust(left=0.15, bottom=0.3)
plt.xticks(rotation=90)
plt.title("Top negative and positive correlations with 'Death'")
plt.ylabel('Correlation scores')
plt.show()
print('done.')
def feature_importance_methodology3(best_model, topn=5,
return_top_positive=False,
return_top_negative=False):
arg_sorted = np.argsort(best_model.coef_.ravel())
top_pos_vals = best_model.coef_.ravel()[arg_sorted][-topn:]
top_pos_names = best_model.feature_names_in_[arg_sorted][-topn:]
top_neg_vals = best_model.coef_.ravel()[arg_sorted][:topn]
top_neg_names = best_model.feature_names_in_[arg_sorted][:topn]
print('Plotting...')
sns.barplot(
x=np.hstack((top_neg_vals, top_pos_vals)),
y=np.hstack((top_neg_names, top_pos_names))
).figure.subplots_adjust(left=0.2, bottom=0.2)
plt.xticks(rotation=90)
plt.title(f"Top LR model's selected coefficients")
plt.xlabel('Coefficients')
plt.show()
print('done.')
if return_top_positive is True:
return top_pos_names, top_pos_vals
elif return_top_negative is True:
return top_neg_names, top_neg_vals
def confidence_interval_plot(lr_model, top):
"""Boxplot for confidence intervals."""
ci_ = lr_model.conf_int()
ci_['Features'] = ci_.index
ci_['Median'] = [np.median(np.array([ci_.iloc[i, 0], ci_.iloc[i, 1]])) for
i in range(len(ci_))]
ci_ = ci_.sort_values(by='Median')
ci_topn = ci_.iloc[:top, :]
ci_topp = ci_.iloc[-top:, :]
ci_concat = pd.concat([ci_topn, ci_topp], axis=0).T
sns.boxplot(
data=ci_concat.iloc[:2, :],
orient='h'
).figure.subplots_adjust(left=0.22, bottom=0.15)
plt.title(
"95% confidence interval for top selected explanatory coefficients")
plt.xlabel("Coefficients' confidence interval")
plt.show()
| en | 0.861153 | Import real data from GitHub. Show a correlation matrix. Return a age analysis. # How many of death per age? # people with > 120 years old? # age mean of the deaths # density of the ages given they are dead # kind of normal distribution Balancing targets. Boxplot for confidence intervals. | 3.097433 | 3 |
app/config.py | LupusAnay/image-resize | 1 | 6618109 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
upload_dir = os.path.join(basedir, '..', 'images')
class BaseConfig:
    """Shared defaults inherited by every environment config."""
    DEBUG = False
    # Upload file extensions the app accepts.
    ALLOWED_EXTENSIONS = {'jpg', 'png'}
class TestingConfig(BaseConfig):
    """Configuration used by the test suite."""
    # Fixture directory, relative to the test run's working directory.
    TEST_DIR = '../tests/test_images/'
    TEST_IMAGE = os.path.join(TEST_DIR, 'test.jpg')
    DEBUG = True
class DevelopmentConfig(BaseConfig):
    """Local development configuration (debug enabled)."""
    DEBUG = True
class ProductionConfig(BaseConfig):
    """Production configuration; currently just the BaseConfig defaults."""
    pass
| import os
basedir = os.path.abspath(os.path.dirname(__file__))
upload_dir = os.path.join(basedir, '..', 'images')
class BaseConfig:
DEBUG = False
ALLOWED_EXTENSIONS = {'jpg', 'png'}
class TestingConfig(BaseConfig):
TEST_DIR = '../tests/test_images/'
TEST_IMAGE = os.path.join(TEST_DIR, 'test.jpg')
DEBUG = True
class DevelopmentConfig(BaseConfig):
DEBUG = True
class ProductionConfig(BaseConfig):
pass
| none | 1 | 2.209578 | 2 | |
py/py_0235_an_arithmetic_geometric_sequence.py | lcsm29/project-euler | 0 | 6618110 | <gh_stars>0
# Solution of;
# Project Euler Problem 235: An Arithmetic Geometric sequence
# https://projecteuler.net/problem=235
#
# Given is the arithmetic-geometric sequence u(k) = (900-3k)rk-1. Let s(n) =
# Σk=1. . . nu(k). Find the value of r for which s(5000) = -600,000,000,000.
# Give your answer rounded to 12 places behind the decimal point.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    # Placeholder body — no solution to Problem 235 is implemented yet.
    pass
if __name__ == '__main__':
    # Arguments forwarded to the timed.caller harness — presumably problem
    # size, iteration count, and the Project Euler problem id; confirm
    # against the timed module.
    n = 1000
    i = 10000
    prob_id = 235
    timed.caller(dummy, n, i, prob_id)
| # Solution of;
# Project Euler Problem 235: An Arithmetic Geometric sequence
# https://projecteuler.net/problem=235
#
# Given is the arithmetic-geometric sequence u(k) = (900-3k)rk-1. Let s(n) =
# Σk=1. . . nu(k). Find the value of r for which s(5000) = -600,000,000,000.
# Give your answer rounded to 12 places behind the decimal point.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 235
timed.caller(dummy, n, i, prob_id) | en | 0.66269 | # Solution of; # Project Euler Problem 235: An Arithmetic Geometric sequence # https://projecteuler.net/problem=235 # # Given is the arithmetic-geometric sequence u(k) = (900-3k)rk-1. Let s(n) = # Σk=1. . . nu(k). Find the value of r for which s(5000) = -600,000,000,000. # Give your answer rounded to 12 places behind the decimal point. # # by lcsm29 http://github.com/lcsm29/project-euler | 2.79318 | 3 |
api/api_gateway/api.py | cds-snc/scan-files | 0 | 6618111 | from aws_lambda_powertools import Metrics
from fastapi import FastAPI
from os import environ
from pydantic import BaseSettings
from starlette.middleware.base import BaseHTTPMiddleware
from uuid import uuid4
from .custom_middleware import add_security_headers
from .routers import ops, assemblyline, clamav
class Settings(BaseSettings):
    """Application settings sourced from environment variables."""
    # Passed to FastAPI(openapi_url=...) below; presumably an empty string
    # disables the OpenAPI schema endpoint — confirm against FastAPI docs.
    openapi_url: str = environ.get("OPENAPI_URL", "")
settings = Settings()
API_AUTH_TOKEN = environ.get("API_AUTH_TOKEN", uuid4())
MLWR_HOST = environ.get("MLWR_HOST")
MLWR_USER = environ.get("MLWR_USER")
MLWR_KEY = environ.get("MLWR_KEY")
description = """
Scan files 📁 API. Submit files for malware scanning
"""
app = FastAPI(
title="Scan Files",
description=description,
version="0.0.1",
openapi_url=settings.openapi_url,
)
app.include_router(ops.router)
app.include_router(assemblyline.router, prefix="/assemblyline", tags=["assemblyline"])
app.include_router(clamav.router, prefix="/clamav", tags=["clamav"])
# https://github.com/tiangolo/fastapi/issues/1472; can't include custom middlware when running tests
if environ.get("CI") is None:
app.add_middleware(BaseHTTPMiddleware, dispatch=add_security_headers)
metrics = Metrics(namespace="ScanFiles", service="api")
| from aws_lambda_powertools import Metrics
from fastapi import FastAPI
from os import environ
from pydantic import BaseSettings
from starlette.middleware.base import BaseHTTPMiddleware
from uuid import uuid4
from .custom_middleware import add_security_headers
from .routers import ops, assemblyline, clamav
class Settings(BaseSettings):
openapi_url: str = environ.get("OPENAPI_URL", "")
settings = Settings()
API_AUTH_TOKEN = environ.get("API_AUTH_TOKEN", uuid4())
MLWR_HOST = environ.get("MLWR_HOST")
MLWR_USER = environ.get("MLWR_USER")
MLWR_KEY = environ.get("MLWR_KEY")
description = """
Scan files 📁 API. Submit files for malware scanning
"""
app = FastAPI(
title="Scan Files",
description=description,
version="0.0.1",
openapi_url=settings.openapi_url,
)
app.include_router(ops.router)
app.include_router(assemblyline.router, prefix="/assemblyline", tags=["assemblyline"])
app.include_router(clamav.router, prefix="/clamav", tags=["clamav"])
# https://github.com/tiangolo/fastapi/issues/1472; can't include custom middlware when running tests
if environ.get("CI") is None:
app.add_middleware(BaseHTTPMiddleware, dispatch=add_security_headers)
metrics = Metrics(namespace="ScanFiles", service="api")
| en | 0.809785 | Scan files 📁 API. Submit files for malware scanning # https://github.com/tiangolo/fastapi/issues/1472; can't include custom middlware when running tests | 1.930039 | 2 |
cdradmin/migrations/0021_auto_20180309_0949.py | teamshadi/ffa-cdr-admin | 0 | 6618112 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-09 09:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable self-referencing ``guarantee_for`` FK to
    SuperidToLoanLiability, plus default ordering and a uniqueness
    constraint over (superid, ledger, subledger, currency_liability).
    """

    dependencies = [
        ('cdradmin', '0020_entitytopartnertype'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='superidtoloanliability',
            options={'ordering': ['superid__superid', 'ledger__ledger', 'subledger', 'currency_liability']},
        ),
        migrations.AddField(
            model_name='superidtoloanliability',
            name='guarantee_for',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='cdradmin.SuperidToLoanLiability'),
        ),
        migrations.AlterUniqueTogether(
            name='superidtoloanliability',
            unique_together=set([('superid', 'ledger', 'subledger', 'currency_liability')]),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-09 09:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cdradmin', '0020_entitytopartnertype'),
]
operations = [
migrations.AlterModelOptions(
name='superidtoloanliability',
options={'ordering': ['superid__superid', 'ledger__ledger', 'subledger', 'currency_liability']},
),
migrations.AddField(
model_name='superidtoloanliability',
name='guarantee_for',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='cdradmin.SuperidToLoanLiability'),
),
migrations.AlterUniqueTogether(
name='superidtoloanliability',
unique_together=set([('superid', 'ledger', 'subledger', 'currency_liability')]),
),
]
| en | 0.728343 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-03-09 09:49 | 1.619417 | 2 |
sources/praline/common/compiling/compiler.py | dansandu/praline | 0 | 6618113 | <reponame>dansandu/praline<gh_stars>0
from abc import ABC, abstractmethod
from praline.common.compiling.yield_descriptor import YieldDescriptor
from praline.common.constants import get_artifact_full_name
from praline.common.file_system import directory_name, FileSystem, get_separator, join, relative_path
from praline.common.hashing import key_delta, hash_binary, hash_file
from praline.common.reflection import subclasses_of
from typing import Any, Dict, List, Tuple
class Compiler(ABC):
    """Abstract interface for a concrete toolchain.

    Implementations are discovered reflectively via
    ``subclasses_of(Compiler)`` in ``get_compilers()`` / ``get_compiler()``.
    """

    @abstractmethod
    def get_name(self) -> str:
        """Return the toolchain name (used to select a compiler by name)."""
        raise NotImplementedError()

    @abstractmethod
    def get_architecture(self) -> str:
        """Return the target architecture identifier (part of artifact names)."""
        raise NotImplementedError()

    @abstractmethod
    def get_platform(self) -> str:
        """Return the target platform identifier (part of artifact names)."""
        raise NotImplementedError()

    @abstractmethod
    def get_mode(self) -> str:
        """Return the build mode identifier (part of artifact names)."""
        raise NotImplementedError()

    @abstractmethod
    def matches(self) -> bool:
        """Return True when this compiler is usable in the current environment."""
        raise NotImplementedError()

    @abstractmethod
    def preprocess(self,
                   headers_root: str,
                   external_headers_root: str,
                   headers: List[str],
                   source: str) -> bytes:
        """Return the preprocessed translation unit as raw bytes.

        compile_using_cache() hashes this output so that header changes
        invalidate the sources that include them.
        """
        raise NotImplementedError()

    @abstractmethod
    def compile(self,
                headers_root: str,
                external_headers_root: str,
                headers: List[str],
                source: str,
                object_: str) -> None:
        """Compile *source* into the object file at *object_*."""
        raise NotImplementedError()

    @abstractmethod
    def link_executable(self,
                        external_libraries_root: str,
                        external_libraries_interfaces_root: str,
                        objects: List[str],
                        external_libraries: List[str],
                        external_libraries_interfaces: List[str],
                        executable: str,
                        symbols_table: str) -> None:
        """Link *objects* and external libraries into *executable*,
        emitting *symbols_table*."""
        raise NotImplementedError()

    @abstractmethod
    def link_library(self,
                     external_libraries_root: str,
                     external_libraries_interfaces_root: str,
                     objects: List[str],
                     external_libraries: List[str],
                     external_libraries_interfaces: List[str],
                     library: str,
                     library_interface: str,
                     symbols_table: str) -> None:
        """Link *objects* and external libraries into *library* (and its
        interface), emitting *symbols_table*."""
        raise NotImplementedError()

    @abstractmethod
    def get_yield_descriptor(self) -> YieldDescriptor:
        """Return the YieldDescriptor mapping artifact names to output paths."""
        raise NotImplementedError()
def compile_using_cache(file_system: FileSystem,
                        compiler: Compiler,
                        headers_root: str,
                        external_headers_root: str,
                        sources_root: str,
                        objects_root: str,
                        headers: List[str],
                        sources: List[str],
                        cache: Dict[str, Any]) -> List[str]:
    """Compile only the translation units whose preprocessed hash changed.

    ``cache`` maps source path -> hash of its preprocessed output and is
    replaced in place with the fresh mapping. Returns the object file
    paths for all ``sources``.
    """
    file_system.create_directory_if_missing(objects_root)

    def hash_translation_unit(source):
        # Hash the preprocessed output so a change in any included header
        # also invalidates the source.
        return hash_binary(compiler.preprocess(headers_root, external_headers_root, headers, source))

    updated, removed, new_cache = key_delta(sources, hash_translation_unit, cache)
    objects = []
    yield_descriptor = compiler.get_yield_descriptor()
    for source in updated:
        object_ = yield_descriptor.get_object(sources_root, objects_root, source)
        compiler.compile(headers_root, external_headers_root, headers, source, object_)
    for source in removed:
        # Drop stale objects for sources that no longer exist.
        object_ = yield_descriptor.get_object(sources_root, objects_root, source)
        if file_system.exists(object_):
            file_system.remove_file(object_)
    for source in sources:
        object_ = yield_descriptor.get_object(sources_root, objects_root, source)
        if not file_system.exists(object_):
            # Hash unchanged but the artifact is missing on disk
            # (e.g. deleted by hand) — rebuild it anyway.
            compiler.compile(headers_root, external_headers_root, headers, source, object_)
        objects.append(object_)
    cache.clear()
    cache.update(new_cache)
    return objects
def link_executable_using_cache(file_system: FileSystem,
                                compiler: Compiler,
                                executables_root: str,
                                symbols_tables_root: str,
                                external_libraries_root: str,
                                external_libraries_interfaces_root: str,
                                objects: List[str],
                                external_libraries: List[str],
                                external_libraries_interfaces: List[str],
                                organization: str,
                                artifact: str,
                                version: str,
                                cache: Dict[str, Any],
                                is_test: bool = False) -> Tuple[str, str]:
    """Link an executable, skipping the link when all inputs are unchanged.

    ``cache`` holds ``{'input': {path: hash}, 'output': (executable,
    symbols_table)}`` and is updated in place. Returns the
    ``(executable, symbols_table)`` paths.
    """
    file_system.create_directory_if_missing(executables_root)
    file_system.create_directory_if_missing(symbols_tables_root)
    name = get_artifact_full_name(organization, artifact, compiler.get_architecture(),
                                  compiler.get_platform(), compiler.get_name(),
                                  compiler.get_mode(), version) + ('.test' if is_test else '')
    cache['input'] = input_ = cache.get('input', {})
    (old_executable,
     old_symbols_table) = cache.get('output', (None, None))
    hasher = lambda path: hash_file(file_system, path)
    # Hash every object and external library/interface that feeds the link.
    updated, removed, new_cache = key_delta(objects + external_libraries + external_libraries_interfaces, hasher, input_)
    yield_descriptor = compiler.get_yield_descriptor()
    executable = yield_descriptor.get_executable(executables_root, name)
    symbols_table = yield_descriptor.get_symbols_table(symbols_tables_root, name)
    remake_executable = executable and not file_system.exists(executable)
    remake_symbols_table = symbols_table and not file_system.exists(symbols_table)
    # Relink when any input changed or disappeared, or an expected output
    # artifact is missing on disk.
    if updated or removed or remake_executable or remake_symbols_table:
        # Remove outputs from a previous artifact name before relinking.
        if old_executable and file_system.exists(old_executable):
            file_system.remove_file(old_executable)
        if old_symbols_table and file_system.exists(old_symbols_table):
            file_system.remove_file(old_symbols_table)
        compiler.link_executable(external_libraries_root,
                                 external_libraries_interfaces_root,
                                 objects,
                                 external_libraries,
                                 external_libraries_interfaces,
                                 executable,
                                 symbols_table)
    cache['input'] = new_cache
    cache['output'] = (executable, symbols_table)
    return (executable, symbols_table)
def link_library_using_cache(file_system: FileSystem,
                             compiler: Compiler,
                             libraries_root: str,
                             libraries_interfaces_root: str,
                             symbols_tables_root: str,
                             external_libraries_root: str,
                             external_libraries_interfaces_root: str,
                             objects: List[str],
                             external_libraries: List[str],
                             external_libraries_interfaces: List[str],
                             organization: str,
                             artifact: str,
                             version: str,
                             cache: Dict[str, Any]) -> Tuple[str, str, str]:
    """Link a library, skipping the link when all inputs are unchanged.

    Mirrors link_executable_using_cache() but produces a library, its
    interface, and a symbols table. ``cache`` holds ``{'input': {path:
    hash}, 'output': (library, library_interface, symbols_table)}`` and is
    updated in place. Returns the three output paths.
    """
    file_system.create_directory_if_missing(libraries_root)
    file_system.create_directory_if_missing(libraries_interfaces_root)
    file_system.create_directory_if_missing(symbols_tables_root)
    name = get_artifact_full_name(organization, artifact, compiler.get_architecture(),
                                  compiler.get_platform(), compiler.get_name(),
                                  compiler.get_mode(), version)
    cache['input'] = input_ = cache.get('input', {})
    (old_library,
     old_library_interface,
     old_symbols_table) = cache.get('output', (None, None, None))
    hasher = lambda path: hash_file(file_system, path)
    # Hash every object and external library/interface that feeds the link.
    updated, removed, new_cache = key_delta(objects + external_libraries + external_libraries_interfaces, hasher, input_)
    yield_descriptor = compiler.get_yield_descriptor()
    library = yield_descriptor.get_library(libraries_root, name)
    library_interface = yield_descriptor.get_library_interface(libraries_interfaces_root, name)
    symbols_table = yield_descriptor.get_symbols_table(symbols_tables_root, name)
    remake_library = library and not file_system.exists(library)
    remake_library_interface = library_interface and not file_system.exists(library_interface)
    remake_symbols_table = symbols_table and not file_system.exists(symbols_table)
    # Relink when any input changed or disappeared, or an expected output
    # artifact is missing on disk.
    if updated or removed or remake_library or remake_library_interface or remake_symbols_table:
        # Remove outputs from a previous artifact name before relinking.
        if old_library and file_system.exists(old_library):
            file_system.remove_file(old_library)
        if old_library_interface and file_system.exists(old_library_interface):
            file_system.remove_file(old_library_interface)
        if old_symbols_table and file_system.exists(old_symbols_table):
            file_system.remove_file(old_symbols_table)
        compiler.link_library(external_libraries_root,
                              external_libraries_interfaces_root,
                              objects,
                              external_libraries,
                              external_libraries_interfaces,
                              library,
                              library_interface,
                              symbols_table)
    cache['input'] = new_cache
    cache['output'] = (library, library_interface, symbols_table)
    return (library, library_interface, symbols_table)
def get_compilers(file_system: FileSystem, architecture: str, platform: str, mode: str, logging_level: int) -> List[Compiler]:
    """Instantiate every registered Compiler subclass, rejecting name clashes."""
    compilers = [klass(file_system, architecture, platform, mode, logging_level) for klass in subclasses_of(Compiler)]
    for index, candidate in enumerate(compilers):
        for other in compilers[index + 1:]:
            if candidate.get_name() == other.get_name():
                raise RuntimeError(f"multiple compilers defined with the same name '{candidate.get_name()}'")
    return compilers
def get_compiler(file_system: FileSystem, name: str, architecture: str, platform: str, mode: str, logging_level: int) -> Compiler:
    """Instantiate registered compilers and return the first one named *name*."""
    compilers = [klass(file_system, architecture, platform, mode, logging_level) for klass in subclasses_of(Compiler)]
    for compiler in compilers:
        if compiler.get_name() == name:
            return compiler
    raise RuntimeError(f"no compiler named '{name}' was found")
| from abc import ABC, abstractmethod
from praline.common.compiling.yield_descriptor import YieldDescriptor
from praline.common.constants import get_artifact_full_name
from praline.common.file_system import directory_name, FileSystem, get_separator, join, relative_path
from praline.common.hashing import key_delta, hash_binary, hash_file
from praline.common.reflection import subclasses_of
from typing import Any, Dict, List, Tuple
class Compiler(ABC):
@abstractmethod
def get_name(self) -> str:
raise NotImplementedError()
@abstractmethod
def get_architecture(self) -> str:
raise NotImplementedError()
@abstractmethod
def get_platform(self) -> str:
raise NotImplementedError()
@abstractmethod
def get_mode(self) -> str:
raise NotImplementedError()
@abstractmethod
def matches(self) -> bool:
raise NotImplementedError()
@abstractmethod
def preprocess(self,
headers_root: str,
external_headers_root: str,
headers: List[str],
source: str) -> bytes:
raise NotImplementedError()
@abstractmethod
def compile(self,
headers_root: str,
external_headers_root: str,
headers: List[str],
source: str,
object_: str) -> None:
raise NotImplementedError()
@abstractmethod
def link_executable(self,
external_libraries_root: str,
external_libraries_interfaces_root: str,
objects: List[str],
external_libraries: List[str],
external_libraries_interfaces: List[str],
executable: str,
symbols_table: str) -> None:
raise NotImplementedError()
@abstractmethod
def link_library(self,
external_libraries_root: str,
external_libraries_interfaces_root: str,
objects: List[str],
external_libraries: List[str],
external_libraries_interfaces: List[str],
library: str,
library_interface: str,
symbols_table: str) -> None:
raise NotImplementedError()
@abstractmethod
def get_yield_descriptor(self) -> YieldDescriptor:
raise NotImplementedError()
def compile_using_cache(file_system: FileSystem,
compiler: Compiler,
headers_root: str,
external_headers_root: str,
sources_root: str,
objects_root: str,
headers: List[str],
sources: List[str],
cache: Dict[str, Any]) -> List[str]:
file_system.create_directory_if_missing(objects_root)
def hash_translation_unit(source):
return hash_binary(compiler.preprocess(headers_root, external_headers_root, headers, source))
updated, removed, new_cache = key_delta(sources, hash_translation_unit, cache)
objects = []
yield_descriptor = compiler.get_yield_descriptor()
for source in updated:
object_ = yield_descriptor.get_object(sources_root, objects_root, source)
compiler.compile(headers_root, external_headers_root, headers, source, object_)
for source in removed:
object_ = yield_descriptor.get_object(sources_root, objects_root, source)
if file_system.exists(object_):
file_system.remove_file(object_)
for source in sources:
object_ = yield_descriptor.get_object(sources_root, objects_root, source)
if not file_system.exists(object_):
compiler.compile(headers_root, external_headers_root, headers, source, object_)
objects.append(object_)
cache.clear()
cache.update(new_cache)
return objects
def link_executable_using_cache(file_system: FileSystem,
compiler: Compiler,
executables_root: str,
symbols_tables_root: str,
external_libraries_root: str,
external_libraries_interfaces_root: str,
objects: List[str],
external_libraries: List[str],
external_libraries_interfaces: List[str],
organization: str,
artifact: str,
version: str,
cache: Dict[str, Any],
is_test: bool = False) -> Tuple[str, str]:
file_system.create_directory_if_missing(executables_root)
file_system.create_directory_if_missing(symbols_tables_root)
name = get_artifact_full_name(organization, artifact, compiler.get_architecture(),
compiler.get_platform(), compiler.get_name(),
compiler.get_mode(), version) + ('.test' if is_test else '')
cache['input'] = input_ = cache.get('input', {})
(old_executable,
old_symbols_table) = cache.get('output', (None, None))
hasher = lambda path: hash_file(file_system, path)
updated, removed, new_cache = key_delta(objects + external_libraries + external_libraries_interfaces, hasher, input_)
yield_descriptor = compiler.get_yield_descriptor()
executable = yield_descriptor.get_executable(executables_root, name)
symbols_table = yield_descriptor.get_symbols_table(symbols_tables_root, name)
remake_executable = executable and not file_system.exists(executable)
remake_symbols_table = symbols_table and not file_system.exists(symbols_table)
if updated or removed or remake_executable or remake_symbols_table:
if old_executable and file_system.exists(old_executable):
file_system.remove_file(old_executable)
if old_symbols_table and file_system.exists(old_symbols_table):
file_system.remove_file(old_symbols_table)
compiler.link_executable(external_libraries_root,
external_libraries_interfaces_root,
objects,
external_libraries,
external_libraries_interfaces,
executable,
symbols_table)
cache['input'] = new_cache
cache['output'] = (executable, symbols_table)
return (executable, symbols_table)
def link_library_using_cache(file_system: FileSystem,
compiler: Compiler,
libraries_root: str,
libraries_interfaces_root: str,
symbols_tables_root: str,
external_libraries_root: str,
external_libraries_interfaces_root: str,
objects: List[str],
external_libraries: List[str],
external_libraries_interfaces: List[str],
organization: str,
artifact: str,
version: str,
cache: Dict[str, Any]) -> Tuple[str, str, str]:
file_system.create_directory_if_missing(libraries_root)
file_system.create_directory_if_missing(libraries_interfaces_root)
file_system.create_directory_if_missing(symbols_tables_root)
name = get_artifact_full_name(organization, artifact, compiler.get_architecture(),
compiler.get_platform(), compiler.get_name(),
compiler.get_mode(), version)
cache['input'] = input_ = cache.get('input', {})
(old_library,
old_library_interface,
old_symbols_table) = cache.get('output', (None, None, None))
hasher = lambda path: hash_file(file_system, path)
updated, removed, new_cache = key_delta(objects + external_libraries + external_libraries_interfaces, hasher, input_)
yield_descriptor = compiler.get_yield_descriptor()
library = yield_descriptor.get_library(libraries_root, name)
library_interface = yield_descriptor.get_library_interface(libraries_interfaces_root, name)
symbols_table = yield_descriptor.get_symbols_table(symbols_tables_root, name)
remake_library = library and not file_system.exists(library)
remake_library_interface = library_interface and not file_system.exists(library_interface)
remake_symbols_table = symbols_table and not file_system.exists(symbols_table)
if updated or removed or remake_library or remake_library_interface or remake_symbols_table:
if old_library and file_system.exists(old_library):
file_system.remove_file(old_library)
if old_library_interface and file_system.exists(old_library_interface):
file_system.remove_file(old_library_interface)
if old_symbols_table and file_system.exists(old_symbols_table):
file_system.remove_file(old_symbols_table)
compiler.link_library(external_libraries_root,
external_libraries_interfaces_root,
objects,
external_libraries,
external_libraries_interfaces,
library,
library_interface,
symbols_table)
cache['input'] = new_cache
cache['output'] = (library, library_interface, symbols_table)
return (library, library_interface, symbols_table)
def get_compilers(file_system: FileSystem, architecture: str, platform: str, mode: str, logging_level: int) -> List[Compiler]:
compilers = [klass(file_system, architecture, platform, mode, logging_level) for klass in subclasses_of(Compiler)]
duplicates = [(i, j) for i in range(len(compilers)) for j in range(i + 1, len(compilers)) if compilers[i].get_name() == compilers[j].get_name()]
if duplicates:
raise RuntimeError(f"multiple compilers defined with the same name '{compilers[duplicates[0][0]].get_name()}'")
return compilers
def get_compiler(file_system: FileSystem, name: str, architecture: str, platform: str, mode: str, logging_level: int) -> Compiler:
compilers = [klass(file_system, architecture, platform, mode, logging_level) for klass in subclasses_of(Compiler)]
matching = [compiler for compiler in compilers if compiler.get_name() == name]
if not matching:
raise RuntimeError(f"no compiler named '{name}' was found")
return matching[0] | none | 1 | 2.036037 | 2 | |
archerysettings/load_settings.py | GMedian/archerysec | 0 | 6618114 | # _
# /\ | |
# / \ _ __ ___| |__ ___ _ __ _ _
# / /\ \ | '__/ __| '_ \ / _ \ '__| | | |
# / ____ \| | | (__| | | | __/ | | |_| |
#/_/ \_\_| \___|_| |_|\___|_| \__, |
# __/ |
# |___/
# Copyright (C) 2017-2018 ArcherySec
# This file is part of ArcherySec Project.
""" Author: <NAME> """
import os
import json
from django.core import signing
from archerysettings.models import zap_settings_db, burp_setting_db, openvas_setting_db
class ArcherySettings:
def __init__(self, setting_file):
self.setting_file = setting_file
def zap_api_key(self):
"""
Loading ZAP API Key from setting file.
:return:
"""
apikey = None
all_zap = zap_settings_db.objects.all()
for zap in all_zap:
apikey = zap.zap_api
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# load_api_key = data['zap_api_key']
# apikey = signing.loads(load_api_key)
# except Exception as e:
# print e
return apikey
def zap_host(self):
"""
Loading ZAP Host from setting file.
:return:
"""
zapath = None
all_zap = zap_settings_db.objects.all()
for zap in all_zap:
zapath = zap.zap_url
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# zapath = data['zap_path']
#
# except Exception as e:
# print e
return zapath
def zap_port(self):
"""
Loading ZAP Port from setting file.
:return:
"""
zaport = None
all_zap = zap_settings_db.objects.all()
for zap in all_zap:
zaport = zap.zap_port
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# zaport = data['zap_port']
#
# except Exception as e:
# print "Error in setting file as", e
return zaport
def burp_host(self):
"""
Loading Burp Host address from setting file.
:return:
"""
burphost = None
all_burp = burp_setting_db.objects.all()
for burp in all_burp:
burphost = burp.burp_url
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# burphost = data['burp_path']
# except Exception as e:
# print "Error in setting file as", e
return burphost
def burp_port(self):
"""
Loading Burp port from setting file.
:return:
"""
burport = None
all_burp = burp_setting_db.objects.all()
for burp in all_burp:
burport = burp.burp_port
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# burport = data['burp_port']
# except Exception as e:
# print "Error in setting file as", e
return burport
def openvas_host(self):
"""
Loading OpenVAS Setting from setting file.
:return:
"""
openvashost = None
all_openvas = openvas_setting_db.objects.all()
for openvas in all_openvas:
openvashost = openvas.host
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# openvashost = data['open_vas_ip']
# except Exception as e:
# print "Error in setting file as", e
return openvashost
def openvas_username(self):
"""
Loading OpenVAS Username from setting file.
:return:
"""
openvas_username = None
all_openvas = openvas_setting_db.objects.all()
for openvas in all_openvas:
openvas_username = openvas.user
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# openvas_username = data['open_vas_user']
# except Exception as e:
# print "Error in setting file as", e
return openvas_username
def openvas_pass(self):
"""
Loading OpenVAS Password from setting file.
:return:
"""
openvas_password = None
all_openvas = openvas_setting_db.objects.all()
for openvas in all_openvas:
openvas_password = <PASSWORD>
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# openvas_password = data['<PASSWORD>']
# except Exception as e:
# print "Error in setting file as", e
return openvas_password
def email_subject(self):
"""
Load Email Subject from setting file.
:return:
"""
email_sub = None
try:
with open(self.setting_file, 'r+') as f:
data = json.load(f)
email_sub = data['email_subject']
except Exception as e:
print "Error in setting file as", e
return email_sub
def email_from(self):
"""
:return:
"""
emails_from = None
try:
with open(self.setting_file, 'r+') as f:
data = json.load(f)
emails_from = data['from_email']
except Exception as e:
print "Error in setting file as", e
return emails_from
def email_to(self):
"""
:return:
"""
emails_to = None
try:
with open(self.setting_file, 'r+') as f:
data = json.load(f)
emails_to = data['to_email']
except Exception as e:
print "Error in setting file as", e
return emails_to | # _
# /\ | |
# / \ _ __ ___| |__ ___ _ __ _ _
# / /\ \ | '__/ __| '_ \ / _ \ '__| | | |
# / ____ \| | | (__| | | | __/ | | |_| |
#/_/ \_\_| \___|_| |_|\___|_| \__, |
# __/ |
# |___/
# Copyright (C) 2017-2018 ArcherySec
# This file is part of ArcherySec Project.
""" Author: <NAME> """
import os
import json
from django.core import signing
from archerysettings.models import zap_settings_db, burp_setting_db, openvas_setting_db
class ArcherySettings:
def __init__(self, setting_file):
self.setting_file = setting_file
def zap_api_key(self):
"""
Loading ZAP API Key from setting file.
:return:
"""
apikey = None
all_zap = zap_settings_db.objects.all()
for zap in all_zap:
apikey = zap.zap_api
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# load_api_key = data['zap_api_key']
# apikey = signing.loads(load_api_key)
# except Exception as e:
# print e
return apikey
def zap_host(self):
"""
Loading ZAP Host from setting file.
:return:
"""
zapath = None
all_zap = zap_settings_db.objects.all()
for zap in all_zap:
zapath = zap.zap_url
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# zapath = data['zap_path']
#
# except Exception as e:
# print e
return zapath
def zap_port(self):
"""
Loading ZAP Port from setting file.
:return:
"""
zaport = None
all_zap = zap_settings_db.objects.all()
for zap in all_zap:
zaport = zap.zap_port
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# zaport = data['zap_port']
#
# except Exception as e:
# print "Error in setting file as", e
return zaport
def burp_host(self):
"""
Loading Burp Host address from setting file.
:return:
"""
burphost = None
all_burp = burp_setting_db.objects.all()
for burp in all_burp:
burphost = burp.burp_url
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# burphost = data['burp_path']
# except Exception as e:
# print "Error in setting file as", e
return burphost
def burp_port(self):
"""
Loading Burp port from setting file.
:return:
"""
burport = None
all_burp = burp_setting_db.objects.all()
for burp in all_burp:
burport = burp.burp_port
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# burport = data['burp_port']
# except Exception as e:
# print "Error in setting file as", e
return burport
def openvas_host(self):
"""
Loading OpenVAS Setting from setting file.
:return:
"""
openvashost = None
all_openvas = openvas_setting_db.objects.all()
for openvas in all_openvas:
openvashost = openvas.host
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# openvashost = data['open_vas_ip']
# except Exception as e:
# print "Error in setting file as", e
return openvashost
def openvas_username(self):
"""
Loading OpenVAS Username from setting file.
:return:
"""
openvas_username = None
all_openvas = openvas_setting_db.objects.all()
for openvas in all_openvas:
openvas_username = openvas.user
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# openvas_username = data['open_vas_user']
# except Exception as e:
# print "Error in setting file as", e
return openvas_username
def openvas_pass(self):
"""
Loading OpenVAS Password from setting file.
:return:
"""
openvas_password = None
all_openvas = openvas_setting_db.objects.all()
for openvas in all_openvas:
openvas_password = <PASSWORD>
# try:
# with open(self.setting_file, 'r+') as f:
# data = json.load(f)
# openvas_password = data['<PASSWORD>']
# except Exception as e:
# print "Error in setting file as", e
return openvas_password
def email_subject(self):
"""
Load Email Subject from setting file.
:return:
"""
email_sub = None
try:
with open(self.setting_file, 'r+') as f:
data = json.load(f)
email_sub = data['email_subject']
except Exception as e:
print "Error in setting file as", e
return email_sub
def email_from(self):
"""
:return:
"""
emails_from = None
try:
with open(self.setting_file, 'r+') as f:
data = json.load(f)
emails_from = data['from_email']
except Exception as e:
print "Error in setting file as", e
return emails_from
def email_to(self):
"""
:return:
"""
emails_to = None
try:
with open(self.setting_file, 'r+') as f:
data = json.load(f)
emails_to = data['to_email']
except Exception as e:
print "Error in setting file as", e
return emails_to | en | 0.769721 | # _ # /\ | | # / \ _ __ ___| |__ ___ _ __ _ _ # / /\ \ | '__/ __| '_ \ / _ \ '__| | | | # / ____ \| | | (__| | | | __/ | | |_| | #/_/ \_\_| \___|_| |_|\___|_| \__, | # __/ | # |___/ # Copyright (C) 2017-2018 ArcherySec # This file is part of ArcherySec Project. Author: <NAME> Loading ZAP API Key from setting file. :return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # load_api_key = data['zap_api_key'] # apikey = signing.loads(load_api_key) # except Exception as e: # print e Loading ZAP Host from setting file. :return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # zapath = data['zap_path'] # # except Exception as e: # print e Loading ZAP Port from setting file. :return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # zaport = data['zap_port'] # # except Exception as e: # print "Error in setting file as", e Loading Burp Host address from setting file. :return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # burphost = data['burp_path'] # except Exception as e: # print "Error in setting file as", e Loading Burp port from setting file. :return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # burport = data['burp_port'] # except Exception as e: # print "Error in setting file as", e Loading OpenVAS Setting from setting file. :return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # openvashost = data['open_vas_ip'] # except Exception as e: # print "Error in setting file as", e Loading OpenVAS Username from setting file. :return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # openvas_username = data['open_vas_user'] # except Exception as e: # print "Error in setting file as", e Loading OpenVAS Password from setting file. 
:return: # try: # with open(self.setting_file, 'r+') as f: # data = json.load(f) # openvas_password = data['<PASSWORD>'] # except Exception as e: # print "Error in setting file as", e Load Email Subject from setting file. :return: :return: :return: | 2.190394 | 2 |
src/FLABasicTools/overlap.py | Fair-Lines-America/FLA_basic_tools | 0 | 6618115 | <filename>src/FLABasicTools/overlap.py
import pandas as pd
import geopandas as gpd
from bs4 import BeautifulSoup, SoupStrainer
import zipfile
import wget
import os
import requests
from datetime import datetime
from zipfile import ZipFile
from urllib.request import urlopen
def getData(State=None):
dir_list = os.listdir()
f = None
state_ids = pd.read_csv(r'https://raw.githubusercontent.com/Fair-Lines-America/FLA_basic_tools/main/data/us-state-ansi-fips.csv',skipinitialspace=True, dtype=str)
st = state_ids[state_ids['st']==State]
st = st['stname'].iloc[0]
if '2020_PLSummaryFile_FieldNames.xlsx' not in dir_list:
headers='https://www2.census.gov/programs-surveys/decennial/rdo/about/2020-census-program/Phase3/SupportMaterials/2020_PLSummaryFile_FieldNames.xlsx'
wget.download(headers)
url = f'https://www2.census.gov/programs-surveys/decennial/2020/data/01-Redistricting_File--PL_94-171/{st}/'
r = requests.get(url)
res = r.content
for link in BeautifulSoup(res, parse_only=SoupStrainer('a')):
if 'zip' in link.contents[0]:
f = link.contents[0]
if f not in dir_list:
file = f'{url}{f}'
wget.download(file)
if f'PL94_blocks_{State}.csv' not in dir_list:
df_header1 = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Segment 1 Definitions').dropna(axis=0, how='all').reset_index(drop=True)
df_header2 = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Segment 2 Definitions').dropna(axis=0, how='all').reset_index(drop=True)
df_header3 = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Segment 3 Definitions').dropna(axis=0, how='all').reset_index(drop=True)
df_headergeo = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Geoheader Definitions').dropna(axis=0, how='all').reset_index(drop=True)
header_replace_1 = {i :None for i in range(0,len(df_header1.index)) }
header_replace_2 = {i :None for i in range(0,len(df_header2.index)) }
header_replace_3 = {i :None for i in range(0,len(df_header3.index)) }
header_replace_geo = {i :None for i in range(0,len(df_headergeo.index)) }
array = [[df_header1,header_replace_1, '1'],[df_header2,header_replace_2,'2'],[df_header3,header_replace_3,'3'],[df_headergeo,header_replace_geo,'o']]
for i in array:
json = i[1]
header = i[0]
for key in json.keys():
json[key] = header.iloc[key][1]
archive = zipfile.ZipFile(f, 'r')
csv = []
for i in archive.infolist():
temp = archive.open(i)
fileName = temp.name.split('.')[0]
fileNum = fileName[-5:][0]
df = pd.read_csv(temp, sep="|", header=None, low_memory=False ,encoding = "ISO-8859-1")
for j in array:
if fileNum == j[2] :
df = df.rename(columns=j[1])
df.to_csv(f'{fileName}.csv', index=False)
csv.append(fileName)
join_on = ['STUSAB','LOGRECNO']
df_out = None
for i in csv:
if df_out is None:
df_out = pd.read_csv(f'{i}.csv', low_memory=False, dtype={'FILEID':'str','STUSAB':'str','CHARITER':'str','CIFSN':'str','LOGRECNO':'str'})
continue
else:
df = pd.read_csv(f'{i}.csv', low_memory=False, dtype={'FILEID':'str','STUSAB':'str','CHARITER':'str','CIFSN':'str','LOGRECNO':'str'})
df_out = df_out.merge(df, on=join_on, suffixes=('', '_y'))
delt = []
for k in df_out.columns:
if '_y' in k:
delt.append(k)
df_out = df_out.drop(columns=delt)
r = [i for i in df_out.columns if i[0:2] != 'P0' or i != 'GEOID20']
df_out = df_out[df['SUMLEV'] == 750]
keep = ['POP100',
'CD116',
'SLDU18',
'SLDL18',
'GEOCODE']
r = [i for i in df_out.columns if i not in keep]
df_out = df_out.drop(columns=r)
df_out.to_csv(f'PL94_blocks_{State}.csv', index=False)
for i in csv:
os.remove(f'{i}.csv')
else:
df_out = pd.read_csv(f'PL94_blocks_{State}.csv', dtype={'GEOCODE':str,'CD116':str,'SLDU18':str,'SLDL18':str})
return df_out
##### PROCESS Off PL94 File ######
def Overlap_old_new(new_districts, geoid='GEOID', district='District', leg=None):
##### CHECKS ######
if leg not in ['CD116','SLDU18','SLDL18']:
raise Exception('Please Choose Legislative level Default \'CD116\',\'SLDU18\',\'SLDL18\'')
if not isinstance(new_districts ,pd.DataFrame):
raise Exception('Please Supply a path to a CSV or DataFrame for new districts')
if district not in new_districts.columns:
raise Exception('District column not given or is missing from a DataFrame')
if geoid not in new_districts.columns:
raise Exception('GEOID column not given or is missing from a DataFrame')
fips = new_districts[geoid].iloc[0]
old_org = getData(State=str(fips[0:2]))
old_org = old_org.rename(columns={'GEOCODE':geoid,leg:district})
data = old_org[[geoid,'POP100']]
old_districts = old_org[[geoid,district]]
##### PROCESS ######
groupby_pop = district
old = data.merge(old_districts, on=geoid)
new = data.merge(new_districts, on=geoid)
new = new.drop_duplicates()
old = old.drop_duplicates()
cross = old[[geoid,district]].merge(new,
on=geoid,
suffixes=('_old', '_new'))
cross = cross.drop_duplicates()
new = new.drop(columns=geoid)
old = old.drop(columns=geoid)
cross = cross.drop(columns=geoid)
old = old.groupby([groupby_pop], as_index=False).sum()
new = new.groupby([groupby_pop], as_index=False).sum()
new_group = [district+'_old',district+'_new']
cross_old, cross_new = [district+'_old',district+'_new']
groupby_cross = district
cross = cross.groupby([f'{groupby_cross}_old',f'{groupby_cross}_new'], as_index=False ).sum()
new = cross.merge(new, left_on=cross_new, right_on=district,suffixes=('_new', '_cross'))
old = cross.merge(old, left_on=cross_old, right_on=district,suffixes=('_old', '_cross'))
out = new.merge(old, on=[f'{district}_new',f'{district}_old'] , suffixes=('', '_d'))
r = [district]
for col in out.columns:
if '_d' in col:
r.append(col)
out = out.drop(columns=r)
## OUTPUT
out.to_csv(f'out_population_overlap.csv',index=False)
return out
##### PROCESS Off 2 BAF ######
def Overlap_compare(old_districts, new_districts, data, geoid='GEOID', district='District'):
##### CHECKS ######
if district not in old_districts.columns and district not in new_districts.columns:
raise Exception('District column not given or is missing from a DataFrame')
if geoid not in old_districts.columns and geoid not in new_districts.columns and geoid not in data.columns:
raise Exception('GEOID column not given or is missing from a DataFrame')
##### PROCESS ######
groupby_pop = district
old = data.merge(old_districts, on=geoid)
new = data.merge(new_districts, on=geoid)
new = new.drop_duplicates()
old = old.drop_duplicates()
cross = old[[geoid,district]].merge(new,
on=geoid,
suffixes=('_old', '_new'))
cross = cross.drop_duplicates()
new = new.drop(columns=geoid)
old = old.drop(columns=geoid)
cross = cross.drop(columns=geoid)
old = old.groupby([groupby_pop], as_index=False).sum()
new = new.groupby([groupby_pop], as_index=False).sum()
new_group = [district+'_old',district+'_new']
cross_old, cross_new = [district+'_old',district+'_new']
groupby_cross = district
cross = cross.groupby([f'{groupby_cross}_old',f'{groupby_cross}_new'], as_index=False ).sum()
new = cross.merge(new, left_on=cross_new, right_on=district,suffixes=('_new', '_cross'))
old = cross.merge(old, left_on=cross_old, right_on=district,suffixes=('_old', '_cross'))
out = new.merge(old, on=[f'{district}_new',f'{district}_old'] , suffixes=('', '_d'))
r = [district]
for col in out.columns:
if '_d' in col:
r.append(col)
out = out.drop(columns=r)
## OUTPUT
out.to_csv(f'out_population_overlap.csv',index=False)
return out | <filename>src/FLABasicTools/overlap.py
import pandas as pd
import geopandas as gpd
from bs4 import BeautifulSoup, SoupStrainer
import zipfile
import wget
import os
import requests
from datetime import datetime
from zipfile import ZipFile
from urllib.request import urlopen
def getData(State=None):
dir_list = os.listdir()
f = None
state_ids = pd.read_csv(r'https://raw.githubusercontent.com/Fair-Lines-America/FLA_basic_tools/main/data/us-state-ansi-fips.csv',skipinitialspace=True, dtype=str)
st = state_ids[state_ids['st']==State]
st = st['stname'].iloc[0]
if '2020_PLSummaryFile_FieldNames.xlsx' not in dir_list:
headers='https://www2.census.gov/programs-surveys/decennial/rdo/about/2020-census-program/Phase3/SupportMaterials/2020_PLSummaryFile_FieldNames.xlsx'
wget.download(headers)
url = f'https://www2.census.gov/programs-surveys/decennial/2020/data/01-Redistricting_File--PL_94-171/{st}/'
r = requests.get(url)
res = r.content
for link in BeautifulSoup(res, parse_only=SoupStrainer('a')):
if 'zip' in link.contents[0]:
f = link.contents[0]
if f not in dir_list:
file = f'{url}{f}'
wget.download(file)
if f'PL94_blocks_{State}.csv' not in dir_list:
df_header1 = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Segment 1 Definitions').dropna(axis=0, how='all').reset_index(drop=True)
df_header2 = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Segment 2 Definitions').dropna(axis=0, how='all').reset_index(drop=True)
df_header3 = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Segment 3 Definitions').dropna(axis=0, how='all').reset_index(drop=True)
df_headergeo = pd.read_excel('2020_PLSummaryFile_FieldNames.xlsx', sheet_name='2020 P.L. Geoheader Definitions').dropna(axis=0, how='all').reset_index(drop=True)
header_replace_1 = {i :None for i in range(0,len(df_header1.index)) }
header_replace_2 = {i :None for i in range(0,len(df_header2.index)) }
header_replace_3 = {i :None for i in range(0,len(df_header3.index)) }
header_replace_geo = {i :None for i in range(0,len(df_headergeo.index)) }
array = [[df_header1,header_replace_1, '1'],[df_header2,header_replace_2,'2'],[df_header3,header_replace_3,'3'],[df_headergeo,header_replace_geo,'o']]
for i in array:
json = i[1]
header = i[0]
for key in json.keys():
json[key] = header.iloc[key][1]
archive = zipfile.ZipFile(f, 'r')
csv = []
for i in archive.infolist():
temp = archive.open(i)
fileName = temp.name.split('.')[0]
fileNum = fileName[-5:][0]
df = pd.read_csv(temp, sep="|", header=None, low_memory=False ,encoding = "ISO-8859-1")
for j in array:
if fileNum == j[2] :
df = df.rename(columns=j[1])
df.to_csv(f'{fileName}.csv', index=False)
csv.append(fileName)
join_on = ['STUSAB','LOGRECNO']
df_out = None
for i in csv:
if df_out is None:
df_out = pd.read_csv(f'{i}.csv', low_memory=False, dtype={'FILEID':'str','STUSAB':'str','CHARITER':'str','CIFSN':'str','LOGRECNO':'str'})
continue
else:
df = pd.read_csv(f'{i}.csv', low_memory=False, dtype={'FILEID':'str','STUSAB':'str','CHARITER':'str','CIFSN':'str','LOGRECNO':'str'})
df_out = df_out.merge(df, on=join_on, suffixes=('', '_y'))
delt = []
for k in df_out.columns:
if '_y' in k:
delt.append(k)
df_out = df_out.drop(columns=delt)
r = [i for i in df_out.columns if i[0:2] != 'P0' or i != 'GEOID20']
df_out = df_out[df['SUMLEV'] == 750]
keep = ['POP100',
'CD116',
'SLDU18',
'SLDL18',
'GEOCODE']
r = [i for i in df_out.columns if i not in keep]
df_out = df_out.drop(columns=r)
df_out.to_csv(f'PL94_blocks_{State}.csv', index=False)
for i in csv:
os.remove(f'{i}.csv')
else:
df_out = pd.read_csv(f'PL94_blocks_{State}.csv', dtype={'GEOCODE':str,'CD116':str,'SLDU18':str,'SLDL18':str})
return df_out
##### PROCESS Off PL94 File ######
def Overlap_old_new(new_districts, geoid='GEOID', district='District', leg=None):
##### CHECKS ######
if leg not in ['CD116','SLDU18','SLDL18']:
raise Exception('Please Choose Legislative level Default \'CD116\',\'SLDU18\',\'SLDL18\'')
if not isinstance(new_districts ,pd.DataFrame):
raise Exception('Please Supply a path to a CSV or DataFrame for new districts')
if district not in new_districts.columns:
raise Exception('District column not given or is missing from a DataFrame')
if geoid not in new_districts.columns:
raise Exception('GEOID column not given or is missing from a DataFrame')
fips = new_districts[geoid].iloc[0]
old_org = getData(State=str(fips[0:2]))
old_org = old_org.rename(columns={'GEOCODE':geoid,leg:district})
data = old_org[[geoid,'POP100']]
old_districts = old_org[[geoid,district]]
##### PROCESS ######
groupby_pop = district
old = data.merge(old_districts, on=geoid)
new = data.merge(new_districts, on=geoid)
new = new.drop_duplicates()
old = old.drop_duplicates()
cross = old[[geoid,district]].merge(new,
on=geoid,
suffixes=('_old', '_new'))
cross = cross.drop_duplicates()
new = new.drop(columns=geoid)
old = old.drop(columns=geoid)
cross = cross.drop(columns=geoid)
old = old.groupby([groupby_pop], as_index=False).sum()
new = new.groupby([groupby_pop], as_index=False).sum()
new_group = [district+'_old',district+'_new']
cross_old, cross_new = [district+'_old',district+'_new']
groupby_cross = district
cross = cross.groupby([f'{groupby_cross}_old',f'{groupby_cross}_new'], as_index=False ).sum()
new = cross.merge(new, left_on=cross_new, right_on=district,suffixes=('_new', '_cross'))
old = cross.merge(old, left_on=cross_old, right_on=district,suffixes=('_old', '_cross'))
out = new.merge(old, on=[f'{district}_new',f'{district}_old'] , suffixes=('', '_d'))
r = [district]
for col in out.columns:
if '_d' in col:
r.append(col)
out = out.drop(columns=r)
## OUTPUT
out.to_csv(f'out_population_overlap.csv',index=False)
return out
##### PROCESS Off 2 BAF ######
def Overlap_compare(old_districts, new_districts, data, geoid='GEOID', district='District'):
##### CHECKS ######
if district not in old_districts.columns and district not in new_districts.columns:
raise Exception('District column not given or is missing from a DataFrame')
if geoid not in old_districts.columns and geoid not in new_districts.columns and geoid not in data.columns:
raise Exception('GEOID column not given or is missing from a DataFrame')
##### PROCESS ######
groupby_pop = district
old = data.merge(old_districts, on=geoid)
new = data.merge(new_districts, on=geoid)
new = new.drop_duplicates()
old = old.drop_duplicates()
cross = old[[geoid,district]].merge(new,
on=geoid,
suffixes=('_old', '_new'))
cross = cross.drop_duplicates()
new = new.drop(columns=geoid)
old = old.drop(columns=geoid)
cross = cross.drop(columns=geoid)
old = old.groupby([groupby_pop], as_index=False).sum()
new = new.groupby([groupby_pop], as_index=False).sum()
new_group = [district+'_old',district+'_new']
cross_old, cross_new = [district+'_old',district+'_new']
groupby_cross = district
cross = cross.groupby([f'{groupby_cross}_old',f'{groupby_cross}_new'], as_index=False ).sum()
new = cross.merge(new, left_on=cross_new, right_on=district,suffixes=('_new', '_cross'))
old = cross.merge(old, left_on=cross_old, right_on=district,suffixes=('_old', '_cross'))
out = new.merge(old, on=[f'{district}_new',f'{district}_old'] , suffixes=('', '_d'))
r = [district]
for col in out.columns:
if '_d' in col:
r.append(col)
out = out.drop(columns=r)
## OUTPUT
out.to_csv(f'out_population_overlap.csv',index=False)
return out | de | 0.587345 | ##### PROCESS Off PL94 File ###### ##### CHECKS ###### ##### PROCESS ###### ## OUTPUT ##### PROCESS Off 2 BAF ###### ##### CHECKS ###### ##### PROCESS ###### ## OUTPUT | 2.596306 | 3 |
scan/test/fetch/api_fetch/test_api_access.py | korenlev/calipso-cvim | 0 | 6618116 | <reponame>korenlev/calipso-cvim<filename>scan/test/fetch/api_fetch/test_api_access.py
###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
import unittest
from unittest.mock import MagicMock, Mock
import copy
import requests
from scan.fetchers.api.api_access import ApiAccess
from scan.test.fetch.api_fetch.test_data.api_access import *
from scan.test.fetch.api_fetch.test_data.regions import REGIONS
from scan.test.fetch.test_fetch import TestFetch
class TestApiAccess(TestFetch):
def setUp(self):
super().setUp()
self.configure_environment()
self.api_access = ApiAccess()
self.set_regions_for_fetcher(self.api_access)
def test_parse_time_without_dot_in_time(self):
time = self.api_access.keystone_client.parse_time(TIME_WITHOUT_DOT)
self.assertNotEqual(time, None, "Can't parse the time without dot")
def test_parse_time_with_dot_in_time(self):
time = self.api_access.keystone_client.parse_time(TIME_WITH_DOT)
self.assertNotEqual(time, None, "Can't parse the time with dot")
def test_parse_illegal_time(self):
time = self.api_access.keystone_client.parse_time(ILLEGAL_TIME)
self.assertEqual(time, None,
"Can't get None when the time format is wrong")
def test_get_existing_token(self):
self.api_access.keystone_client.tokens = VALID_TOKENS
token = self.api_access.keystone_client.get_existing_token(PROJECT)
self.assertNotEqual(token, VALID_TOKENS[PROJECT],
"Can't get existing token")
def test_get_nonexistent_token(self):
self.api_access.keystone_client.tokens = EMPTY_TOKENS
token = self.api_access.keystone_client.get_existing_token(TEST_PROJECT)
self.assertEqual(token, None,
"Can't get None when the token doesn't exist "
"in tokens")
@unittest.skip("TODO: refactor for new ApiAccess")
def test_v2_auth(self):
self.api_access.keystone_client.get_existing_token = MagicMock(return_value=None)
self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
# mock authentication info from OpenStack Api
token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
TEST_BODY)
self.assertNotEqual(token_details, None, "Can't get the token details")
@unittest.skip("TODO: refactor for new ApiAccess")
def test_v2_auth_with_error_content(self):
self.api_access.get_existing_token = MagicMock(return_value=None)
self.response.json = Mock(return_value=ERROR_AUTH_CONTENT)
# authentication content from OpenStack Api will be incorrect
token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
TEST_BODY)
self.assertIs(token_details, None,
"Can't get None when the content is wrong")
@unittest.skip("TODO: refactor for new ApiAccess")
def test_v2_auth_with_error_token(self):
self.response.status_code = requests.codes.bad_request
self.response.json = Mock(return_value=ERROR_TOKEN_CONTENT)
# authentication info from OpenStack Api will not contain token info
token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
TEST_BODY)
self.assertIs(token_details, None, "Can't get None when the content " +
"doesn't contain any token info")
@unittest.skip("TODO: refactor for new ApiAccess")
def test_v2_auth_with_error_expiry_time(self):
self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
# store original parse_time method
original_method = self.api_access.parse_time
# the time will not be parsed
self.api_access.parse_time = MagicMock(return_value=None)
token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
TEST_BODY)
# reset original parse_time method
self.api_access.parse_time = original_method
self.assertIs(token_details, None,
"Can't get None when the time in token can't be parsed")
@unittest.skip("TODO: refactor for new ApiAccess")
def test_v2_auth_pwd(self):
self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
# mock the authentication info from OpenStack Api
token = self.api_access.v2_auth_pwd(PROJECT)
self.assertNotEqual(token, None, "Can't get token")
def test_get_url(self):
get_response = copy.deepcopy(self.response)
get_response.status_code = requests.codes.ok
self.requests_get = requests.get
requests.get = MagicMock(return_value=get_response)
get_response.json = Mock(return_value=GET_CONTENT)
result = self.api_access.get_url(TEST_URL, TEST_HEADER)
# check whether it returns content message when the response is correct
self.assertNotEqual(result, None, "Can't get content when the "
"response is correct")
requests.get = self.requests_get
def test_get_url_with_error_response(self):
get_response = copy.deepcopy(self.response)
get_response.status_code = requests.codes.bad_request
get_response.text = "Bad request"
get_response.json = Mock(return_value=GET_CONTENT)
self.requests_get = requests.get
requests.get = MagicMock(return_value=get_response)
# the response will be wrong
result = self.api_access.get_url(TEST_URL, TEST_HEADER)
self.assertEqual(result, None, "Result returned" +
"when the response status is not 200")
requests.get = self.requests_get
def test_get_region_url(self):
region_url = self.api_access.get_region_url(REGION_NAME, SERVICE_NAME)
self.assertNotEqual(region_url, None, "Can't get region url")
def test_get_region_url_with_wrong_region_name(self):
# error region name doesn't exist in the regions info
region_url = self.api_access.get_region_url(ERROR_REGION_NAME, "")
self.assertIs(region_url, None, "Can't get None with the region " +
"name is wrong")
def test_get_region_url_without_service_endpoint(self):
# error service doesn't exist in region service endpoints
region_url = self.api_access.get_region_url(REGION_NAME,
ERROR_SERVICE_NAME)
self.assertIs(region_url, None,
"Can't get None with wrong service name")
def test_region_url_nover(self):
# mock return value of get_region_url_from_service,
# which has something starting from v2
self.api_access.get_region_url = MagicMock(return_value=REGION_URL)
region_url = self.api_access.get_region_url_nover(REGION_NAME,
SERVICE_NAME)
# get_region_nover will remove everything from v2
self.assertNotIn("v2", region_url,
"Can't get region url without v2 info")
def test_get_service_region_endpoints(self):
region = REGIONS[REGION_NAME]
result = self.api_access.get_service_region_endpoints(region,
SERVICE_NAME)
self.assertNotEqual(result, None, "Can't get service endpoint")
def test_get_service_region_endpoints_with_nonexistent_service(self):
region = REGIONS[REGION_NAME]
get_endpoints = self.api_access.get_service_region_endpoints
result = get_endpoints(region, ERROR_SERVICE_NAME)
self.assertIs(result, None, "Can't get None when the service name " +
"doesn't exist in region's services")
| ###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
import unittest
from unittest.mock import MagicMock, Mock
import copy
import requests
from scan.fetchers.api.api_access import ApiAccess
from scan.test.fetch.api_fetch.test_data.api_access import *
from scan.test.fetch.api_fetch.test_data.regions import REGIONS
from scan.test.fetch.test_fetch import TestFetch
class TestApiAccess(TestFetch):
    """Unit tests for ApiAccess: token parsing/caching, v2 authentication,
    URL fetching, and region/endpoint resolution."""

    def setUp(self):
        super().setUp()
        self.configure_environment()
        self.api_access = ApiAccess()
        self.set_regions_for_fetcher(self.api_access)

    def test_parse_time_without_dot_in_time(self):
        """Timestamps without fractional seconds should parse successfully."""
        time = self.api_access.keystone_client.parse_time(TIME_WITHOUT_DOT)
        # assertIsNotNone is the idiomatic identity check (avoids ``!= None``).
        self.assertIsNotNone(time, "Can't parse the time without dot")

    def test_parse_time_with_dot_in_time(self):
        """Timestamps with fractional seconds should parse successfully."""
        time = self.api_access.keystone_client.parse_time(TIME_WITH_DOT)
        self.assertIsNotNone(time, "Can't parse the time with dot")

    def test_parse_illegal_time(self):
        """Malformed timestamps should yield None instead of raising."""
        time = self.api_access.keystone_client.parse_time(ILLEGAL_TIME)
        self.assertIsNone(time,
                          "Can't get None when the time format is wrong")

    def test_get_existing_token(self):
        # Pre-populate the keystone client's token cache with valid tokens.
        self.api_access.keystone_client.tokens = VALID_TOKENS
        token = self.api_access.keystone_client.get_existing_token(PROJECT)
        # NOTE(review): assertNotEqual paired with the failure message
        # "Can't get existing token" looks inverted -- presumably the cached
        # token should EQUAL VALID_TOKENS[PROJECT]. Confirm the intent before
        # changing; behavior kept as-is here.
        self.assertNotEqual(token, VALID_TOKENS[PROJECT],
                            "Can't get existing token")

    def test_get_nonexistent_token(self):
        """Looking up a project with no cached token should return None."""
        self.api_access.keystone_client.tokens = EMPTY_TOKENS
        token = self.api_access.keystone_client.get_existing_token(TEST_PROJECT)
        self.assertIsNone(token,
                          "Can't get None when the token doesn't exist "
                          "in tokens")

    @unittest.skip("TODO: refactor for new ApiAccess")
    def test_v2_auth(self):
        """v2_auth should return token details for a valid auth response."""
        # Force a cache miss so v2_auth performs a full authentication.
        self.api_access.keystone_client.get_existing_token = MagicMock(return_value=None)
        # mock authentication info from OpenStack Api
        self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
        token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
                                                TEST_BODY)
        self.assertIsNotNone(token_details, "Can't get the token details")

    @unittest.skip("TODO: refactor for new ApiAccess")
    def test_v2_auth_with_error_content(self):
        """v2_auth should return None when the auth response is malformed."""
        self.api_access.get_existing_token = MagicMock(return_value=None)
        # authentication content from OpenStack Api will be incorrect
        self.response.json = Mock(return_value=ERROR_AUTH_CONTENT)
        token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
                                                TEST_BODY)
        self.assertIsNone(token_details,
                          "Can't get None when the content is wrong")

    @unittest.skip("TODO: refactor for new ApiAccess")
    def test_v2_auth_with_error_token(self):
        """v2_auth should return None when the response lacks token info."""
        self.response.status_code = requests.codes.bad_request
        # authentication info from OpenStack Api will not contain token info
        self.response.json = Mock(return_value=ERROR_TOKEN_CONTENT)
        token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
                                                TEST_BODY)
        self.assertIsNone(token_details, "Can't get None when the content " +
                          "doesn't contain any token info")

    @unittest.skip("TODO: refactor for new ApiAccess")
    def test_v2_auth_with_error_expiry_time(self):
        """v2_auth should return None when the token expiry can't be parsed."""
        self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
        # store original parse_time method
        original_method = self.api_access.parse_time
        # the time will not be parsed
        self.api_access.parse_time = MagicMock(return_value=None)
        try:
            token_details = self.api_access.v2_auth(TEST_PROJECT, TEST_HEADER,
                                                    TEST_BODY)
        finally:
            # reset original parse_time method even if v2_auth raises
            self.api_access.parse_time = original_method
        self.assertIsNone(token_details,
                          "Can't get None when the time in token can't be parsed")

    @unittest.skip("TODO: refactor for new ApiAccess")
    def test_v2_auth_pwd(self):
        """Password-based v2 authentication should yield a token."""
        # mock the authentication info from OpenStack Api
        self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
        token = self.api_access.v2_auth_pwd(PROJECT)
        self.assertIsNotNone(token, "Can't get token")

    def test_get_url(self):
        """get_url should return decoded content for a 200 response."""
        get_response = copy.deepcopy(self.response)
        get_response.status_code = requests.codes.ok
        get_response.json = Mock(return_value=GET_CONTENT)
        # Patch requests.get globally; try/finally guarantees it is restored
        # even when the assertion fails (the original leaked the patch).
        self.requests_get = requests.get
        requests.get = MagicMock(return_value=get_response)
        try:
            result = self.api_access.get_url(TEST_URL, TEST_HEADER)
            # check whether it returns content message when the response is correct
            self.assertIsNotNone(result, "Can't get content when the "
                                         "response is correct")
        finally:
            requests.get = self.requests_get

    def test_get_url_with_error_response(self):
        """get_url should return None for a non-200 response."""
        get_response = copy.deepcopy(self.response)
        get_response.status_code = requests.codes.bad_request
        get_response.text = "Bad request"
        get_response.json = Mock(return_value=GET_CONTENT)
        # Patch requests.get globally; try/finally guarantees it is restored
        # even when the assertion fails (the original leaked the patch).
        self.requests_get = requests.get
        requests.get = MagicMock(return_value=get_response)
        try:
            # the response will be wrong
            result = self.api_access.get_url(TEST_URL, TEST_HEADER)
            # Fixed failure message: added the missing space after "returned".
            self.assertIsNone(result, "Result returned "
                                      "when the response status is not 200")
        finally:
            requests.get = self.requests_get

    def test_get_region_url(self):
        """A known region/service pair should resolve to a URL."""
        region_url = self.api_access.get_region_url(REGION_NAME, SERVICE_NAME)
        self.assertIsNotNone(region_url, "Can't get region url")

    def test_get_region_url_with_wrong_region_name(self):
        """An unknown region name should resolve to None."""
        # error region name doesn't exist in the regions info
        region_url = self.api_access.get_region_url(ERROR_REGION_NAME, "")
        self.assertIsNone(region_url, "Can't get None with the region " +
                          "name is wrong")

    def test_get_region_url_without_service_endpoint(self):
        """An unknown service name should resolve to None."""
        # error service doesn't exist in region service endpoints
        region_url = self.api_access.get_region_url(REGION_NAME,
                                                    ERROR_SERVICE_NAME)
        self.assertIsNone(region_url,
                          "Can't get None with wrong service name")

    def test_region_url_nover(self):
        """The version suffix (v2...) must be stripped from the region URL."""
        # mock return value of get_region_url_from_service,
        # which has something starting from v2
        self.api_access.get_region_url = MagicMock(return_value=REGION_URL)
        region_url = self.api_access.get_region_url_nover(REGION_NAME,
                                                          SERVICE_NAME)
        # get_region_nover will remove everything from v2
        self.assertNotIn("v2", region_url,
                         "Can't get region url without v2 info")

    def test_get_service_region_endpoints(self):
        """A known service should have endpoints in the region data."""
        region = REGIONS[REGION_NAME]
        result = self.api_access.get_service_region_endpoints(region,
                                                              SERVICE_NAME)
        self.assertIsNotNone(result, "Can't get service endpoint")

    def test_get_service_region_endpoints_with_nonexistent_service(self):
        """An unknown service should yield None from the region data."""
        region = REGIONS[REGION_NAME]
        get_endpoints = self.api_access.get_service_region_endpoints
        result = get_endpoints(region, ERROR_SERVICE_NAME)
        self.assertIsNone(result, "Can't get None when the service name " +
                          "doesn't exist in region's services")
introduction-to-python/python-lists/script_06.py | nhutnamhcmus/datacamp-playground | 1 | 6618117 | <gh_stars>1-10
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Use slicing to create downstairs
downstairs = areas[:6]
# Use slicing to create upstairs
upstairs = areas [len(areas)-4:]
# Print out downstairs and upstairs
print(downstairs)
print(upstairs) | # Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Use slicing to create downstairs
downstairs = areas[:6]
# Use slicing to create upstairs
upstairs = areas [len(areas)-4:]
# Print out downstairs and upstairs
print(downstairs)
print(upstairs) | en | 0.697555 | # Create the areas list # Use slicing to create downstairs # Use slicing to create upstairs # Print out downstairs and upstairs | 4.007684 | 4 |
daemons/core/module_reducer.py | gtoonstra/mreasy | 0 | 6618118 | <reponame>gtoonstra/mreasy
import os
import sys
import logging
import time
import heapq
from operator import itemgetter
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent)
import lib.remap_utils as remap_utils
import lib.remap_constants as remap_constants
from lib.remap_utils import RemapException
from base_module import WorkerBase
def create_worker( app, appconfig, workdata ):
return Reducer( app, appconfig, workdata )
class Reducer(WorkerBase):
def __init__( self, app, appconfig, workdata ):
WorkerBase.__init__( self, app, appconfig, workdata )
self.total_size = 0
self.prevkey = None
# This is a reducer operation
inputdir = os.path.join( self.remaproot, "job", self.jobid, "part", self.workdata["partition"] )
outputdir = os.path.join( self.remaproot, "data", self.workdata["outputdir"] )
self.reducerfiles = sorted(os.listdir( inputdir ))
self.inputdir = inputdir
self.numparts = len(self.reducerfiles)
self.fraction = 100.0 / self.numparts
self.completedparts = 0
self.outputdir = outputdir
self.partition = self.workdata["partition"]
self.reducerWriter = self.app.create_reducer_writer( self.outputdir, self.partition )
self.sources = []
for filename in self.reducerfiles:
f = self.app.create_reducer_reader( os.path.join( self.inputdir, filename ))
self.sources.append( f )
self.total_size = self.total_size + f.filesize
decorated = [
((key,list_of_values,recsize) for key,list_of_values,recsize in f.read())
for f in self.sources]
self.merged = heapq.merge(*decorated)
def status( self ):
return {"partition":self.partition,"progress":self.progress}
def result( self ):
if len(self.sources) == 0:
return "complete", {"partition":self.partition}
return "fail", {"partition":self.partition}
def work( self ):
if len(self.sources) == 0:
return False
readrec = False
for k2,v2,recsize in self.merged:
readrec = True
if self.prevkey == None:
# Initialize the very first step
self.prevkey = k2
self.prevlist = v2
self.processed = recsize
elif self.prevkey != k2:
# The key changed. Dump all values of previous step
for k3,v3 in self.app.reduce( self.prevkey, self.prevlist ):
self.reducerWriter.store( k3, v3 )
self.prevkey = k2
self.prevlist = v2
self.processed = self.processed + recsize
else:
# Add another record to the list
self.prevlist = self.prevlist + v2
self.processed = self.processed + recsize
p = (self.processed / self.total_size) * 100
if p > self.progress+5:
self.progress = int(p)
# breaking out of the loop to check up on messages
break
if not readrec:
# done
self.progress = 100
for f in self.sources:
f.close()
self.sources = []
self.reducerWriter.close()
return True
| import os
import sys
import logging
import time
import heapq
from operator import itemgetter
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent)
import lib.remap_utils as remap_utils
import lib.remap_constants as remap_constants
from lib.remap_utils import RemapException
from base_module import WorkerBase
def create_worker( app, appconfig, workdata ):
return Reducer( app, appconfig, workdata )
class Reducer(WorkerBase):
def __init__( self, app, appconfig, workdata ):
WorkerBase.__init__( self, app, appconfig, workdata )
self.total_size = 0
self.prevkey = None
# This is a reducer operation
inputdir = os.path.join( self.remaproot, "job", self.jobid, "part", self.workdata["partition"] )
outputdir = os.path.join( self.remaproot, "data", self.workdata["outputdir"] )
self.reducerfiles = sorted(os.listdir( inputdir ))
self.inputdir = inputdir
self.numparts = len(self.reducerfiles)
self.fraction = 100.0 / self.numparts
self.completedparts = 0
self.outputdir = outputdir
self.partition = self.workdata["partition"]
self.reducerWriter = self.app.create_reducer_writer( self.outputdir, self.partition )
self.sources = []
for filename in self.reducerfiles:
f = self.app.create_reducer_reader( os.path.join( self.inputdir, filename ))
self.sources.append( f )
self.total_size = self.total_size + f.filesize
decorated = [
((key,list_of_values,recsize) for key,list_of_values,recsize in f.read())
for f in self.sources]
self.merged = heapq.merge(*decorated)
def status( self ):
return {"partition":self.partition,"progress":self.progress}
def result( self ):
if len(self.sources) == 0:
return "complete", {"partition":self.partition}
return "fail", {"partition":self.partition}
def work( self ):
if len(self.sources) == 0:
return False
readrec = False
for k2,v2,recsize in self.merged:
readrec = True
if self.prevkey == None:
# Initialize the very first step
self.prevkey = k2
self.prevlist = v2
self.processed = recsize
elif self.prevkey != k2:
# The key changed. Dump all values of previous step
for k3,v3 in self.app.reduce( self.prevkey, self.prevlist ):
self.reducerWriter.store( k3, v3 )
self.prevkey = k2
self.prevlist = v2
self.processed = self.processed + recsize
else:
# Add another record to the list
self.prevlist = self.prevlist + v2
self.processed = self.processed + recsize
p = (self.processed / self.total_size) * 100
if p > self.progress+5:
self.progress = int(p)
# breaking out of the loop to check up on messages
break
if not readrec:
# done
self.progress = 100
for f in self.sources:
f.close()
self.sources = []
self.reducerWriter.close()
return True | en | 0.850071 | # This is a reducer operation # Initialize the very first step # The key changed. Dump all values of previous step # Add another record to the list # breaking out of the loop to check up on messages # done | 2.274423 | 2 |
Python_3/script_3/circle.py | sgpandey05/Digikull_Assignments | 0 | 6618119 | import math
class circle:
def __init__(self,radius):
self.radius=radius
def area(self):
return math.pi*(self.radius**2)
def Circumference(self):
return 2*math.pi*self.radius
def __eq__(self, other):
if self.radius==other.radius:
return "Both Circle are Equal"
elif self.radius < other.radius:
return "Circle 2 is bigger than Circle 1"
else:
return "Circle 1 is bigger than Circle 2"
r1=int(input("Enter radius of circle: 1: "))
r2=int(input("Enter radius of circle: 2: "))
obj1=circle(r1)
obj2=circle(r2)
print("Area of circle:1 ",round(obj1.area(),2))
print("Area of circle:2 ",round(obj2.area(),2))
print("Circumference of circle:1 ",round(obj1.Circumference(),2))
print("Circumference of circle:2 ",round(obj2.Circumference(),2))
print(obj1==obj2) | import math
class circle:
def __init__(self,radius):
self.radius=radius
def area(self):
return math.pi*(self.radius**2)
def Circumference(self):
return 2*math.pi*self.radius
def __eq__(self, other):
if self.radius==other.radius:
return "Both Circle are Equal"
elif self.radius < other.radius:
return "Circle 2 is bigger than Circle 1"
else:
return "Circle 1 is bigger than Circle 2"
r1=int(input("Enter radius of circle: 1: "))
r2=int(input("Enter radius of circle: 2: "))
obj1=circle(r1)
obj2=circle(r2)
print("Area of circle:1 ",round(obj1.area(),2))
print("Area of circle:2 ",round(obj2.area(),2))
print("Circumference of circle:1 ",round(obj1.Circumference(),2))
print("Circumference of circle:2 ",round(obj2.Circumference(),2))
print(obj1==obj2) | none | 1 | 4.250144 | 4 | |
ok2_backend/common/ok2_middleware.py | Mipsters/ok2-backend | 1 | 6618120 | <reponame>Mipsters/ok2-backend
import os
from django.utils.functional import SimpleLazyObject
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth import get_user_model as user_model
from common.utils import get_token
def get_user(request):
token = get_token(request)
if token:
return User.objects.get(pk=token['user_id'])
return AnonymousUser()
class OkMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request, *args, **kwargs):
# do the job of working with jwt, happens before the view is called
authorization = request.headers.get('Authorization')
if authorization:
request.user = get_user(request)
response = self.get_response(request)
if request.user.is_anonymous:
return response
for field in ['id', 'username', 'email']:
if field not in request.session:
request.session[field] = getattr(request.user, field)
# do the job after the view is called if relevant
return response
| import os
from django.utils.functional import SimpleLazyObject
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth import get_user_model as user_model
from common.utils import get_token
def get_user(request):
token = get_token(request)
if token:
return User.objects.get(pk=token['user_id'])
return AnonymousUser()
class OkMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request, *args, **kwargs):
# do the job of working with jwt, happens before the view is called
authorization = request.headers.get('Authorization')
if authorization:
request.user = get_user(request)
response = self.get_response(request)
if request.user.is_anonymous:
return response
for field in ['id', 'username', 'email']:
if field not in request.session:
request.session[field] = getattr(request.user, field)
# do the job after the view is called if relevant
return response | en | 0.990759 | # do the job of working with jwt, happens before the view is called # do the job after the view is called if relevant | 2.336406 | 2 |
src/generate_unconditional_samples.py | randywreed/gpt-2 | 0 | 6618121 | #!/usr/bin/env python3
import os
import sys
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')]
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))]
import argparse
import fire
import json
import numpy as np
import tensorflow as tf
import tflex
import torch
import model, sample, encoder
parser=argparse.ArgumentParser(
description="Generate Text from GPT-2 from prompt",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--model_name',metavar='MODEL',type=str,default='117M',help='Pretrained model name')
parser.add_argument('--model_dir',type=str,default="models",help='Directory to use for model. Should have a subdirectory of model_name')
parser.add_argument('--restore_from', type=str, default='latest', help='Either "latest", "fresh", or a path to a checkpoint file')
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument(
"--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2"
)
parser.add_argument("--k", type=int, default=0)
parser.add_argument("--p", type=float, default=0.9)
parser.add_argument("--padding_text", type=str, default="", help="Padding text for Transfo-XL and XLNet.")
parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--nsamples",type=int,default=1,help="Number of samples to generate")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
parser.add_argument("--batch_size", type=int, default=1,help="batch size must be divisible by nsamples")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
def sample_model(
model_name=args.model_name,
restore_from=args.restore_from,
checkpoint_dir=args.model_dir,
seed=args.seed,
nsamples=args.nsamples,
batch_size=args.batch_size,
length=args.length,
temperature=args.temperature,
top_k=args.k,
top_p=args.p,
penalize=args.repetition_penalty
):
"""
Run the sample_model
:model_name=117M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to
reproduce results
:nsamples=0 : Number of samples to return, if 0, continues to
generate samples indefinately.
:batch_size=1 : Number of batches (only affects speed/memory).
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
:penalize=0.0 : Float value controlling "used" penalty. Implements repetition
reduction (similar to CTRL) if set to a value > 0. A decent setting might be 0.85
with temperature 0.3 and top_k 40.
"""
enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join(checkpoint_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tflex.Session(graph=tf.Graph()) as sess:
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
start_token=enc.encoder['<|endoftext|>'],
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p, penalize=penalize
)[:, 1:]
saver = tflex.Saver()
if restore_from is None:
restore_from = os.path.join('models', model_name)
ckpt = tflex.latest_checkpoint(restore_from)
saver.restore(sess, ckpt)
generated = 0
while nsamples == 0 or generated < nsamples:
out = sess.run(output)
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(text)
if __name__ == '__main__':
fire.Fire(sample_model)
| #!/usr/bin/env python3
import os
import sys
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')]
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))]
import argparse
import fire
import json
import numpy as np
import tensorflow as tf
import tflex
import torch
import model, sample, encoder
parser=argparse.ArgumentParser(
description="Generate Text from GPT-2 from prompt",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--model_name',metavar='MODEL',type=str,default='117M',help='Pretrained model name')
parser.add_argument('--model_dir',type=str,default="models",help='Directory to use for model. Should have a subdirectory of model_name')
parser.add_argument('--restore_from', type=str, default='latest', help='Either "latest", "fresh", or a path to a checkpoint file')
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument(
"--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2"
)
parser.add_argument("--k", type=int, default=0)
parser.add_argument("--p", type=float, default=0.9)
parser.add_argument("--padding_text", type=str, default="", help="Padding text for Transfo-XL and XLNet.")
parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--nsamples",type=int,default=1,help="Number of samples to generate")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
parser.add_argument("--batch_size", type=int, default=1,help="batch size must be divisible by nsamples")
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
def sample_model(
model_name=args.model_name,
restore_from=args.restore_from,
checkpoint_dir=args.model_dir,
seed=args.seed,
nsamples=args.nsamples,
batch_size=args.batch_size,
length=args.length,
temperature=args.temperature,
top_k=args.k,
top_p=args.p,
penalize=args.repetition_penalty
):
"""
Run the sample_model
:model_name=117M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to
reproduce results
:nsamples=0 : Number of samples to return, if 0, continues to
generate samples indefinately.
:batch_size=1 : Number of batches (only affects speed/memory).
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
:penalize=0.0 : Float value controlling "used" penalty. Implements repetition
reduction (similar to CTRL) if set to a value > 0. A decent setting might be 0.85
with temperature 0.3 and top_k 40.
"""
enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join(checkpoint_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tflex.Session(graph=tf.Graph()) as sess:
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
start_token=enc.encoder['<|endoftext|>'],
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p, penalize=penalize
)[:, 1:]
saver = tflex.Saver()
if restore_from is None:
restore_from = os.path.join('models', model_name)
ckpt = tflex.latest_checkpoint(restore_from)
saver.restore(sess, ckpt)
generated = 0
while nsamples == 0 or generated < nsamples:
out = sess.run(output)
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(text)
if __name__ == '__main__':
fire.Fire(sample_model)
| en | 0.812974 | #!/usr/bin/env python3 Run the sample_model :model_name=117M : String, which model to use :seed=None : Integer seed for random number generators, fix seed to reproduce results :nsamples=0 : Number of samples to return, if 0, continues to generate samples indefinately. :batch_size=1 : Number of batches (only affects speed/memory). :length=None : Number of tokens in generated text, if None (default), is determined by model hyperparameters :temperature=1 : Float value controlling randomness in boltzmann distribution. Lower temperature results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. Higher temperature results in more random completions. :top_k=0 : Integer value controlling diversity. 1 means only 1 word is considered for each step (token), resulting in deterministic completions, while 40 means 40 words are considered at each step. 0 (default) is a special setting meaning no restrictions. 40 generally is a good value. :top_p=0.0 : Float value controlling diversity. Implements nucleus sampling, overriding top_k if set to a value > 0. A good setting is 0.9. :penalize=0.0 : Float value controlling "used" penalty. Implements repetition reduction (similar to CTRL) if set to a value > 0. A decent setting might be 0.85 with temperature 0.3 and top_k 40. | 2.149855 | 2 |
tests/test_synth_toolbox.py | kolea2/synthtool | 53 | 6618122 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import pytest # type:ignore
from tests.util import make_working_repo
import autosynth.abstract_source
import autosynth.synth
from integration_tests import util
def test_compose_pr_title_with_many_commits():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "", "")
assert text == (
"[CHANGE ME] Re-generated to pick up changes "
"in the API or client library generator."
)
def test_compose_pr_title_with_many_commits_and_source_name():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "", "googleapis")
assert text == ("[CHANGE ME] Re-generated to pick up changes " "from googleapis.")
def test_compose_pr_title_with_many_commits_and_synth_path():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "automl", "")
assert text == (
"[CHANGE ME] Re-generated automl to pick up changes "
"in the API or client library generator."
)
def test_compose_pr_title_with_many_commits_and_source_name_and_synth_path():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "automl", "googleapis")
assert text == (
"[CHANGE ME] Re-generated automl to pick up changes " "from googleapis."
)
@pytest.fixture(scope="module")
def working_repo():
with tempfile.TemporaryDirectory() as working_dir, util.OsChdirContext(working_dir):
make_working_repo(working_dir)
yield working_dir
def test_compose_pr_title_with_one_commit(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "", "")
assert text == "c subject"
def test_compose_pr_title_with_two_commits(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(2, 1, "", "")
assert text == (
"[CHANGE ME] Re-generated to pick up changes "
"in the API or client library generator."
)
def test_compose_pr_title_with_one_commit_and_synth_path(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "automl", "")
assert text == "[automl] c subject"
def test_compose_pr_title_with_one_commit_and_source_name(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "", "googleapis")
assert text == "c subject"
def test_compose_pr_title_with_one_commit_and_synth_path_and_source_name(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "automl", "googleapis")
assert text == "[automl] c subject"
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import pytest # type:ignore
from tests.util import make_working_repo
import autosynth.abstract_source
import autosynth.synth
from integration_tests import util
def test_compose_pr_title_with_many_commits():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "", "")
assert text == (
"[CHANGE ME] Re-generated to pick up changes "
"in the API or client library generator."
)
def test_compose_pr_title_with_many_commits_and_source_name():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "", "googleapis")
assert text == ("[CHANGE ME] Re-generated to pick up changes " "from googleapis.")
def test_compose_pr_title_with_many_commits_and_synth_path():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "automl", "")
assert text == (
"[CHANGE ME] Re-generated automl to pick up changes "
"in the API or client library generator."
)
def test_compose_pr_title_with_many_commits_and_source_name_and_synth_path():
text = autosynth.synth_toolbox._compose_pr_title(3, 3, "automl", "googleapis")
assert text == (
"[CHANGE ME] Re-generated automl to pick up changes " "from googleapis."
)
@pytest.fixture(scope="module")
def working_repo():
with tempfile.TemporaryDirectory() as working_dir, util.OsChdirContext(working_dir):
make_working_repo(working_dir)
yield working_dir
def test_compose_pr_title_with_one_commit(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "", "")
assert text == "c subject"
def test_compose_pr_title_with_two_commits(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(2, 1, "", "")
assert text == (
"[CHANGE ME] Re-generated to pick up changes "
"in the API or client library generator."
)
def test_compose_pr_title_with_one_commit_and_synth_path(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "automl", "")
assert text == "[automl] c subject"
def test_compose_pr_title_with_one_commit_and_source_name(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "", "googleapis")
assert text == "c subject"
def test_compose_pr_title_with_one_commit_and_synth_path_and_source_name(working_repo):
text = autosynth.synth_toolbox._compose_pr_title(1, 1, "automl", "googleapis")
assert text == "[automl] c subject"
| en | 0.851668 | # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # type:ignore | 1.801591 | 2 |
terms/views.py | jtrouth/django-terms | 4 | 6618123 | <filename>terms/views.py
from django.views.generic import DetailView
from .models import Term
class TermDetail(DetailView):
model = Term
context_object_name = 'term'
def get_queryset(self):
return self.model.objects.exclude(definition='')
| <filename>terms/views.py
from django.views.generic import DetailView
from .models import Term
class TermDetail(DetailView):
model = Term
context_object_name = 'term'
def get_queryset(self):
return self.model.objects.exclude(definition='')
| none | 1 | 1.953171 | 2 | |
app/http/controllers/FileController.py | doseextra/cms | 0 | 6618124 | from flask import jsonify
from app.utils.files import get_all_files, save_files
def index():
return jsonify(get_all_files())
| from flask import jsonify
from app.utils.files import get_all_files, save_files
def index():
return jsonify(get_all_files())
| none | 1 | 2.044057 | 2 | |
doc/tutorials/dev/plot_template.py | mjziebarth/gimli | 224 | 6618125 | #!/ussr/bin/env python
# -*- coding: utf-8 -*-
'''
TUTORIAL NAME
-------------
:math:`\arr{x}=\transpose{[x_1,\ldots,x_N]} \grad \u{1} \arr{m} \unit{m}`
*This introductory sentence should state the intent and goal of the tutorial. Keep it brief.*
*This next block should state any assumptions that you the writer are making. Present them in list form.*
Cite something :cite:`Zienkiewicz1977` or [Zienkiewicz1977]_
glossary ref :term:`numpy`
GIMLi api ref :gimliapi:`GIMLI::Cell`
pygimli ref ??????
'''
import pygimli as g
print((g.versionStr()))
"""
Last output
.. lastcout::
"""
"""
Invoking :term:`Matplotlib`.
Please see http://matplotlib.org/1.3.0/faq/usage_faq.html#general-concepts
"""
import matplotlib.pyplot as plt
plt.plot(1, 1, 'x')
plt.show()
| #!/ussr/bin/env python
# -*- coding: utf-8 -*-
'''
TUTORIAL NAME
-------------
:math:`\arr{x}=\transpose{[x_1,\ldots,x_N]} \grad \u{1} \arr{m} \unit{m}`
*This introductory sentence should state the intent and goal of the tutorial. Keep it brief.*
*This next block should state any assumptions that you the writer are making. Present them in list form.*
Cite something :cite:`Zienkiewicz1977` or [Zienkiewicz1977]_
glossary ref :term:`numpy`
GIMLi api ref :gimliapi:`GIMLI::Cell`
pygimli ref ??????
'''
import pygimli as g
print((g.versionStr()))
"""
Last output
.. lastcout::
"""
"""
Invoking :term:`Matplotlib`.
Please see http://matplotlib.org/1.3.0/faq/usage_faq.html#general-concepts
"""
import matplotlib.pyplot as plt
plt.plot(1, 1, 'x')
plt.show()
| en | 0.483565 | #!/ussr/bin/env python # -*- coding: utf-8 -*- TUTORIAL NAME ------------- :math:`\arr{x}=\transpose{[x_1,\ldots,x_N]} \grad \u{1} \arr{m} \unit{m}` *This introductory sentence should state the intent and goal of the tutorial. Keep it brief.* *This next block should state any assumptions that you the writer are making. Present them in list form.* Cite something :cite:`Zienkiewicz1977` or [Zienkiewicz1977]_ glossary ref :term:`numpy` GIMLi api ref :gimliapi:`GIMLI::Cell` pygimli ref ?????? Last output .. lastcout:: Invoking :term:`Matplotlib`. Please see http://matplotlib.org/1.3.0/faq/usage_faq.html#general-concepts | 3.448788 | 3 |
prologin/2015/3_temperatures.py | AntoineAugusti/katas | 7 | 6618126 | # http://prologin.org/training/challenge/demi2015/temperatures
from sys import stdin
nbTemperatures = int(stdin.readline())
temperatures = [int(x) for x in stdin.readline().split()]
deltaEncoding = []
deltaEncoding.append(temperatures[0])
for i in range(1, nbTemperatures):
deltaEncoding.append(temperatures[i] - temperatures[i-1])
print ' '.join(str(delta) for delta in deltaEncoding)
| # http://prologin.org/training/challenge/demi2015/temperatures
from sys import stdin
nbTemperatures = int(stdin.readline())
temperatures = [int(x) for x in stdin.readline().split()]
deltaEncoding = []
deltaEncoding.append(temperatures[0])
for i in range(1, nbTemperatures):
deltaEncoding.append(temperatures[i] - temperatures[i-1])
print ' '.join(str(delta) for delta in deltaEncoding)
| en | 0.364467 | # http://prologin.org/training/challenge/demi2015/temperatures | 2.861916 | 3 |
deepred_pytorch/io/__init__.py | dileep-kishore/DEEPred-pytorch | 0 | 6618127 | from .data import parse_data, parse_go_dag
| from .data import parse_data, parse_go_dag
| none | 1 | 1.009108 | 1 | |
ETTS/trainer.py | ishine/FG-transformer-TTS | 26 | 6618128 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from .dataloader import LJSpeechDataset, VCTKDataset, RandomBucketSampler
from torch.utils import data
import pytorch_lightning.core.lightning as pl
import sys
import soundfile as sf
from tqdm import tqdm
import os
import shutil
import matplotlib.pyplot as plt
plt.switch_backend('agg')
class ETTSTrasnformerModel(pl.LightningModule):
def __init__(self, datadir, datatype, vocoder_ckpt_path, maxlength, nmels,
emo_embed_dim, text_embed_dim, model_dim, model_hidden_size,
nlayers, nheads, ngst, nlst, use_lst,
train_bucket_size, val_bucket_size,
warmup_step, maxstep, lr, batch_size,
distributed, val_split, gate_pos_weight,
nworkers, num_audio_sample, sampledir, n_attn_plots,
use_guided_attn, n_guided_steps,
etts_checkpoint=None):
super().__init__()
self.datatype = datatype
self.nworkers = nworkers
self.train_bucket_size = train_bucket_size
self.val_bucket_size = val_bucket_size
self.warmup_step = warmup_step
self.maxstep = maxstep
self.lr = lr
self.maxlength = maxlength
self.distributed = distributed
self.num_audio_sample = num_audio_sample
self.sampledir = sampledir
self.datadir = datadir
self.batch_size = batch_size
self.n_attn_plots = n_attn_plots
self.use_guided_attn = use_guided_attn
self.n_guided_steps = n_guided_steps
self.use_lst = use_lst
self.data = LJSpeechDataset(datadir) if self.datatype == 'LJSpeech' else VCTKDataset(datadir)
if self.use_lst:
from .ettstransformer import ETTSTransformer
self.model = ETTSTransformer(text_embed_dim, emo_embed_dim, nmels, maxlength, self.data.labelset,
ngst, nlst,
d_model=model_dim, hidden_size=model_hidden_size,
nlayers=nlayers, nheads=nheads)
else:
from .baseline import ETTSTransformer_baseline
self.model = ETTSTransformer_baseline(text_embed_dim, emo_embed_dim, nmels, maxlength, self.data.labelset, ngst,
d_model=model_dim, hidden_size=model_hidden_size,
nlayers=nlayers, nheads=nheads)
if etts_checkpoint is not None:
#Extract state dict
state_dict = torch.load(etts_checkpoint)['state_dict']
non_parameters = ['tgt_mask', 'pos_txt.p', 'pos_mel.p']
new_state_dict = dict()
for k, v in state_dict.items():
if k.split('.')[0] == 'model':
k = '.'.join(k.split('.')[1:])
if k not in non_parameters:
new_state_dict[k] = v
print (self.model.load_state_dict(new_state_dict, strict=False))
sys.path.insert(0, 'waveglow') #For waveglow vocoder
waveglow = torch.load(vocoder_ckpt_path)['model']
self.vocoder = waveglow.remove_weightnorm(waveglow).eval()
numtraining = int(len(self.data) * val_split)
splits = [numtraining, len(self.data) - numtraining]
self.traindata, self.valdata = data.random_split(self.data, splits, generator=torch.Generator().manual_seed(58))
self.register_buffer('gate_pos_weight', torch.FloatTensor([gate_pos_weight]))
self.gate_criterion = nn.BCEWithLogitsLoss(pos_weight=self.gate_pos_weight)
self.reconstruct_criterion = nn.L1Loss()
def train_dataloader(self):
idxs = self.traindata.indices
length = [self.data.lengths[i] for i in idxs]
sampler = RandomBucketSampler(self.train_bucket_size, length, self.batch_size, drop_last=True, distributed=self.distributed,
world_size=self.trainer.world_size, rank=self.trainer.local_rank, dynamic_batch=False)
return data.DataLoader(self.traindata,
num_workers=self.nworkers,
batch_sampler=sampler,
collate_fn=self.data.seqCollate)
def val_dataloader(self):
idxs = self.valdata.indices
length = [self.data.lengths[i] for i in idxs]
with open('valid_LJ.set', 'w') as f:
for i in idxs:
f.write(f"{self.data.audio_names[i]}\n")
print ([self.data.audio_names[i] for i in idxs])
sampler = RandomBucketSampler(self.val_bucket_size, length, self.batch_size, drop_last=True, distributed=self.distributed,
world_size=self.trainer.world_size, rank=self.trainer.local_rank, dynamic_batch=False)
return data.DataLoader(self.valdata,
num_workers=self.nworkers,
batch_sampler=sampler,
collate_fn=self.data.seqCollate)
def configure_optimizers(self):
params = self.model.parameters()
optimizer = optim.Adam(params, lr=self.lr)
#Learning rate scheduler
num_training_steps = self.maxstep
num_warmup_steps = self.warmup_step
num_flat_steps = int(0.3 * num_training_steps)
def lambda_lr(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
elif current_step < (num_warmup_steps + num_flat_steps):
return 1.0
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - (num_warmup_steps + num_flat_steps)))
)
scheduler = {
'scheduler': optim.lr_scheduler.LambdaLR(optimizer, lambda_lr),
'interval': 'step'
}
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
if self.datatype == 'LJSpeech':
mels, l_mel, emos, l_emo, phonemes, l_phoneme, _ = batch
g_emos, g_l_emos = emos, l_emo
elif self.datatype == 'VCTK':
mels, l_mel, emos, l_emo, g_emos, g_l_emos, phonemes, l_phoneme, _ = batch
if self.use_lst:
(mel_out, mel_out_post, gate_logit, mask, gloss, _, _, _) = self.model(g_emos, emos, phonemes, mels, g_l_emos, l_emo, l_phoneme, l_mel)
else:
(mel_out, mel_out_post, gate_logit, mask, gloss, _, _, _) = self.model(g_emos, phonemes, mels, g_l_emos, l_phoneme, l_mel)
mask = ~mask
reconstruct_loss = (self.reconstruct_criterion(mel_out[mask], mels[mask]) +
self.reconstruct_criterion(mel_out_post[mask], mels[mask]))
gate_labels = F.one_hot(l_mel - 1, num_classes=mels.size(1)).float()
gate_loss = self.gate_criterion(gate_logit[mask], gate_labels[mask])
loss = reconstruct_loss + gate_loss
if self.use_guided_attn and self.global_step < self.n_guided_steps:
loss += gloss
tqdm_dict = {
'loss': loss,
'rec_loss': reconstruct_loss,
'gate_loss': gate_loss,
'guided_loss': gloss
}
self.log_dict(tqdm_dict, on_step=True, on_epoch=True, prog_bar=True)
return loss
def on_validation_epoch_start(self):
self.sampled_emotion, self.sampled_text = [], []
self.reference_names, self.text_names = [], []
self.sampled_attention_plots = 0
def validation_step(self, batch, batch_idx):
if self.datatype == 'LJSpeech':
mels, l_mel, emos, l_emo, phonemes, l_phoneme, names = batch
g_emos, g_l_emos = emos, l_emo
elif self.datatype == 'VCTK':
mels, l_mel, emos, l_emo, g_emos, g_l_emos, phonemes, l_phoneme, names = batch
if self.use_lst:
mel_out, mel_out_post, gate_logit, mask, gloss, attns, enc_attn, dec_attn = self.model(g_emos, emos, phonemes, mels, g_l_emos, l_emo, l_phoneme, l_mel)
else:
(mel_out, mel_out_post, gate_logit, mask, gloss, attns, enc_attn, dec_attn) = self.model(g_emos, phonemes, mels, g_l_emos, l_phoneme, l_mel)
mask = ~mask
reconstruct_loss = (self.reconstruct_criterion(mel_out[mask], mels[mask]) +
self.reconstruct_criterion(mel_out_post[mask], mels[mask]))
gate_labels = F.one_hot(l_mel - 1, num_classes=mels.size(1)).float()
gate_loss = self.gate_criterion(gate_logit[mask], gate_labels[mask])
loss = reconstruct_loss + gate_loss
if self.use_guided_attn and self.global_step < self.n_guided_steps:
loss += gloss
validdict = {
'val_loss': loss,
'val_rec_loss': reconstruct_loss,
'val_gate_loss': gate_loss,
'val_guided_loss': gloss
}
self.log_dict(validdict, on_epoch=True, logger=True, sync_dist=self.distributed)
if len(self.sampled_text) < self.num_audio_sample and batch_idx % 2 == 0:
self.sampled_text += [text[: l] for text, l in zip(phonemes, l_phoneme)]
self.text_names += names
if len(self.sampled_emotion) < self.num_audio_sample and batch_idx % 2 == 1:
self.sampled_emotion += [emo[: l] for emo, l in zip(emos.detach(), l_emo)]
self.reference_names += names
if self.sampled_attention_plots < self.n_attn_plots:
self.plot_attn(attns, 'ed')
self.plot_attn(enc_attn, 'enc')
self.plot_attn(dec_attn, 'dec')
self.sampled_attention_plots += 1
return loss
def plot_attn(self, attns, prefix):
fig, axs = plt.subplots(1, len(attns))
for i, attn in enumerate(attns): #Each layers
attn = attn.cpu().numpy()
sampled_attn = attn[0]
axs[i].matshow(sampled_attn)
outpath = os.path.join(self.sampledir, f'epoch{self.current_epoch}-{prefix}-{self.sampled_attention_plots}.png')
fig.savefig(outpath)
fig.clf()
plt.close()
def on_validation_epoch_end(self):
if self.trainer.local_rank != 0:
return
print ("Synthesizing Audio with unpaired emo/text from sampled validation set...")
#Run with mis-paired reference audio / text
self.sampled_attention_plots = 0
#Use the first text as text condition, observe the difference when inputting different reference speech
text = self.sampled_text[0].unsqueeze(0)
ref_text_name = self.text_names[0]
ref_text = self.data.label[ref_text_name]['text']
with open(os.path.join(self.sampledir, f'epoch{self.current_epoch}.txt'), 'w') as f:
f.write(ref_text)
for i in tqdm(range(min(self.num_audio_sample, len(self.sampled_text)))):
emo = self.sampled_emotion[i].unsqueeze(0)
with torch.no_grad():
if self.use_lst:
mel, attns = self.model.inference(emo, emo, text, maxlen=self.maxlength, threshold=.5)
else:
mel, attns = self.model.inference(emo, text, maxlen=self.maxlength, threshold=.5)
audio = self.vocoder.infer(mel.transpose(1, 2), sigma=0.6) * 32768.0
audio = audio.squeeze(0).cpu().numpy().astype('int16')
sf.write(os.path.join(self.sampledir, f'epoch{self.current_epoch}-{i}.wav'), audio, 22050)
if self.sampled_attention_plots < self.n_attn_plots:
self.plot_attn(attns, 'inf_ed')
self.sampled_attention_plots += 1
ref_audio_name = self.reference_names[i]
raw_path = os.path.join(self.datadir, '16k_wav', ref_audio_name)
shutil.copyfile(raw_path + '.wav', os.path.join(self.sampledir, f'epoch{self.current_epoch}-{i}-ref.wav'))
| import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from .dataloader import LJSpeechDataset, VCTKDataset, RandomBucketSampler
from torch.utils import data
import pytorch_lightning.core.lightning as pl
import sys
import soundfile as sf
from tqdm import tqdm
import os
import shutil
import matplotlib.pyplot as plt
plt.switch_backend('agg')
class ETTSTrasnformerModel(pl.LightningModule):
def __init__(self, datadir, datatype, vocoder_ckpt_path, maxlength, nmels,
emo_embed_dim, text_embed_dim, model_dim, model_hidden_size,
nlayers, nheads, ngst, nlst, use_lst,
train_bucket_size, val_bucket_size,
warmup_step, maxstep, lr, batch_size,
distributed, val_split, gate_pos_weight,
nworkers, num_audio_sample, sampledir, n_attn_plots,
use_guided_attn, n_guided_steps,
etts_checkpoint=None):
super().__init__()
self.datatype = datatype
self.nworkers = nworkers
self.train_bucket_size = train_bucket_size
self.val_bucket_size = val_bucket_size
self.warmup_step = warmup_step
self.maxstep = maxstep
self.lr = lr
self.maxlength = maxlength
self.distributed = distributed
self.num_audio_sample = num_audio_sample
self.sampledir = sampledir
self.datadir = datadir
self.batch_size = batch_size
self.n_attn_plots = n_attn_plots
self.use_guided_attn = use_guided_attn
self.n_guided_steps = n_guided_steps
self.use_lst = use_lst
self.data = LJSpeechDataset(datadir) if self.datatype == 'LJSpeech' else VCTKDataset(datadir)
if self.use_lst:
from .ettstransformer import ETTSTransformer
self.model = ETTSTransformer(text_embed_dim, emo_embed_dim, nmels, maxlength, self.data.labelset,
ngst, nlst,
d_model=model_dim, hidden_size=model_hidden_size,
nlayers=nlayers, nheads=nheads)
else:
from .baseline import ETTSTransformer_baseline
self.model = ETTSTransformer_baseline(text_embed_dim, emo_embed_dim, nmels, maxlength, self.data.labelset, ngst,
d_model=model_dim, hidden_size=model_hidden_size,
nlayers=nlayers, nheads=nheads)
if etts_checkpoint is not None:
#Extract state dict
state_dict = torch.load(etts_checkpoint)['state_dict']
non_parameters = ['tgt_mask', 'pos_txt.p', 'pos_mel.p']
new_state_dict = dict()
for k, v in state_dict.items():
if k.split('.')[0] == 'model':
k = '.'.join(k.split('.')[1:])
if k not in non_parameters:
new_state_dict[k] = v
print (self.model.load_state_dict(new_state_dict, strict=False))
sys.path.insert(0, 'waveglow') #For waveglow vocoder
waveglow = torch.load(vocoder_ckpt_path)['model']
self.vocoder = waveglow.remove_weightnorm(waveglow).eval()
numtraining = int(len(self.data) * val_split)
splits = [numtraining, len(self.data) - numtraining]
self.traindata, self.valdata = data.random_split(self.data, splits, generator=torch.Generator().manual_seed(58))
self.register_buffer('gate_pos_weight', torch.FloatTensor([gate_pos_weight]))
self.gate_criterion = nn.BCEWithLogitsLoss(pos_weight=self.gate_pos_weight)
self.reconstruct_criterion = nn.L1Loss()
def train_dataloader(self):
idxs = self.traindata.indices
length = [self.data.lengths[i] for i in idxs]
sampler = RandomBucketSampler(self.train_bucket_size, length, self.batch_size, drop_last=True, distributed=self.distributed,
world_size=self.trainer.world_size, rank=self.trainer.local_rank, dynamic_batch=False)
return data.DataLoader(self.traindata,
num_workers=self.nworkers,
batch_sampler=sampler,
collate_fn=self.data.seqCollate)
def val_dataloader(self):
idxs = self.valdata.indices
length = [self.data.lengths[i] for i in idxs]
with open('valid_LJ.set', 'w') as f:
for i in idxs:
f.write(f"{self.data.audio_names[i]}\n")
print ([self.data.audio_names[i] for i in idxs])
sampler = RandomBucketSampler(self.val_bucket_size, length, self.batch_size, drop_last=True, distributed=self.distributed,
world_size=self.trainer.world_size, rank=self.trainer.local_rank, dynamic_batch=False)
return data.DataLoader(self.valdata,
num_workers=self.nworkers,
batch_sampler=sampler,
collate_fn=self.data.seqCollate)
def configure_optimizers(self):
params = self.model.parameters()
optimizer = optim.Adam(params, lr=self.lr)
#Learning rate scheduler
num_training_steps = self.maxstep
num_warmup_steps = self.warmup_step
num_flat_steps = int(0.3 * num_training_steps)
def lambda_lr(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
elif current_step < (num_warmup_steps + num_flat_steps):
return 1.0
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - (num_warmup_steps + num_flat_steps)))
)
scheduler = {
'scheduler': optim.lr_scheduler.LambdaLR(optimizer, lambda_lr),
'interval': 'step'
}
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
if self.datatype == 'LJSpeech':
mels, l_mel, emos, l_emo, phonemes, l_phoneme, _ = batch
g_emos, g_l_emos = emos, l_emo
elif self.datatype == 'VCTK':
mels, l_mel, emos, l_emo, g_emos, g_l_emos, phonemes, l_phoneme, _ = batch
if self.use_lst:
(mel_out, mel_out_post, gate_logit, mask, gloss, _, _, _) = self.model(g_emos, emos, phonemes, mels, g_l_emos, l_emo, l_phoneme, l_mel)
else:
(mel_out, mel_out_post, gate_logit, mask, gloss, _, _, _) = self.model(g_emos, phonemes, mels, g_l_emos, l_phoneme, l_mel)
mask = ~mask
reconstruct_loss = (self.reconstruct_criterion(mel_out[mask], mels[mask]) +
self.reconstruct_criterion(mel_out_post[mask], mels[mask]))
gate_labels = F.one_hot(l_mel - 1, num_classes=mels.size(1)).float()
gate_loss = self.gate_criterion(gate_logit[mask], gate_labels[mask])
loss = reconstruct_loss + gate_loss
if self.use_guided_attn and self.global_step < self.n_guided_steps:
loss += gloss
tqdm_dict = {
'loss': loss,
'rec_loss': reconstruct_loss,
'gate_loss': gate_loss,
'guided_loss': gloss
}
self.log_dict(tqdm_dict, on_step=True, on_epoch=True, prog_bar=True)
return loss
def on_validation_epoch_start(self):
self.sampled_emotion, self.sampled_text = [], []
self.reference_names, self.text_names = [], []
self.sampled_attention_plots = 0
def validation_step(self, batch, batch_idx):
if self.datatype == 'LJSpeech':
mels, l_mel, emos, l_emo, phonemes, l_phoneme, names = batch
g_emos, g_l_emos = emos, l_emo
elif self.datatype == 'VCTK':
mels, l_mel, emos, l_emo, g_emos, g_l_emos, phonemes, l_phoneme, names = batch
if self.use_lst:
mel_out, mel_out_post, gate_logit, mask, gloss, attns, enc_attn, dec_attn = self.model(g_emos, emos, phonemes, mels, g_l_emos, l_emo, l_phoneme, l_mel)
else:
(mel_out, mel_out_post, gate_logit, mask, gloss, attns, enc_attn, dec_attn) = self.model(g_emos, phonemes, mels, g_l_emos, l_phoneme, l_mel)
mask = ~mask
reconstruct_loss = (self.reconstruct_criterion(mel_out[mask], mels[mask]) +
self.reconstruct_criterion(mel_out_post[mask], mels[mask]))
gate_labels = F.one_hot(l_mel - 1, num_classes=mels.size(1)).float()
gate_loss = self.gate_criterion(gate_logit[mask], gate_labels[mask])
loss = reconstruct_loss + gate_loss
if self.use_guided_attn and self.global_step < self.n_guided_steps:
loss += gloss
validdict = {
'val_loss': loss,
'val_rec_loss': reconstruct_loss,
'val_gate_loss': gate_loss,
'val_guided_loss': gloss
}
self.log_dict(validdict, on_epoch=True, logger=True, sync_dist=self.distributed)
if len(self.sampled_text) < self.num_audio_sample and batch_idx % 2 == 0:
self.sampled_text += [text[: l] for text, l in zip(phonemes, l_phoneme)]
self.text_names += names
if len(self.sampled_emotion) < self.num_audio_sample and batch_idx % 2 == 1:
self.sampled_emotion += [emo[: l] for emo, l in zip(emos.detach(), l_emo)]
self.reference_names += names
if self.sampled_attention_plots < self.n_attn_plots:
self.plot_attn(attns, 'ed')
self.plot_attn(enc_attn, 'enc')
self.plot_attn(dec_attn, 'dec')
self.sampled_attention_plots += 1
return loss
def plot_attn(self, attns, prefix):
fig, axs = plt.subplots(1, len(attns))
for i, attn in enumerate(attns): #Each layers
attn = attn.cpu().numpy()
sampled_attn = attn[0]
axs[i].matshow(sampled_attn)
outpath = os.path.join(self.sampledir, f'epoch{self.current_epoch}-{prefix}-{self.sampled_attention_plots}.png')
fig.savefig(outpath)
fig.clf()
plt.close()
def on_validation_epoch_end(self):
if self.trainer.local_rank != 0:
return
print ("Synthesizing Audio with unpaired emo/text from sampled validation set...")
#Run with mis-paired reference audio / text
self.sampled_attention_plots = 0
#Use the first text as text condition, observe the difference when inputting different reference speech
text = self.sampled_text[0].unsqueeze(0)
ref_text_name = self.text_names[0]
ref_text = self.data.label[ref_text_name]['text']
with open(os.path.join(self.sampledir, f'epoch{self.current_epoch}.txt'), 'w') as f:
f.write(ref_text)
for i in tqdm(range(min(self.num_audio_sample, len(self.sampled_text)))):
emo = self.sampled_emotion[i].unsqueeze(0)
with torch.no_grad():
if self.use_lst:
mel, attns = self.model.inference(emo, emo, text, maxlen=self.maxlength, threshold=.5)
else:
mel, attns = self.model.inference(emo, text, maxlen=self.maxlength, threshold=.5)
audio = self.vocoder.infer(mel.transpose(1, 2), sigma=0.6) * 32768.0
audio = audio.squeeze(0).cpu().numpy().astype('int16')
sf.write(os.path.join(self.sampledir, f'epoch{self.current_epoch}-{i}.wav'), audio, 22050)
if self.sampled_attention_plots < self.n_attn_plots:
self.plot_attn(attns, 'inf_ed')
self.sampled_attention_plots += 1
ref_audio_name = self.reference_names[i]
raw_path = os.path.join(self.datadir, '16k_wav', ref_audio_name)
shutil.copyfile(raw_path + '.wav', os.path.join(self.sampledir, f'epoch{self.current_epoch}-{i}-ref.wav'))
| en | 0.740634 | #Extract state dict #For waveglow vocoder #Learning rate scheduler #Each layers #Run with mis-paired reference audio / text #Use the first text as text condition, observe the difference when inputting different reference speech | 1.907969 | 2 |
wk/web/applications/demo/__init__.py | Peiiii/wk | 0 | 6618129 | from .demo import * | from .demo import * | none | 1 | 1.194638 | 1 | |
decred/examples/send_testnet.py | JoeGruffins/tinydecred | 0 | 6618130 | <gh_stars>0
"""
Copyright (c) 2019, The Decred developers
This example script will send 1 DCR from a wallet as created with the
create_testnet_wallet.py example script to the return address from the testnet
faucet at https://faucet.decred.org/.
Before running this script, send the wallet some DCR from the faucet.
"""
from getpass import getpass
from decred.wallet.wallet import SimpleWallet
# Testnet return address for faucet.decred.org.
TESTNET_ADDRESS = "TsfDLrRkk9ciUuwfp2b8PawwnukYD7yAjGd"
def main():
value = int(1 * 1e8) # 1 DCR, atoms
password = getpass()
walletDir = "wallets"
try:
print("Opening and synchronizing wallet")
wallet = SimpleWallet(walletDir, password, "testnet")
except Exception as e:
print("Failed to open wallet with provided password: %s" % e)
exit()
try:
# Send some DCR.
tx = wallet.sendToAddress(value, TESTNET_ADDRESS)
# Print the transaction ID and a dcrdata link.
print("Transaction ID: %s" % tx.id())
print("See transaction at https://testnet.dcrdata.org/tx/%s" % tx.id())
except Exception as e:
print("Failed to send transaction: %s" % e)
finally:
wallet.close()
if __name__ == "__main__":
main()
| """
Copyright (c) 2019, The Decred developers
This example script will send 1 DCR from a wallet as created with the
create_testnet_wallet.py example script to the return address from the testnet
faucet at https://faucet.decred.org/.
Before running this script, send the wallet some DCR from the faucet.
"""
from getpass import getpass
from decred.wallet.wallet import SimpleWallet
# Testnet return address for faucet.decred.org.
TESTNET_ADDRESS = "TsfDLrRkk9ciUuwfp2b8PawwnukYD7yAjGd"
def main():
value = int(1 * 1e8) # 1 DCR, atoms
password = getpass()
walletDir = "wallets"
try:
print("Opening and synchronizing wallet")
wallet = SimpleWallet(walletDir, password, "testnet")
except Exception as e:
print("Failed to open wallet with provided password: %s" % e)
exit()
try:
# Send some DCR.
tx = wallet.sendToAddress(value, TESTNET_ADDRESS)
# Print the transaction ID and a dcrdata link.
print("Transaction ID: %s" % tx.id())
print("See transaction at https://testnet.dcrdata.org/tx/%s" % tx.id())
except Exception as e:
print("Failed to send transaction: %s" % e)
finally:
wallet.close()
if __name__ == "__main__":
main() | en | 0.830007 | Copyright (c) 2019, The Decred developers This example script will send 1 DCR from a wallet as created with the create_testnet_wallet.py example script to the return address from the testnet faucet at https://faucet.decred.org/. Before running this script, send the wallet some DCR from the faucet. # Testnet return address for faucet.decred.org. # 1 DCR, atoms # Send some DCR. # Print the transaction ID and a dcrdata link. | 3.083768 | 3 |
tradssat/genotype/vars_/chgro.py | FabioSeixas/traDSSAT | 6 | 6618131 | from tradssat.genotype.vars_._cropgro import cropgro_cul_vars, cropgro_eco_vars
cul_vars_CHGRO = cropgro_cul_vars()
eco_vars_CHGRO = cropgro_eco_vars()
| from tradssat.genotype.vars_._cropgro import cropgro_cul_vars, cropgro_eco_vars
cul_vars_CHGRO = cropgro_cul_vars()
eco_vars_CHGRO = cropgro_eco_vars()
| none | 1 | 1.244478 | 1 | |
chromeinstaller/ubuntuinstaller.py | PedroVictorCoding/AutomationToolLinux | 0 | 6618132 | import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
#Update system
print(bcolors.OKGREEN)
print("Updating your system with sudo")
print(bcolors.ENDC)
os.system("sudo apt-get update")
#Installing Chrome
print(bcolors.OKGREEN)
print("Installing Google Chrome Stable Version")
print(bcolors.ENDC)
os.system("wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -")
os.system("echo 'deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main' | sudo tee /etc/apt/sources.list.d/google-chrome.list")
os.system("sudo apt-get install google-chrome-stable")
#Action Completed
print(bcolors.OKGREEN)
print("Installation Completed")
print(bcolors.ENDC)
print(bcolors.WARNING + "Check for any erros")
print(bcolors.ENDC)
#Run program
print(bcolors.OKGREEN + "Running the program" + bcolors.ENDC)
print(" ")
print("Thanks for using this tool, please ask for more installations in my github repository")
os.system("google-chrome-stable")
print(bcolors.ENDC)
| import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
#Update system
print(bcolors.OKGREEN)
print("Updating your system with sudo")
print(bcolors.ENDC)
os.system("sudo apt-get update")
#Installing Chrome
print(bcolors.OKGREEN)
print("Installing Google Chrome Stable Version")
print(bcolors.ENDC)
os.system("wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -")
os.system("echo 'deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main' | sudo tee /etc/apt/sources.list.d/google-chrome.list")
os.system("sudo apt-get install google-chrome-stable")
#Action Completed
print(bcolors.OKGREEN)
print("Installation Completed")
print(bcolors.ENDC)
print(bcolors.WARNING + "Check for any erros")
print(bcolors.ENDC)
#Run program
print(bcolors.OKGREEN + "Running the program" + bcolors.ENDC)
print(" ")
print("Thanks for using this tool, please ask for more installations in my github repository")
os.system("google-chrome-stable")
print(bcolors.ENDC)
| en | 0.686027 | #Update system #Installing Chrome #Action Completed #Run program | 2.439028 | 2 |
src/PBGWraps/enumUtilities.py | as1m0n/spheral | 19 | 6618133 | <reponame>as1m0n/spheral<gh_stars>10-100
#-------------------------------------------------------------------------------
# Method to find and parse the named enum from a file.
#-------------------------------------------------------------------------------
def parseEnumDef(enumName, fileName, enclosing_type="enum", val_index=0):
f = open(fileName, "r")
lines = f.readlines()
result = []
accumulateLines = False
for line in lines:
stuff = line.split()
if len(stuff) > 0 and accumulateLines and stuff[0] == "};":
accumulateLines = False
if len(stuff) > 0 and accumulateLines:
result.append(stuff[val_index])
if len(stuff) > 1 and stuff[0] == enclosing_type and stuff[1] == enumName:
accumulateLines = True
return result
#-------------------------------------------------------------------------------
# Add an enum of the given name to the specified scope.
#-------------------------------------------------------------------------------
def addEnumDefinition(scope, enumName, fileName):
enumValues = parseEnumDef(enumName, fileName)
return scope.add_enum(enumName, enumValues)
#-------------------------------------------------------------------------------
# Add a struct of the given name with a bunch of integer members.
#-------------------------------------------------------------------------------
def addStructAsEnumDefinition(scope, structName, fileName):
enumValues = parseEnumDef(structName, fileName, "struct", 3)
x = scope.add_struct(structName)
for val in enumValues:
x.add_static_attribute(val, "long", is_const=True)
return x
| #-------------------------------------------------------------------------------
# Method to find and parse the named enum from a file.
#-------------------------------------------------------------------------------
def parseEnumDef(enumName, fileName, enclosing_type="enum", val_index=0):
f = open(fileName, "r")
lines = f.readlines()
result = []
accumulateLines = False
for line in lines:
stuff = line.split()
if len(stuff) > 0 and accumulateLines and stuff[0] == "};":
accumulateLines = False
if len(stuff) > 0 and accumulateLines:
result.append(stuff[val_index])
if len(stuff) > 1 and stuff[0] == enclosing_type and stuff[1] == enumName:
accumulateLines = True
return result
#-------------------------------------------------------------------------------
# Add an enum of the given name to the specified scope.
#-------------------------------------------------------------------------------
def addEnumDefinition(scope, enumName, fileName):
enumValues = parseEnumDef(enumName, fileName)
return scope.add_enum(enumName, enumValues)
#-------------------------------------------------------------------------------
# Add a struct of the given name with a bunch of integer members.
#-------------------------------------------------------------------------------
def addStructAsEnumDefinition(scope, structName, fileName):
enumValues = parseEnumDef(structName, fileName, "struct", 3)
x = scope.add_struct(structName)
for val in enumValues:
x.add_static_attribute(val, "long", is_const=True)
return x | en | 0.164102 | #------------------------------------------------------------------------------- # Method to find and parse the named enum from a file. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Add an enum of the given name to the specified scope. #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Add a struct of the given name with a bunch of integer members. #------------------------------------------------------------------------------- | 3.550229 | 4 |
backend/api/avrae/cogs5e/sheets/gsheet.py | XoriensLair/XoriensLair.github.io | 0 | 6618134 | """
Created on May 8, 2017
@author: andrew
"""
import asyncio
import datetime
import json
import logging
import os
import re
from contextlib import contextmanager
import gspread
from gspread import SpreadsheetNotFound
from gspread.exceptions import APIError
from gspread.utils import a1_to_rowcol, fill_gaps
from oauth2client.service_account import ServiceAccountCredentials
from api.avrae.cogs5e.funcs.dice import get_roll_comment
from api.avrae.cogs5e.funcs.lookupFuncs import compendium
from api.avrae.cogs5e.models.character import Character
from api.avrae.cogs5e.models.errors import ExternalImportError
from api.avrae.cogs5e.models.sheet.attack import Attack, AttackList
from api.avrae.cogs5e.models.sheet.base import BaseStats, Levels, Resistances, Saves, Skill, Skills
from api.avrae.cogs5e.models.sheet.spellcasting import Spellbook, SpellbookSpell
from api.avrae.cogs5e.sheets.abc import SHEET_VERSION, SheetLoaderABC
from api.avrae.cogs5e.sheets.errors import MissingAttribute
from api.avrae.utils import config
from api.avrae.utils.constants import DAMAGE_TYPES
from api.avrae.utils.functions import search
log = logging.getLogger(__name__)
POS_RE = re.compile(r"([A-Z]+)(\d+)")
IGNORED_SPELL_VALUES = {
'MAX', 'SLOTS', 'CANTRIPS', '1ST LEVEL', '2ND LEVEL', '3RD LEVEL', '4TH LEVEL', '5TH LEVEL',
'6TH LEVEL', '7TH LEVEL', '8TH LEVEL', '9TH LEVEL', '\u25c9', '\u25cd',
"You can hide each level of spells individually by hiding the rows (on the left)."
}
BASE_ABILITY_CHECKS = ( # list of (MOD_CELL/ROW, SKILL_NAME, ADV_CELL)
('C13', 'strength', None), ('C18', 'dexterity', None), ('C23', 'constitution', None),
('C33', 'wisdom', None), ('C28', 'intelligence', None), ('C38', 'charisma', None)
)
SKILL_CELL_MAP = ( # list of (MOD_CELL/ROW, SKILL_NAME, ADV_CELL)
(25, 'acrobatics', None), (26, 'animalHandling', None), (27, 'arcana', None),
(28, 'athletics', None), (22, 'charismaSave', None), (19, 'constitutionSave', None),
(29, 'deception', None), (18, 'dexteritySave', None), (30, 'history', None),
('V12', 'initiative', 'V11'), (31, 'insight', None), (20, 'intelligenceSave', None),
(32, 'intimidation', None), (33, 'investigation', None), (34, 'medicine', None),
(35, 'nature', None), (36, 'perception', None), (37, 'performance', None),
(38, 'persuasion', None), (39, 'religion', None), (40, 'sleightOfHand', None),
(41, 'stealth', None), (17, 'strengthSave', None), (42, 'survival', None),
(21, 'wisdomSave', None)
)
RESIST_COLS = (('resist', 'T'), # T69:T79, 1.4/2.x
('immune', 'AE'), # AE69:AE79, 1.4/2.0
('immune', 'AB'), # AB69:AB79, 2.1 only
('vuln', 'AI')) # AI69:AI79, 2.1 only
SCOPES = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
def letter2num(letters, zbase=True):
"""A = 1, C = 3 and so on. Convert spreadsheet style column
enumeration to a number.
"""
letters = letters.upper()
res = 0
weight = len(letters) - 1
for i, ch in enumerate(letters):
res += (ord(ch) - 64) * 26 ** (weight - i)
if not zbase:
return res
return res - 1
class TempCharacter:
def __init__(self, worksheet):
self.worksheet = worksheet
self.values = worksheet.get_all_values()
self.unformatted_values = self._get_all_unformatted_values()
def _get_all_unformatted_values(self):
data = self.worksheet.spreadsheet.values_get(
self.worksheet.title,
params={'valueRenderOption': "UNFORMATTED_VALUE"})
try:
return fill_gaps(data['values'])
except KeyError:
return []
@staticmethod
def _get_value(source, pos):
_pos = POS_RE.match(pos)
if _pos is None:
raise ValueError("No A1-style position found.")
col = letter2num(_pos.group(1))
row = int(_pos.group(2)) - 1
if row > len(source) or col > len(source[row]):
raise IndexError("Cell out of bounds.")
value = source[row][col]
log.debug(f"Cell {pos}: {value}")
return value
def value(self, pos):
return self._get_value(self.values, pos)
def unformatted_value(self, pos):
return self._get_value(self.unformatted_values, pos)
def value_range(self, rng):
"""Returns a list of values in a range."""
start, end = rng.split(':')
(row_offset, column_offset) = a1_to_rowcol(start)
(last_row, last_column) = a1_to_rowcol(end)
out = []
for col in self.values[row_offset - 1:last_row]:
out.extend(col[column_offset - 1:last_column])
return out
class GoogleSheet(SheetLoaderABC):
g_client = None
_client_initializing = False
_token_expiry = None
def __init__(self, url):
super(GoogleSheet, self).__init__(url)
self.additional = None
self.version = (1, 0) # major, minor
self.total_level = 0
# cache
self._stats = None
# google api stuff
@staticmethod
@contextmanager
def _client_lock():
if GoogleSheet._client_initializing:
raise ExternalImportError("I am still connecting to google. Try again in a few seconds.")
GoogleSheet._client_initializing = True
yield
GoogleSheet._client_initializing = False
@staticmethod
async def _init_gsheet_client():
with GoogleSheet._client_lock():
def _():
if config.GOOGLE_SERVICE_ACCOUNT is not None:
credentials = ServiceAccountCredentials.from_json_keyfile_dict(
json.loads(config.GOOGLE_SERVICE_ACCOUNT),
scopes=SCOPES)
else:
credentials = ServiceAccountCredentials.from_json_keyfile_name(
"avrae-google.json",
scopes=SCOPES)
return gspread.authorize(credentials)
try:
GoogleSheet.g_client = await asyncio.get_event_loop().run_in_executor(None, _)
except:
GoogleSheet._client_initializing = False
raise
GoogleSheet._token_expiry = datetime.datetime.now() + datetime.timedelta(
seconds=ServiceAccountCredentials.MAX_TOKEN_LIFETIME_SECS)
log.info("Logged in to google")
@staticmethod
async def _refresh_google_token():
with GoogleSheet._client_lock():
def _():
import httplib2
http = httplib2.Http()
GoogleSheet.g_client.auth.refresh(http)
GoogleSheet.g_client.session.headers.update({
'Authorization': 'Bearer %s' % GoogleSheet.g_client.auth.access_token
})
try:
await asyncio.get_event_loop().run_in_executor(None, _)
except:
GoogleSheet._client_initializing = False
raise
log.info("Refreshed google token")
@staticmethod
def _is_expired():
return datetime.datetime.now() > GoogleSheet._token_expiry
# load character data
def _gchar(self):
doc = GoogleSheet.g_client.open_by_key(self.url)
self.character_data = TempCharacter(doc.sheet1)
vcell = self.character_data.value("AQ4")
if '1.3' in vcell:
self.version = (1, 3)
elif vcell:
self.additional = TempCharacter(doc.get_worksheet(1))
self.version = (2, 1) if "2.1" in vcell else (2, 0) if "2" in vcell else (1, 0)
# main loading methods
async def load_character(self, owner_id: str, args):
"""
Downloads and parses the character data, returning a fully-formed Character object.
:raises ExternalImportError if something went wrong during the import that we can expect
:raises Exception if something weirder happened
"""
try:
await self.get_character()
except (KeyError, SpreadsheetNotFound, APIError):
raise ExternalImportError("Invalid character sheet. Make sure you've shared it with me at `<EMAIL>`!")
except Exception:
raise
return await asyncio.get_event_loop().run_in_executor(None, self._load_character, owner_id, args)
def _load_character(self, owner_id: str, args):
upstream = f"google-{self.url}"
active = False
sheet_type = "google"
import_version = SHEET_VERSION
name = self.character_data.value("C6").strip() or "Unnamed"
description = self.get_description()
image = self.character_data.value("C176").strip()
stats = self.get_stats()
levels = self.get_levels()
attacks = self.get_attacks()
skills, saves = self.get_skills_and_saves()
resistances = self.get_resistances()
ac = self.get_ac()
max_hp = self.get_hp()
hp = max_hp
temp_hp = 0
cvars = {}
options = {}
overrides = {}
death_saves = {}
consumables = []
spellbook = self.get_spellbook()
live = None
race = self.get_race()
background = self.get_background()
xp = self.get_xp()
character = Character(
owner_id, upstream, active, sheet_type, import_version, name, description, image, stats, levels, attacks,
skills, resistances, saves, ac, max_hp, hp, temp_hp, cvars, options, overrides, consumables, death_saves,
spellbook, live, race, background, xp
)
return character
async def get_character(self):
if GoogleSheet.g_client is None:
await self._init_gsheet_client()
elif GoogleSheet._is_expired():
await self._refresh_google_token()
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, self._gchar)
# calculator functions
def get_xp(self):
try:
return int(''.join([i for i in self.character_data.value("AE7") if i in '0123456789']))
except (TypeError, ValueError):
raise MissingAttribute("AC")
def get_description(self):
if self.character_data is None: raise Exception('You must call get_character() first.')
character = self.character_data
g = character.value("C150").lower()
n = character.value("C6")
pronoun = "She" if g == "female" else "He" if g == "male" else "They"
verb1 = "is" if pronoun != "They" else "are"
verb2 = "has" if pronoun != "They" else "have"
desc = "{0} is a level {1} {2} {3}. {4} {11} {5} years old, {6} tall, and appears to weigh about {7}." \
"{4} {12} {8} eyes, {9} hair, and {10} skin."
desc = desc.format(n,
character.value("AL6"),
character.value("T7"),
character.value("T5"),
pronoun,
character.value("C148") or "unknown",
character.value("F148") or "unknown",
character.value("I148") or "unknown",
character.value("F150").lower() or "unknown",
character.value("I150").lower() or "unknown",
character.value("L150").lower() or "unknown",
verb1, verb2)
return desc
def get_stats(self):
"""Returns a dict of stats."""
if self.character_data is None: raise Exception('You must call get_character() first.')
character = self.character_data
if self._stats is not None:
return self._stats
try:
prof_bonus = int(character.value("H14"))
except (TypeError, ValueError):
raise MissingAttribute("Proficiency Bonus")
index = 15
stat_dict = {}
for stat in ('strength', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma'):
try:
stat_dict[stat] = int(character.value("C" + str(index)))
index += 5
except (TypeError, ValueError):
raise MissingAttribute(stat)
stats = BaseStats(prof_bonus, **stat_dict)
self._stats = stats
return stats
def get_levels(self):
if self.character_data is None: raise Exception('You must call get_character() first.')
try:
total_level = int(self.character_data.value("AL6"))
self.total_level = total_level
except ValueError:
raise MissingAttribute("Character level")
level_dict = {}
if self.additional:
for rownum in range(69, 79): # sheet2, C69:C78
namecell = f"C{rownum}"
levelcell = f"N{rownum}"
classname = self.additional.value(namecell)
if classname:
classlevel = int(self.additional.value(levelcell))
level_dict[classname] = classlevel
else: # classes should be top-aligned
break
levels = Levels(level_dict, total_level)
return levels
def get_attacks(self):
"""Returns an attack list."""
if self.character_data is None: raise Exception('You must call get_character() first.')
attacks = AttackList()
for rownum in range(32, 37): # sht1, R32:R36
a = self.parse_attack(f"R{rownum}", f"Y{rownum}", f"AC{rownum}")
if a is not None:
attacks.append(a)
if self.additional:
for rownum in range(3, 14): # sht2, B3:B13; W3:W13
additional = self.parse_attack(f"B{rownum}", f"I{rownum}", f"M{rownum}", self.additional)
other = self.parse_attack(f"W{rownum}", f"AD{rownum}", f"AH{rownum}", self.additional)
if additional is not None:
attacks.append(additional)
if other is not None:
attacks.append(other)
return attacks
def get_skills_and_saves(self):
if self.character_data is None: raise Exception('You must call get_character() first.')
character = self.character_data
skills = {}
saves = {}
is_joat = False
all_check_bonus = 0
if self.version == (2, 0):
is_joat = bool(character.value("AR45"))
all_check_bonus = int(character.value("AQ26") or 0)
elif self.version == (2, 1):
is_joat = bool(character.value("AQ59"))
all_check_bonus = int(character.value("AR58"))
joat_bonus = int(is_joat and self.get_stats().prof_bonus // 2)
# calculate str, dex, con, etc checks
for cell, skill, advcell in BASE_ABILITY_CHECKS:
try:
# add bonuses manually since the cell does not include them
value = int(character.value(cell)) + all_check_bonus + joat_bonus
except (TypeError, ValueError):
raise MissingAttribute(skill)
prof = 0
if is_joat:
prof = 0.5
skl_obj = Skill(value, prof)
skills[skill] = skl_obj
# read the value of the rest of the skills
for cell, skill, advcell in SKILL_CELL_MAP:
if isinstance(cell, int):
advcell = f"F{cell}"
profcell = f"H{cell}"
cell = f"I{cell}"
else:
profcell = None
try:
value = int(character.value(cell))
except (TypeError, ValueError):
raise MissingAttribute(skill)
adv = None
if self.version >= (2, 0) and advcell:
advtype = character.unformatted_value(advcell)
if advtype in {'a', 'adv', 'advantage'}:
adv = True
elif advtype in {'d', 'dis', 'disadvantage'}:
adv = False
prof = 0
if "Save" not in skill and is_joat:
prof = 0.5
if profcell:
proftype = character.unformatted_value(profcell)
if proftype == 'e':
prof = 2
elif proftype and proftype != '0':
prof = 1
skl_obj = Skill(value, prof, adv=adv)
if "Save" in skill:
saves[skill] = skl_obj
else:
skills[skill] = skl_obj
skills = Skills(skills)
saves = Saves(saves)
return skills, saves
def get_resistances(self):
out = {'resist': [], 'immune': [], 'vuln': []}
if not self.additional: # requires 2.0
return Resistances.from_dict(out)
for rownum in range(69, 80):
for resist_type, col in RESIST_COLS:
try:
dtype = self.additional.value(f"{col}{rownum}")
except IndexError:
dtype = None
if dtype:
out[resist_type].append(dtype.lower())
return Resistances.from_dict(out)
def get_ac(self):
try:
return int(self.character_data.value("R12"))
except (TypeError, ValueError):
raise MissingAttribute("AC")
def get_hp(self):
try:
return int(self.character_data.value("U16"))
except (TypeError, ValueError):
raise MissingAttribute("Max HP")
def get_race(self):
return self.character_data.value('T7').strip()
def get_background(self):
if self.version >= (2, 0):
return self.character_data.value('AJ11').strip()
return self.character_data.value('Z5').strip()
def get_spellbook(self):
if self.character_data is None: raise Exception('You must call get_character() first.')
# max slots
slots = {
'1': int(self.character_data.value("AK101") or 0),
'2': int(self.character_data.value("E107") or 0),
'3': int(self.character_data.value("AK113") or 0),
'4': int(self.character_data.value("E119") or 0),
'5': int(self.character_data.value("AK124") or 0),
'6': int(self.character_data.value("E129") or 0),
'7': int(self.character_data.value("AK134") or 0),
'8': int(self.character_data.value("E138") or 0),
'9': int(self.character_data.value("AK142") or 0)
}
# spells C96:AH143
potential_spells = self.character_data.value_range("D96:AH143")
if self.additional:
potential_spells.extend(self.additional.value_range("D17:AH64"))
spells = []
for value in potential_spells:
value = value.strip()
if len(value) > 2 and value not in IGNORED_SPELL_VALUES:
log.debug(f"Searching for spell {value}")
result, strict = search(compendium.spells, value, lambda sp: sp.name, strict=True)
if result and strict:
spells.append(SpellbookSpell(result.name, True))
else:
spells.append(SpellbookSpell(value.strip()))
# dc
try:
dc = int(self.character_data.value("AB91") or 0)
except ValueError:
dc = None
# sab
try:
sab = int(self.character_data.value("AI91") or 0)
except ValueError:
sab = None
# spellcasting mod
spell_mod_value = self.character_data.value("U91")
spell_mod = None
if spell_mod_value: # it might be in the form of a ability name, or an int, wjdk
try:
spell_mod = self.get_stats().get_mod(spell_mod_value)
except ValueError:
try:
spell_mod = int(spell_mod_value)
except (TypeError, ValueError):
spell_mod = None
spellbook = Spellbook(slots, slots, spells, dc, sab, self.total_level, spell_mod)
return spellbook
# helper methods
def parse_attack(self, name_index, bonus_index, damage_index, sheet=None):
"""Calculates and returns a dict."""
if self.character_data is None: raise Exception('You must call get_character() first.')
wksht = sheet or self.character_data
name = wksht.value(name_index)
damage = wksht.value(damage_index)
bonus = wksht.value(bonus_index)
details = None
if not name:
return None
if not damage:
damage = None
else:
details = None
if '|' in damage:
damage, details = damage.split('|', 1)
dice, comment = get_roll_comment(damage)
if details:
details = details.strip()
if any(d in comment.lower() for d in DAMAGE_TYPES):
damage = "{}[{}]".format(dice, comment)
else:
damage = dice
if comment.strip() and not details:
damage = comment.strip()
if bonus:
try:
bonus = int(bonus)
except (TypeError, ValueError):
bonus = None
else:
bonus = None
attack = Attack.new(name, bonus, damage, details)
return attack
import asyncio
import json
from api.avrae.utils.argparser import argparse
def getJSON_gsheet(url):
parser = GoogleSheet(url)
char = asyncio.get_event_loop().run_until_complete(parser.load_character("", argparse("")))
'''print(json.dumps(parser.calculated_stats, indent=2))
print(f"set: {parser.set_calculated_stats}")
input("press enter to view character data")'''
return json.dumps(char.to_dict(), indent=2) | """
Created on May 8, 2017
@author: andrew
"""
import asyncio
import datetime
import json
import logging
import os
import re
from contextlib import contextmanager
import gspread
from gspread import SpreadsheetNotFound
from gspread.exceptions import APIError
from gspread.utils import a1_to_rowcol, fill_gaps
from oauth2client.service_account import ServiceAccountCredentials
from api.avrae.cogs5e.funcs.dice import get_roll_comment
from api.avrae.cogs5e.funcs.lookupFuncs import compendium
from api.avrae.cogs5e.models.character import Character
from api.avrae.cogs5e.models.errors import ExternalImportError
from api.avrae.cogs5e.models.sheet.attack import Attack, AttackList
from api.avrae.cogs5e.models.sheet.base import BaseStats, Levels, Resistances, Saves, Skill, Skills
from api.avrae.cogs5e.models.sheet.spellcasting import Spellbook, SpellbookSpell
from api.avrae.cogs5e.sheets.abc import SHEET_VERSION, SheetLoaderABC
from api.avrae.cogs5e.sheets.errors import MissingAttribute
from api.avrae.utils import config
from api.avrae.utils.constants import DAMAGE_TYPES
from api.avrae.utils.functions import search
log = logging.getLogger(__name__)
POS_RE = re.compile(r"([A-Z]+)(\d+)")
IGNORED_SPELL_VALUES = {
'MAX', 'SLOTS', 'CANTRIPS', '1ST LEVEL', '2ND LEVEL', '3RD LEVEL', '4TH LEVEL', '5TH LEVEL',
'6TH LEVEL', '7TH LEVEL', '8TH LEVEL', '9TH LEVEL', '\u25c9', '\u25cd',
"You can hide each level of spells individually by hiding the rows (on the left)."
}
BASE_ABILITY_CHECKS = ( # list of (MOD_CELL/ROW, SKILL_NAME, ADV_CELL)
('C13', 'strength', None), ('C18', 'dexterity', None), ('C23', 'constitution', None),
('C33', 'wisdom', None), ('C28', 'intelligence', None), ('C38', 'charisma', None)
)
SKILL_CELL_MAP = ( # list of (MOD_CELL/ROW, SKILL_NAME, ADV_CELL)
(25, 'acrobatics', None), (26, 'animalHandling', None), (27, 'arcana', None),
(28, 'athletics', None), (22, 'charismaSave', None), (19, 'constitutionSave', None),
(29, 'deception', None), (18, 'dexteritySave', None), (30, 'history', None),
('V12', 'initiative', 'V11'), (31, 'insight', None), (20, 'intelligenceSave', None),
(32, 'intimidation', None), (33, 'investigation', None), (34, 'medicine', None),
(35, 'nature', None), (36, 'perception', None), (37, 'performance', None),
(38, 'persuasion', None), (39, 'religion', None), (40, 'sleightOfHand', None),
(41, 'stealth', None), (17, 'strengthSave', None), (42, 'survival', None),
(21, 'wisdomSave', None)
)
RESIST_COLS = (('resist', 'T'), # T69:T79, 1.4/2.x
('immune', 'AE'), # AE69:AE79, 1.4/2.0
('immune', 'AB'), # AB69:AB79, 2.1 only
('vuln', 'AI')) # AI69:AI79, 2.1 only
SCOPES = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
def letter2num(letters, zbase=True):
"""A = 1, C = 3 and so on. Convert spreadsheet style column
enumeration to a number.
"""
letters = letters.upper()
res = 0
weight = len(letters) - 1
for i, ch in enumerate(letters):
res += (ord(ch) - 64) * 26 ** (weight - i)
if not zbase:
return res
return res - 1
class TempCharacter:
def __init__(self, worksheet):
self.worksheet = worksheet
self.values = worksheet.get_all_values()
self.unformatted_values = self._get_all_unformatted_values()
def _get_all_unformatted_values(self):
data = self.worksheet.spreadsheet.values_get(
self.worksheet.title,
params={'valueRenderOption': "UNFORMATTED_VALUE"})
try:
return fill_gaps(data['values'])
except KeyError:
return []
@staticmethod
def _get_value(source, pos):
_pos = POS_RE.match(pos)
if _pos is None:
raise ValueError("No A1-style position found.")
col = letter2num(_pos.group(1))
row = int(_pos.group(2)) - 1
if row > len(source) or col > len(source[row]):
raise IndexError("Cell out of bounds.")
value = source[row][col]
log.debug(f"Cell {pos}: {value}")
return value
def value(self, pos):
return self._get_value(self.values, pos)
def unformatted_value(self, pos):
return self._get_value(self.unformatted_values, pos)
def value_range(self, rng):
"""Returns a list of values in a range."""
start, end = rng.split(':')
(row_offset, column_offset) = a1_to_rowcol(start)
(last_row, last_column) = a1_to_rowcol(end)
out = []
for col in self.values[row_offset - 1:last_row]:
out.extend(col[column_offset - 1:last_column])
return out
class GoogleSheet(SheetLoaderABC):
g_client = None
_client_initializing = False
_token_expiry = None
def __init__(self, url):
super(GoogleSheet, self).__init__(url)
self.additional = None
self.version = (1, 0) # major, minor
self.total_level = 0
# cache
self._stats = None
# google api stuff
@staticmethod
@contextmanager
def _client_lock():
if GoogleSheet._client_initializing:
raise ExternalImportError("I am still connecting to google. Try again in a few seconds.")
GoogleSheet._client_initializing = True
yield
GoogleSheet._client_initializing = False
@staticmethod
async def _init_gsheet_client():
with GoogleSheet._client_lock():
def _():
if config.GOOGLE_SERVICE_ACCOUNT is not None:
credentials = ServiceAccountCredentials.from_json_keyfile_dict(
json.loads(config.GOOGLE_SERVICE_ACCOUNT),
scopes=SCOPES)
else:
credentials = ServiceAccountCredentials.from_json_keyfile_name(
"avrae-google.json",
scopes=SCOPES)
return gspread.authorize(credentials)
try:
GoogleSheet.g_client = await asyncio.get_event_loop().run_in_executor(None, _)
except:
GoogleSheet._client_initializing = False
raise
GoogleSheet._token_expiry = datetime.datetime.now() + datetime.timedelta(
seconds=ServiceAccountCredentials.MAX_TOKEN_LIFETIME_SECS)
log.info("Logged in to google")
@staticmethod
async def _refresh_google_token():
with GoogleSheet._client_lock():
def _():
import httplib2
http = httplib2.Http()
GoogleSheet.g_client.auth.refresh(http)
GoogleSheet.g_client.session.headers.update({
'Authorization': 'Bearer %s' % GoogleSheet.g_client.auth.access_token
})
try:
await asyncio.get_event_loop().run_in_executor(None, _)
except:
GoogleSheet._client_initializing = False
raise
log.info("Refreshed google token")
@staticmethod
def _is_expired():
return datetime.datetime.now() > GoogleSheet._token_expiry
# load character data
def _gchar(self):
doc = GoogleSheet.g_client.open_by_key(self.url)
self.character_data = TempCharacter(doc.sheet1)
vcell = self.character_data.value("AQ4")
if '1.3' in vcell:
self.version = (1, 3)
elif vcell:
self.additional = TempCharacter(doc.get_worksheet(1))
self.version = (2, 1) if "2.1" in vcell else (2, 0) if "2" in vcell else (1, 0)
# main loading methods
async def load_character(self, owner_id: str, args):
"""
Downloads and parses the character data, returning a fully-formed Character object.
:raises ExternalImportError if something went wrong during the import that we can expect
:raises Exception if something weirder happened
"""
try:
await self.get_character()
except (KeyError, SpreadsheetNotFound, APIError):
raise ExternalImportError("Invalid character sheet. Make sure you've shared it with me at `<EMAIL>`!")
except Exception:
raise
return await asyncio.get_event_loop().run_in_executor(None, self._load_character, owner_id, args)
def _load_character(self, owner_id: str, args):
upstream = f"google-{self.url}"
active = False
sheet_type = "google"
import_version = SHEET_VERSION
name = self.character_data.value("C6").strip() or "Unnamed"
description = self.get_description()
image = self.character_data.value("C176").strip()
stats = self.get_stats()
levels = self.get_levels()
attacks = self.get_attacks()
skills, saves = self.get_skills_and_saves()
resistances = self.get_resistances()
ac = self.get_ac()
max_hp = self.get_hp()
hp = max_hp
temp_hp = 0
cvars = {}
options = {}
overrides = {}
death_saves = {}
consumables = []
spellbook = self.get_spellbook()
live = None
race = self.get_race()
background = self.get_background()
xp = self.get_xp()
character = Character(
owner_id, upstream, active, sheet_type, import_version, name, description, image, stats, levels, attacks,
skills, resistances, saves, ac, max_hp, hp, temp_hp, cvars, options, overrides, consumables, death_saves,
spellbook, live, race, background, xp
)
return character
async def get_character(self):
if GoogleSheet.g_client is None:
await self._init_gsheet_client()
elif GoogleSheet._is_expired():
await self._refresh_google_token()
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, self._gchar)
# calculator functions
def get_xp(self):
try:
return int(''.join([i for i in self.character_data.value("AE7") if i in '0123456789']))
except (TypeError, ValueError):
raise MissingAttribute("AC")
    def get_description(self):
        """Build a one-paragraph English description from appearance cells.

        Reads gender (C150), name (C6), level/race/class (AL6/T7/T5), and the
        age/height/weight/eyes/hair/skin cells, substituting "unknown" for any
        blank value. Pronoun and verb agreement follow the gender cell.
        """
        if self.character_data is None: raise Exception('You must call get_character() first.')
        character = self.character_data
        g = character.value("C150").lower()
        n = character.value("C6")
        # Default to singular "they" when gender is not exactly female/male.
        pronoun = "She" if g == "female" else "He" if g == "male" else "They"
        verb1 = "is" if pronoun != "They" else "are"
        verb2 = "has" if pronoun != "They" else "have"
        desc = "{0} is a level {1} {2} {3}. {4} {11} {5} years old, {6} tall, and appears to weigh about {7}." \
               "{4} {12} {8} eyes, {9} hair, and {10} skin."
        desc = desc.format(n,
                           character.value("AL6"),
                           character.value("T7"),
                           character.value("T5"),
                           pronoun,
                           character.value("C148") or "unknown",
                           character.value("F148") or "unknown",
                           character.value("I148") or "unknown",
                           character.value("F150").lower() or "unknown",
                           character.value("I150").lower() or "unknown",
                           character.value("L150").lower() or "unknown",
                           verb1, verb2)
        return desc
def get_stats(self):
"""Returns a dict of stats."""
if self.character_data is None: raise Exception('You must call get_character() first.')
character = self.character_data
if self._stats is not None:
return self._stats
try:
prof_bonus = int(character.value("H14"))
except (TypeError, ValueError):
raise MissingAttribute("Proficiency Bonus")
index = 15
stat_dict = {}
for stat in ('strength', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma'):
try:
stat_dict[stat] = int(character.value("C" + str(index)))
index += 5
except (TypeError, ValueError):
raise MissingAttribute(stat)
stats = BaseStats(prof_bonus, **stat_dict)
self._stats = stats
return stats
    def get_levels(self):
        """Parse total and per-class levels into a Levels object.

        Side effect: stores the total in self.total_level (get_spellbook
        reads it later).
        """
        if self.character_data is None: raise Exception('You must call get_character() first.')
        try:
            total_level = int(self.character_data.value("AL6"))
            self.total_level = total_level
        except ValueError:
            raise MissingAttribute("Character level")
        level_dict = {}
        # Per-class breakdown only exists on sheets with the additional tab.
        if self.additional:
            for rownum in range(69, 79):  # sheet2, C69:C78
                namecell = f"C{rownum}"
                levelcell = f"N{rownum}"
                classname = self.additional.value(namecell)
                if classname:
                    # NOTE(review): assumes the level cell is numeric when a
                    # class name is present; int() will raise otherwise.
                    classlevel = int(self.additional.value(levelcell))
                    level_dict[classname] = classlevel
                else:  # classes should be top-aligned
                    break
        levels = Levels(level_dict, total_level)
        return levels
def get_attacks(self):
"""Returns an attack list."""
if self.character_data is None: raise Exception('You must call get_character() first.')
attacks = AttackList()
for rownum in range(32, 37): # sht1, R32:R36
a = self.parse_attack(f"R{rownum}", f"Y{rownum}", f"AC{rownum}")
if a is not None:
attacks.append(a)
if self.additional:
for rownum in range(3, 14): # sht2, B3:B13; W3:W13
additional = self.parse_attack(f"B{rownum}", f"I{rownum}", f"M{rownum}", self.additional)
other = self.parse_attack(f"W{rownum}", f"AD{rownum}", f"AH{rownum}", self.additional)
if additional is not None:
attacks.append(additional)
if other is not None:
attacks.append(other)
return attacks
    def get_skills_and_saves(self):
        """Parse all skills and saving throws into (Skills, Saves).

        Handles both sheet layouts: v2.0 and v2.1 keep the Jack of All Trades
        flag and the global check bonus in different cells, and only v2.0+
        sheets carry per-skill advantage cells.
        """
        if self.character_data is None: raise Exception('You must call get_character() first.')
        character = self.character_data
        skills = {}
        saves = {}
        is_joat = False
        all_check_bonus = 0
        # JoaT flag and the misc "all checks" bonus moved between versions.
        if self.version == (2, 0):
            is_joat = bool(character.value("AR45"))
            all_check_bonus = int(character.value("AQ26") or 0)
        elif self.version == (2, 1):
            is_joat = bool(character.value("AQ59"))
            all_check_bonus = int(character.value("AR58"))
        # Jack of All Trades adds half proficiency to non-proficient checks.
        joat_bonus = int(is_joat and self.get_stats().prof_bonus // 2)
        # calculate str, dex, con, etc checks
        for cell, skill, advcell in BASE_ABILITY_CHECKS:
            try:
                # add bonuses manually since the cell does not include them
                value = int(character.value(cell)) + all_check_bonus + joat_bonus
            except (TypeError, ValueError):
                raise MissingAttribute(skill)
            prof = 0
            if is_joat:
                prof = 0.5
            skl_obj = Skill(value, prof)
            skills[skill] = skl_obj
        # read the value of the rest of the skills
        for cell, skill, advcell in SKILL_CELL_MAP:
            # An int entry is a row number; derive the adv/prof/value cells.
            if isinstance(cell, int):
                advcell = f"F{cell}"
                profcell = f"H{cell}"
                cell = f"I{cell}"
            else:
                profcell = None
            try:
                value = int(character.value(cell))
            except (TypeError, ValueError):
                raise MissingAttribute(skill)
            adv = None
            # Advantage markers are only present on v2.0+ sheets.
            if self.version >= (2, 0) and advcell:
                advtype = character.unformatted_value(advcell)
                if advtype in {'a', 'adv', 'advantage'}:
                    adv = True
                elif advtype in {'d', 'dis', 'disadvantage'}:
                    adv = False
            prof = 0
            # Saves never get the JoaT half-proficiency bonus.
            if "Save" not in skill and is_joat:
                prof = 0.5
            if profcell:
                proftype = character.unformatted_value(profcell)
                # 'e' marks expertise (x2); any other truthy marker is
                # plain proficiency.
                if proftype == 'e':
                    prof = 2
                elif proftype and proftype != '0':
                    prof = 1
            skl_obj = Skill(value, prof, adv=adv)
            # Saving throws and skills share the cell map; split them here.
            if "Save" in skill:
                saves[skill] = skl_obj
            else:
                skills[skill] = skl_obj
        skills = Skills(skills)
        saves = Saves(saves)
        return skills, saves
def get_resistances(self):
out = {'resist': [], 'immune': [], 'vuln': []}
if not self.additional: # requires 2.0
return Resistances.from_dict(out)
for rownum in range(69, 80):
for resist_type, col in RESIST_COLS:
try:
dtype = self.additional.value(f"{col}{rownum}")
except IndexError:
dtype = None
if dtype:
out[resist_type].append(dtype.lower())
return Resistances.from_dict(out)
def get_ac(self):
try:
return int(self.character_data.value("R12"))
except (TypeError, ValueError):
raise MissingAttribute("AC")
def get_hp(self):
try:
return int(self.character_data.value("U16"))
except (TypeError, ValueError):
raise MissingAttribute("Max HP")
def get_race(self):
return self.character_data.value('T7').strip()
def get_background(self):
if self.version >= (2, 0):
return self.character_data.value('AJ11').strip()
return self.character_data.value('Z5').strip()
    def get_spellbook(self):
        """Parse spell slots, known spells, DC, attack bonus, and casting mod.

        Known spells are fuzzy-matched against the compendium; a strict match
        links the spell, otherwise the raw sheet text is kept unlinked.
        """
        if self.character_data is None: raise Exception('You must call get_character() first.')
        # max slots
        slots = {
            '1': int(self.character_data.value("AK101") or 0),
            '2': int(self.character_data.value("E107") or 0),
            '3': int(self.character_data.value("AK113") or 0),
            '4': int(self.character_data.value("E119") or 0),
            '5': int(self.character_data.value("AK124") or 0),
            '6': int(self.character_data.value("E129") or 0),
            '7': int(self.character_data.value("AK134") or 0),
            '8': int(self.character_data.value("E138") or 0),
            '9': int(self.character_data.value("AK142") or 0)
        }
        # spells C96:AH143
        potential_spells = self.character_data.value_range("D96:AH143")
        if self.additional:
            potential_spells.extend(self.additional.value_range("D17:AH64"))
        spells = []
        # Short strings and known layout labels are noise, not spell names.
        for value in potential_spells:
            value = value.strip()
            if len(value) > 2 and value not in IGNORED_SPELL_VALUES:
                log.debug(f"Searching for spell {value}")
                result, strict = search(compendium.spells, value, lambda sp: sp.name, strict=True)
                if result and strict:
                    spells.append(SpellbookSpell(result.name, True))
                else:
                    # No strict match: keep the sheet's text as-is (unlinked).
                    spells.append(SpellbookSpell(value.strip()))
        # dc
        try:
            dc = int(self.character_data.value("AB91") or 0)
        except ValueError:
            dc = None
        # sab
        try:
            sab = int(self.character_data.value("AI91") or 0)
        except ValueError:
            sab = None
        # spellcasting mod
        spell_mod_value = self.character_data.value("U91")
        spell_mod = None
        if spell_mod_value:  # it might be in the form of a ability name, or an int, wjdk
            try:
                spell_mod = self.get_stats().get_mod(spell_mod_value)
            except ValueError:
                try:
                    spell_mod = int(spell_mod_value)
                except (TypeError, ValueError):
                    spell_mod = None
        # Current slots start equal to max slots on a fresh import.
        spellbook = Spellbook(slots, slots, spells, dc, sab, self.total_level, spell_mod)
        return spellbook
# helper methods
def parse_attack(self, name_index, bonus_index, damage_index, sheet=None):
"""Calculates and returns a dict."""
if self.character_data is None: raise Exception('You must call get_character() first.')
wksht = sheet or self.character_data
name = wksht.value(name_index)
damage = wksht.value(damage_index)
bonus = wksht.value(bonus_index)
details = None
if not name:
return None
if not damage:
damage = None
else:
details = None
if '|' in damage:
damage, details = damage.split('|', 1)
dice, comment = get_roll_comment(damage)
if details:
details = details.strip()
if any(d in comment.lower() for d in DAMAGE_TYPES):
damage = "{}[{}]".format(dice, comment)
else:
damage = dice
if comment.strip() and not details:
damage = comment.strip()
if bonus:
try:
bonus = int(bonus)
except (TypeError, ValueError):
bonus = None
else:
bonus = None
attack = Attack.new(name, bonus, damage, details)
return attack
import asyncio
import json
from api.avrae.utils.argparser import argparse
def getJSON_gsheet(url):
    """Parse the Google Sheet character at *url* and return it as a JSON string.

    Blocks the current thread until the asynchronous sheet download and
    parse complete; the character is imported with an empty owner id and
    no arguments.
    """
    parser = GoogleSheet(url)
    char = asyncio.get_event_loop().run_until_complete(parser.load_character("", argparse("")))
    # (A commented-out debug dump of parser.calculated_stats previously lived
    # here; dead code removed.)
    return json.dumps(char.to_dict(), indent=2)
src/pyiem/windrose_utils.py | akrherz/pyIEM | 29 | 6618135 | <reponame>akrherz/pyIEM
"""util script to call `windrose` package"""
from datetime import datetime, timezone
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
import numpy as np
import pandas as pd
from pandas.io.sql import read_sql
from metpy.units import units as mpunits
from pyiem.plot.util import fitbox
from pyiem.plot.windrose import histogram, plot, WindrosePlot
from pyiem.util import get_dbconn
from pyiem.network import Table as NetworkTable
# Supported wind speed units: human label, pint unit, and the default
# speed bins (in that unit) used for surface windrose plots.
WINDUNITS = {
    "mph": {
        "label": "miles per hour",
        "units": mpunits("mph"),
        "bins": (2, 5, 7, 10, 15, 20),
    },
    "kts": {
        "label": "knots",
        "units": mpunits("knots"),
        "bins": (2, 5, 7, 10, 15, 20),
    },
    "mps": {
        "label": "meters per second",
        "units": mpunits("meter / second"),
        "bins": (1, 4, 6, 8, 10, 12),
    },
    "kph": {
        "label": "kilometers per hour",
        "units": mpunits("kilometer / hour"),
        "bins": (4, 10, 14, 20, 30, 40),
    },
}
# Speed bins used for upper-air (RAOB) windroses, keyed by unit.
RAOB_BINS = {
    "mph": [2, 25, 50, 75, 100, 150],
    # BUGFIX: the last kts bin was 15, breaking the required monotonic
    # increase of histogram bin edges; it is 150, matching the mph row.
    "kts": [2, 25, 50, 75, 100, 150],
    "mps": [1, 10, 15, 25, 50, 75],
    "kph": [4, 10, 14, 20, 30, 40],
}
def _get_timeinfo(arr, datepart, fullsize, tzname):
"""Convert the months/hours array provided into label text and SQL
Args:
arr (list): A list of ints
datepart (str): the part to extract from the database timestamp
fullsize (int): the size of specifying all dates
tzname (str): The timezone to compute this limiter in.
Returns:
dict with keys `sqltext` and `labeltext`
"""
sql = ""
lbl = "All included"
if len(arr) == 1:
sql = " and extract(%s from valid%s) = %s " % (
datepart,
"" if tzname is None else " at time zone '%s'" % (tzname,),
arr[0],
)
lbl = str(tuple(arr))
elif len(arr) < fullsize:
sql = (" and extract(%s from valid%s) in %s ") % (
datepart,
"" if tzname is None else " at time zone '%s'" % (tzname,),
(str(tuple(arr))).replace("'", ""),
)
lbl = str(tuple(arr))
return dict(sqltext=sql, labeltext=lbl)
def _get_data(station, database, sts, ets, monthinfo, hourinfo, level):
    """Helper function to get data out of IEM databases

    Args:
      station (str): the station identifier
      database (str): the name of the database to connect to, we assume we can
        then query a table called `alldata`
      sts (datetime): the floor to query data for
      ets (datetime): the ceiling to query data for
      monthinfo (dict): information on how to query for months
      hourinfo (dict): information on how to query for hours
      level (int): in case of RAOB, which pressure level (hPa)

    Returns:
      pandas.DataFrame of the data
    """
    # Query observations
    db = get_dbconn(database, user="nobody")
    rlimiter = ""
    # ASOS stores multiple report types; type 2 is the routine observation.
    if database == "asos":
        rlimiter = " and report_type = 2 "
    # NOTE(review): station/sts/ets are interpolated directly into SQL;
    # assumed to come from trusted callers -- confirm before exposing.
    sql = (
        "SELECT sknt, drct, valid at time zone 'UTC' as valid "
        f"from alldata WHERE station = '{station}' "
        f"and valid > '{sts}' and valid < '{ets}' {monthinfo['sqltext']} "
        f"{hourinfo['sqltext']} {rlimiter}"
    )
    if level is not None:  # HACK!
        db = get_dbconn("raob")
        # here comes another hack, stations with starting with _ are virtual
        # "ZZZZ" is a never-matching placeholder so the IN tuple stays valid.
        stations = [station, "ZZZZ"]
        if station.startswith("_"):
            nt = NetworkTable("RAOB")
            stations = (
                nt.sts.get(station, {})
                .get("name", "X--YYY ZZZ")
                .split("--")[1]
                .strip()
                .split(" ")
            )
        # smps * 1.94384 converts meters per second to knots.
        sql = """SELECT p.smps * 1.94384 as sknt, p.drct,
        f.valid at time zone 'UTC' as valid from
        raob_flights f JOIN raob_profile p on (f.fid = p.fid) WHERE
        f.station in %s and p.pressure = %s and p.smps is not null
        and p.drct is not null and valid >= '%s' and valid < '%s'
        %s
        %s
        """ % (
            str(tuple(stations)),
            level,
            sts,
            ets,
            monthinfo["sqltext"],
            hourinfo["sqltext"],
        )
    df = read_sql(sql, db, index_col=None)
    if not df.empty:
        # Make valid column timezone aware
        df["valid"] = df["valid"].dt.tz_localize(timezone.utc)
    # If sknt or drct are null, we want to set the other to null as well
    df.loc[pd.isnull(df["drct"]), "sknt"] = None
    df.loc[pd.isnull(df["sknt"]), "drct"] = None
    return df
def _make_textresult(
    station,
    df,
    units,
    nsector,
    sname,
    monthinfo,
    hourinfo,
    level,
    bins,
    tzname,
):
    """Generate a text table of windrose information

    Args:
      station (str): the station identifier
      df (pd.DataFrame): The dataframe with data
      units (str): the units of the `sknt` values
      nsector (int): number of sectors to divide rose into
      sname (str): the station name
      monthinfo (dict): information on month limiting
      hourinfo (dict): information on hour limiting
      level (int): in case of RAOB, which level do we care for
      bins (list): values to bin the wind speeds
      tzname (str): Time zone for the report.

    Returns:
      str of information"""
    if df.empty:
        return "No Data Found"
    wu = WINDUNITS[units]
    # Use the unit defaults when no pint-quantified bins were provided.
    if not hasattr(bins, "units"):
        bins = wu["bins"] * wu["units"]
    # RAOB speeds are much larger; use the upper-air bin set instead.
    if level is not None:
        bins = RAOB_BINS[units] * wu["units"]
    # Effectively filters out the nulls
    df2 = df[df["drct"] >= 0]
    speed = df2["sknt"].values * mpunits("knots")
    direction = df2["drct"].values * mpunits("degree")
    calm_percent, dir_centers, table = histogram(
        speed, direction, bins, nsector
    )
    res = ("# Windrose Data Table (Percent Frequency) " "for %s (%s)\n") % (
        sname if sname is not None else "((%s))" % (station,),
        station,
    )
    res += ("# Observations Used/Missing/Total: %s/%s/%s\n") % (
        len(df2.index),
        len(df.index) - len(df2.index),
        len(df.index),
    )
    res += f"# {_time_domain_string(df, tzname)}\n"
    res += "# Hour Limiter: %s\n" % (hourinfo["labeltext"],)
    res += "# Month Limiter: %s\n" % (monthinfo["labeltext"],)
    res += "# Wind Speed Units: %s\n" % (wu["label"],)
    if level is not None:
        res += "# RAOB Pressure (hPa) Level: %s\n" % (level,)
    # NOTE(review): datetime.utcnow() is naive; fine for this label but
    # deprecated in newer Pythons -- consider datetime.now(timezone.utc).
    res += ("# Generated %s UTC, contact: <EMAIL>\n") % (
        datetime.utcnow().strftime("%d %b %Y %H:%M"),
    )
    res += "# First value in table is CALM\n"
    cols = ["Direction", "Calm"]
    # Print out Speed Bins
    for i, val in enumerate(bins.m):
        # Last bin is open-ended ("+"); others show an inclusive upper edge.
        maxval = (
            "+"
            if i == bins.m.shape[0] - 1
            else " %4.1f" % (bins.m[i + 1] - 0.1,)
        )
        cols.append("%4.1f%s" % (val, maxval))
    delta = dir_centers.m[1] - dir_centers.m[0]
    res += ",".join(["%9s" % (c,) for c in cols]) + "\n"
    for i, val in enumerate(dir_centers.m):
        # Each row covers [center - delta/2, center + delta/2), wrapped at 360.
        minval = val - delta / 2.0
        if minval < 0:
            minval += 360.0
        maxval = np.min([360, val + delta / 2.0 - 1])
        # The calm percentage is only printed on the first row.
        res += "%03i-%03i ,%9s," % (
            minval,
            maxval,
            np.round(calm_percent.m, 2) if i == 0 else "",
        )
        res += ",".join(
            ["%9.3f" % (table.m[i, j],) for j in range(bins.m.shape[0])]
        )
        res += "\n"
    return res
def _time_domain_string(df, tzname):
"""Custom time label option."""
sts = df["valid"].min().to_pydatetime()
ets = df["valid"].max().to_pydatetime()
timeformat = "%d %b %Y %I:%M %p"
if tzname is not None:
sts = sts.astimezone(ZoneInfo(tzname))
ets = ets.astimezone(ZoneInfo(tzname))
if tzname == "UTC":
timeformat = "%d %b %Y %H:%M"
return "%s - %s %s" % (
sts.strftime(timeformat),
ets.strftime(timeformat),
"" if tzname is None else tzname,
)
def _make_plot(
    station,
    df,
    units,
    nsector,
    rmax,
    hours,
    months,
    sname,
    level,
    bins,
    tzname,
    **kwargs,
):
    """Generate a matplotlib windrose plot

    Args:
      station (str): station identifier
      df (pd.DataFrame): observations
      drct (list): list of wind directions
      units (str): units of wind speed
      nsector (int): number of bins to use for windrose
      rmax (float): radius of the plot
      hours (list): hour limit for plot
      month (list): month limit for plot
      sname (str): station name
      level (int): RAOB level in hPa of interest
      bins (list): values for binning the wind speeds
      tzname (str): Time zone this plot is produced in.
      cmap (colormap): Matplotlib colormap to use.

    Returns:
      matplotlib.Figure
    """
    wu = WINDUNITS[units]
    # Filters the missing values
    df2 = df[df["drct"] >= 0]
    direction = df2["drct"].values * mpunits("degree")
    # Prefer the pre-converted "speed" column when windrose() created it.
    if "speed" in df2.columns:
        speed = df2["speed"].values * wu["units"]
    else:
        speed = df2["sknt"].values * mpunits("knots")
    if not hasattr(bins, "units"):
        bins = wu["bins"] * wu["units"]
    if level is not None:
        bins = RAOB_BINS[units] * wu["units"]
    # Too few observations: return a placeholder figure instead of a rose.
    if len(df2.index) < 5:
        wp = WindrosePlot()
        wp.ax.text(
            0.5,
            0.5,
            "Not Enough Data For Plot.",
            ha="center",
            transform=wp.ax.transAxes,
        )
        return wp.fig
    wp = plot(
        direction,
        speed,
        bins=bins,
        nsector=nsector,
        rmax=rmax,
        cmap=kwargs.get("cmap"),
    )
    # Now we put some fancy debugging info on the plot
    tlimit = "[Time Domain: "
    # No label at all when every hour and month is included.
    if len(hours) == 24 and len(months) == 12:
        tlimit = ""
    if len(hours) < 24:
        # Long hour lists render as a range; short ones are enumerated.
        if len(hours) > 4:
            tlimit += "%s-%s" % (
                datetime(2000, 1, 1, hours[0]).strftime("%-I %p"),
                datetime(2000, 1, 1, hours[-1]).strftime("%-I %p"),
            )
        else:
            for h in hours:
                tlimit += "%s," % (datetime(2000, 1, 1, h).strftime("%-I %p"),)
    if len(months) < 12:
        for h in months:
            tlimit += "%s," % (datetime(2000, h, 1).strftime("%b"),)
    if tlimit != "":
        tlimit += "]"
    label = ("[%s] %s%s\n" "Windrose Plot %s\n" "Time Bounds: %s") % (
        station,
        sname if sname is not None else "((%s))" % (station,),
        "" if level is None else " @%s hPa" % (level,),
        tlimit,
        _time_domain_string(df, tzname),
    )
    fitbox(wp.fig, label, 0.14, 0.99, 0.92, 0.99, ha="left")
    label = ("Summary\nobs count: %s\nMissing: %s\nAvg Speed: %.1f %s") % (
        len(df.index),
        len(df.index) - len(df2.index),
        speed.m.mean(),
        units,
    )
    wp.fig.text(0.96, 0.11, label, ha="right", fontsize=14)
    # nogenerated suppresses the timestamp (useful for reproducible tests).
    if not kwargs.get("nogenerated", False):
        wp.fig.text(
            0.02,
            0.1,
            "Generated: %s" % (datetime.now().strftime("%d %b %Y"),),
            verticalalignment="bottom",
            fontsize=14,
        )
    # Denote the direction blowing from
    lbl = ("Calm values are < %.1f %s\nArrows indicate wind direction.") % (
        bins.m[0],
        units,
    )
    wp.fig.text(0.02, 0.125, lbl, va="bottom")
    return wp.fig
def windrose(
    station,
    database="asos",
    months=np.arange(1, 13),
    hours=np.arange(0, 24),
    sts=datetime(1970, 1, 1),
    ets=datetime(2050, 1, 1),
    units="mph",
    nsector=36,
    justdata=False,
    rmax=None,
    sname=None,
    sknt=None,
    drct=None,
    valid=None,
    level=None,
    bins=None,
    tzname=None,
    **kwargs,
):
    """Utility function that generates a windrose plot

    Args:
      station (str): station identifier to search database for
      database (str,optional): database name to look for data within
      months (list,optional): optional list of months to limit plot to
      hours (list,optional): optional list of hours to limit plot to
      sts (datetime,optional): start datetime
      ets (datetime,optional): end datetime
      units (str,optional): units to plot values as
      nsector (int,optional): number of bins to devide the windrose into
      justdata (boolean,optional): if True, write out the data only
      sname (str,optional): The name of this station, if not specified it will
        default to the ((`station`)) identifier
      sknt (list,optional): A list of wind speeds in knots already generated
      drct (list,optional): A list of wind directions (deg N) already generated
      valid (list,optional): A list of valid datetimes (with tzinfo set)
      level (int,optional): In case of RAOB, which level interests us (hPa)
      bins (list,optional): bins to use for the wind speed
      tzname (str,optional): Time zone to use for the plot.
      cmap (cmap,optional): Matplotlib colormap to pass to barplot.

    Returns:
      matplotlib.Figure instance or textdata
    """
    # NOTE(review): the ndarray defaults above are shared mutable objects;
    # harmless here since they are never mutated, but worth confirming.
    monthinfo = _get_timeinfo(months, "month", 12, tzname)
    hourinfo = _get_timeinfo(hours, "hour", 24, tzname)
    wu = WINDUNITS[units]
    # Query the database unless the caller supplied observations directly.
    if sknt is None or drct is None:
        df = _get_data(station, database, sts, ets, monthinfo, hourinfo, level)
    else:
        df = pd.DataFrame({"sknt": sknt, "drct": drct, "valid": valid})
    # Make sure our bins have units
    if not hasattr(bins, "units") and bins:
        bins = bins * wu["units"]
    # Convert wind speed into the units we want here
    # (skipped when all speeds are zero/missing; plot falls back to knots)
    if df["sknt"].max() > 0:
        df["speed"] = (df["sknt"].values * mpunits("knots")).to(wu["units"]).m
    if justdata:
        return _make_textresult(
            station,
            df,
            units,
            nsector,
            sname,
            monthinfo,
            hourinfo,
            level,
            bins,
            tzname,
        )
    return _make_plot(
        station,
        df,
        units,
        nsector,
        rmax,
        hours,
        months,
        sname,
        level,
        bins,
        tzname,
        **kwargs,
    )
| """util script to call `windrose` package"""
from datetime import datetime, timezone
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
import numpy as np
import pandas as pd
from pandas.io.sql import read_sql
from metpy.units import units as mpunits
from pyiem.plot.util import fitbox
from pyiem.plot.windrose import histogram, plot, WindrosePlot
from pyiem.util import get_dbconn
from pyiem.network import Table as NetworkTable
# Supported wind speed units: human label, pint unit, and the default
# speed bins (in that unit) used for surface windrose plots.
WINDUNITS = {
    "mph": {
        "label": "miles per hour",
        "units": mpunits("mph"),
        "bins": (2, 5, 7, 10, 15, 20),
    },
    "kts": {
        "label": "knots",
        "units": mpunits("knots"),
        "bins": (2, 5, 7, 10, 15, 20),
    },
    "mps": {
        "label": "meters per second",
        "units": mpunits("meter / second"),
        "bins": (1, 4, 6, 8, 10, 12),
    },
    "kph": {
        "label": "kilometers per hour",
        "units": mpunits("kilometer / hour"),
        "bins": (4, 10, 14, 20, 30, 40),
    },
}
# Speed bins used for upper-air (RAOB) windroses, keyed by unit.
RAOB_BINS = {
    "mph": [2, 25, 50, 75, 100, 150],
    # BUGFIX: the last kts bin was 15, breaking the required monotonic
    # increase of histogram bin edges; it is 150, matching the mph row.
    "kts": [2, 25, 50, 75, 100, 150],
    "mps": [1, 10, 15, 25, 50, 75],
    "kph": [4, 10, 14, 20, 30, 40],
}
def _get_timeinfo(arr, datepart, fullsize, tzname):
"""Convert the months/hours array provided into label text and SQL
Args:
arr (list): A list of ints
datepart (str): the part to extract from the database timestamp
fullsize (int): the size of specifying all dates
tzname (str): The timezone to compute this limiter in.
Returns:
dict with keys `sqltext` and `labeltext`
"""
sql = ""
lbl = "All included"
if len(arr) == 1:
sql = " and extract(%s from valid%s) = %s " % (
datepart,
"" if tzname is None else " at time zone '%s'" % (tzname,),
arr[0],
)
lbl = str(tuple(arr))
elif len(arr) < fullsize:
sql = (" and extract(%s from valid%s) in %s ") % (
datepart,
"" if tzname is None else " at time zone '%s'" % (tzname,),
(str(tuple(arr))).replace("'", ""),
)
lbl = str(tuple(arr))
return dict(sqltext=sql, labeltext=lbl)
def _get_data(station, database, sts, ets, monthinfo, hourinfo, level):
    """Helper function to get data out of IEM databases

    Args:
      station (str): the station identifier
      database (str): the name of the database to connect to, we assume we can
        then query a table called `alldata`
      sts (datetime): the floor to query data for
      ets (datetime): the ceiling to query data for
      monthinfo (dict): information on how to query for months
      hourinfo (dict): information on how to query for hours
      level (int): in case of RAOB, which pressure level (hPa)

    Returns:
      pandas.DataFrame of the data
    """
    # Query observations
    db = get_dbconn(database, user="nobody")
    rlimiter = ""
    # ASOS stores multiple report types; type 2 is the routine observation.
    if database == "asos":
        rlimiter = " and report_type = 2 "
    # NOTE(review): station/sts/ets are interpolated directly into SQL;
    # assumed to come from trusted callers -- confirm before exposing.
    sql = (
        "SELECT sknt, drct, valid at time zone 'UTC' as valid "
        f"from alldata WHERE station = '{station}' "
        f"and valid > '{sts}' and valid < '{ets}' {monthinfo['sqltext']} "
        f"{hourinfo['sqltext']} {rlimiter}"
    )
    if level is not None:  # HACK!
        db = get_dbconn("raob")
        # here comes another hack, stations with starting with _ are virtual
        # "ZZZZ" is a never-matching placeholder so the IN tuple stays valid.
        stations = [station, "ZZZZ"]
        if station.startswith("_"):
            nt = NetworkTable("RAOB")
            stations = (
                nt.sts.get(station, {})
                .get("name", "X--YYY ZZZ")
                .split("--")[1]
                .strip()
                .split(" ")
            )
        # smps * 1.94384 converts meters per second to knots.
        sql = """SELECT p.smps * 1.94384 as sknt, p.drct,
        f.valid at time zone 'UTC' as valid from
        raob_flights f JOIN raob_profile p on (f.fid = p.fid) WHERE
        f.station in %s and p.pressure = %s and p.smps is not null
        and p.drct is not null and valid >= '%s' and valid < '%s'
        %s
        %s
        """ % (
            str(tuple(stations)),
            level,
            sts,
            ets,
            monthinfo["sqltext"],
            hourinfo["sqltext"],
        )
    df = read_sql(sql, db, index_col=None)
    if not df.empty:
        # Make valid column timezone aware
        df["valid"] = df["valid"].dt.tz_localize(timezone.utc)
    # If sknt or drct are null, we want to set the other to null as well
    df.loc[pd.isnull(df["drct"]), "sknt"] = None
    df.loc[pd.isnull(df["sknt"]), "drct"] = None
    return df
def _make_textresult(
    station,
    df,
    units,
    nsector,
    sname,
    monthinfo,
    hourinfo,
    level,
    bins,
    tzname,
):
    """Generate a text table of windrose information

    Args:
      station (str): the station identifier
      df (pd.DataFrame): The dataframe with data
      units (str): the units of the `sknt` values
      nsector (int): number of sectors to divide rose into
      sname (str): the station name
      monthinfo (dict): information on month limiting
      hourinfo (dict): information on hour limiting
      level (int): in case of RAOB, which level do we care for
      bins (list): values to bin the wind speeds
      tzname (str): Time zone for the report.

    Returns:
      str of information"""
    if df.empty:
        return "No Data Found"
    wu = WINDUNITS[units]
    # Use the unit defaults when no pint-quantified bins were provided.
    if not hasattr(bins, "units"):
        bins = wu["bins"] * wu["units"]
    # RAOB speeds are much larger; use the upper-air bin set instead.
    if level is not None:
        bins = RAOB_BINS[units] * wu["units"]
    # Effectively filters out the nulls
    df2 = df[df["drct"] >= 0]
    speed = df2["sknt"].values * mpunits("knots")
    direction = df2["drct"].values * mpunits("degree")
    calm_percent, dir_centers, table = histogram(
        speed, direction, bins, nsector
    )
    res = ("# Windrose Data Table (Percent Frequency) " "for %s (%s)\n") % (
        sname if sname is not None else "((%s))" % (station,),
        station,
    )
    res += ("# Observations Used/Missing/Total: %s/%s/%s\n") % (
        len(df2.index),
        len(df.index) - len(df2.index),
        len(df.index),
    )
    res += f"# {_time_domain_string(df, tzname)}\n"
    res += "# Hour Limiter: %s\n" % (hourinfo["labeltext"],)
    res += "# Month Limiter: %s\n" % (monthinfo["labeltext"],)
    res += "# Wind Speed Units: %s\n" % (wu["label"],)
    if level is not None:
        res += "# RAOB Pressure (hPa) Level: %s\n" % (level,)
    # NOTE(review): datetime.utcnow() is naive; fine for this label but
    # deprecated in newer Pythons -- consider datetime.now(timezone.utc).
    res += ("# Generated %s UTC, contact: <EMAIL>\n") % (
        datetime.utcnow().strftime("%d %b %Y %H:%M"),
    )
    res += "# First value in table is CALM\n"
    cols = ["Direction", "Calm"]
    # Print out Speed Bins
    for i, val in enumerate(bins.m):
        # Last bin is open-ended ("+"); others show an inclusive upper edge.
        maxval = (
            "+"
            if i == bins.m.shape[0] - 1
            else " %4.1f" % (bins.m[i + 1] - 0.1,)
        )
        cols.append("%4.1f%s" % (val, maxval))
    delta = dir_centers.m[1] - dir_centers.m[0]
    res += ",".join(["%9s" % (c,) for c in cols]) + "\n"
    for i, val in enumerate(dir_centers.m):
        # Each row covers [center - delta/2, center + delta/2), wrapped at 360.
        minval = val - delta / 2.0
        if minval < 0:
            minval += 360.0
        maxval = np.min([360, val + delta / 2.0 - 1])
        # The calm percentage is only printed on the first row.
        res += "%03i-%03i ,%9s," % (
            minval,
            maxval,
            np.round(calm_percent.m, 2) if i == 0 else "",
        )
        res += ",".join(
            ["%9.3f" % (table.m[i, j],) for j in range(bins.m.shape[0])]
        )
        res += "\n"
    return res
def _time_domain_string(df, tzname):
"""Custom time label option."""
sts = df["valid"].min().to_pydatetime()
ets = df["valid"].max().to_pydatetime()
timeformat = "%d %b %Y %I:%M %p"
if tzname is not None:
sts = sts.astimezone(ZoneInfo(tzname))
ets = ets.astimezone(ZoneInfo(tzname))
if tzname == "UTC":
timeformat = "%d %b %Y %H:%M"
return "%s - %s %s" % (
sts.strftime(timeformat),
ets.strftime(timeformat),
"" if tzname is None else tzname,
)
def _make_plot(
    station,
    df,
    units,
    nsector,
    rmax,
    hours,
    months,
    sname,
    level,
    bins,
    tzname,
    **kwargs,
):
    """Generate a matplotlib windrose plot

    Args:
      station (str): station identifier
      df (pd.DataFrame): observations
      drct (list): list of wind directions
      units (str): units of wind speed
      nsector (int): number of bins to use for windrose
      rmax (float): radius of the plot
      hours (list): hour limit for plot
      month (list): month limit for plot
      sname (str): station name
      level (int): RAOB level in hPa of interest
      bins (list): values for binning the wind speeds
      tzname (str): Time zone this plot is produced in.
      cmap (colormap): Matplotlib colormap to use.

    Returns:
      matplotlib.Figure
    """
    wu = WINDUNITS[units]
    # Filters the missing values
    df2 = df[df["drct"] >= 0]
    direction = df2["drct"].values * mpunits("degree")
    # Prefer the pre-converted "speed" column when windrose() created it.
    if "speed" in df2.columns:
        speed = df2["speed"].values * wu["units"]
    else:
        speed = df2["sknt"].values * mpunits("knots")
    if not hasattr(bins, "units"):
        bins = wu["bins"] * wu["units"]
    if level is not None:
        bins = RAOB_BINS[units] * wu["units"]
    # Too few observations: return a placeholder figure instead of a rose.
    if len(df2.index) < 5:
        wp = WindrosePlot()
        wp.ax.text(
            0.5,
            0.5,
            "Not Enough Data For Plot.",
            ha="center",
            transform=wp.ax.transAxes,
        )
        return wp.fig
    wp = plot(
        direction,
        speed,
        bins=bins,
        nsector=nsector,
        rmax=rmax,
        cmap=kwargs.get("cmap"),
    )
    # Now we put some fancy debugging info on the plot
    tlimit = "[Time Domain: "
    # No label at all when every hour and month is included.
    if len(hours) == 24 and len(months) == 12:
        tlimit = ""
    if len(hours) < 24:
        # Long hour lists render as a range; short ones are enumerated.
        if len(hours) > 4:
            tlimit += "%s-%s" % (
                datetime(2000, 1, 1, hours[0]).strftime("%-I %p"),
                datetime(2000, 1, 1, hours[-1]).strftime("%-I %p"),
            )
        else:
            for h in hours:
                tlimit += "%s," % (datetime(2000, 1, 1, h).strftime("%-I %p"),)
    if len(months) < 12:
        for h in months:
            tlimit += "%s," % (datetime(2000, h, 1).strftime("%b"),)
    if tlimit != "":
        tlimit += "]"
    label = ("[%s] %s%s\n" "Windrose Plot %s\n" "Time Bounds: %s") % (
        station,
        sname if sname is not None else "((%s))" % (station,),
        "" if level is None else " @%s hPa" % (level,),
        tlimit,
        _time_domain_string(df, tzname),
    )
    fitbox(wp.fig, label, 0.14, 0.99, 0.92, 0.99, ha="left")
    label = ("Summary\nobs count: %s\nMissing: %s\nAvg Speed: %.1f %s") % (
        len(df.index),
        len(df.index) - len(df2.index),
        speed.m.mean(),
        units,
    )
    wp.fig.text(0.96, 0.11, label, ha="right", fontsize=14)
    # nogenerated suppresses the timestamp (useful for reproducible tests).
    if not kwargs.get("nogenerated", False):
        wp.fig.text(
            0.02,
            0.1,
            "Generated: %s" % (datetime.now().strftime("%d %b %Y"),),
            verticalalignment="bottom",
            fontsize=14,
        )
    # Denote the direction blowing from
    lbl = ("Calm values are < %.1f %s\nArrows indicate wind direction.") % (
        bins.m[0],
        units,
    )
    wp.fig.text(0.02, 0.125, lbl, va="bottom")
    return wp.fig
def windrose(
station,
database="asos",
months=np.arange(1, 13),
hours=np.arange(0, 24),
sts=datetime(1970, 1, 1),
ets=datetime(2050, 1, 1),
units="mph",
nsector=36,
justdata=False,
rmax=None,
sname=None,
sknt=None,
drct=None,
valid=None,
level=None,
bins=None,
tzname=None,
**kwargs,
):
"""Utility function that generates a windrose plot
Args:
station (str): station identifier to search database for
database (str,optional): database name to look for data within
months (list,optional): optional list of months to limit plot to
hours (list,optional): optional list of hours to limit plot to
sts (datetime,optional): start datetime
ets (datetime,optional): end datetime
units (str,optional): units to plot values as
nsector (int,optional): number of bins to devide the windrose into
justdata (boolean,optional): if True, write out the data only
sname (str,optional): The name of this station, if not specified it will
default to the ((`station`)) identifier
sknt (list,optional): A list of wind speeds in knots already generated
drct (list,optional): A list of wind directions (deg N) already generated
valid (list,optional): A list of valid datetimes (with tzinfo set)
level (int,optional): In case of RAOB, which level interests us (hPa)
bins (list,optional): bins to use for the wind speed
tzname (str,optional): Time zone to use for the plot.
cmap (cmap,optional): Matplotlib colormap to pass to barplot.
Returns:
matplotlib.Figure instance or textdata
"""
monthinfo = _get_timeinfo(months, "month", 12, tzname)
hourinfo = _get_timeinfo(hours, "hour", 24, tzname)
wu = WINDUNITS[units]
if sknt is None or drct is None:
df = _get_data(station, database, sts, ets, monthinfo, hourinfo, level)
else:
df = pd.DataFrame({"sknt": sknt, "drct": drct, "valid": valid})
# Make sure our bins have units
if not hasattr(bins, "units") and bins:
bins = bins * wu["units"]
# Convert wind speed into the units we want here
if df["sknt"].max() > 0:
df["speed"] = (df["sknt"].values * mpunits("knots")).to(wu["units"]).m
if justdata:
return _make_textresult(
station,
df,
units,
nsector,
sname,
monthinfo,
hourinfo,
level,
bins,
tzname,
)
return _make_plot(
station,
df,
units,
nsector,
rmax,
hours,
months,
sname,
level,
bins,
tzname,
**kwargs,
) | en | 0.686466 | util script to call `windrose` package Convert the months/hours array provided into label text and SQL Args: arr (list): A list of ints datepart (str): the part to extract from the database timestamp fullsize (int): the size of specifying all dates tzname (str): The timezone to compute this limiter in. Returns: dict with keys `sqltext` and `labeltext` Helper function to get data out of IEM databases Args: station (str): the station identifier database (str): the name of the database to connect to, we assume we can then query a table called `alldata` sts (datetime): the floor to query data for ets (datetime): the ceiling to query data for monthinfo (dict): information on how to query for months hourinfo (dict): information on how to query for hours level (int): in case of RAOB, which pressure level (hPa) Returns: pandas.DataFrame of the data # Query observations # HACK! # here comes another hack, stations with starting with _ are virtual SELECT p.smps * 1.94384 as sknt, p.drct, f.valid at time zone 'UTC' as valid from raob_flights f JOIN raob_profile p on (f.fid = p.fid) WHERE f.station in %s and p.pressure = %s and p.smps is not null and p.drct is not null and valid >= '%s' and valid < '%s' %s %s # Make valid column timezone aware # If sknt or drct are null, we want to set the other to null as well Generate a text table of windrose information Args: station (str): the station identifier df (pd.DataFrame): The dataframe with data units (str): the units of the `sknt` values nsector (int): number of sectors to divide rose into sname (str): the station name monthinfo (dict): information on month limiting hourinfo (dict): information on hour limiting level (int): in case of RAOB, which level do we care for bins (list): values to bin the wind speeds tzname (str): Time zone for the report. Returns: str of information # Effectively filters out the nulls # Print out Speed Bins Custom time label option. 
Generate a matplotlib windrose plot Args: station (str): station identifier df (pd.DataFrame): observations drct (list): list of wind directions units (str): units of wind speed nsector (int): number of bins to use for windrose rmax (float): radius of the plot hours (list): hour limit for plot month (list): month limit for plot sname (str): station name level (int): RAOB level in hPa of interest bins (list): values for binning the wind speeds tzname (str): Time zone this plot is produced in. cmap (colormap): Matplotlib colormap to use. Returns: matplotlib.Figure # Filters the missing values # Now we put some fancy debugging info on the plot # Denote the direction blowing from Utility function that generates a windrose plot Args: station (str): station identifier to search database for database (str,optional): database name to look for data within months (list,optional): optional list of months to limit plot to hours (list,optional): optional list of hours to limit plot to sts (datetime,optional): start datetime ets (datetime,optional): end datetime units (str,optional): units to plot values as nsector (int,optional): number of bins to devide the windrose into justdata (boolean,optional): if True, write out the data only sname (str,optional): The name of this station, if not specified it will default to the ((`station`)) identifier sknt (list,optional): A list of wind speeds in knots already generated drct (list,optional): A list of wind directions (deg N) already generated valid (list,optional): A list of valid datetimes (with tzinfo set) level (int,optional): In case of RAOB, which level interests us (hPa) bins (list,optional): bins to use for the wind speed tzname (str,optional): Time zone to use for the plot. cmap (cmap,optional): Matplotlib colormap to pass to barplot. Returns: matplotlib.Figure instance or textdata # Make sure our bins have units # Convert wind speed into the units we want here | 2.837629 | 3 |
coreset/utils/partition_recursive.py | ernestosanches/Decision-Trees-Coreset | 2 | 6618136 | <gh_stars>1-10
'''
Optimized partitioning algorithms for high-dimensional signal data.
Optimized 2-dimensional algorithms are found in partition_2d.py
*******************************************************************************
MIT License
Copyright (c) 2021 <NAME>, <NAME>,
<NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************
'''
####################################### NOTES #################################
# - Please cite our paper when using the code:
# "Coresets for Decision Trees of Signals" (NeurIPS'21)
# <NAME>, <NAME>,
# <NAME>, <NAME>
#
###############################################################################
import numpy as np
from coreset.utils.running_stats import RunningStats
def get_splits_condition(data,
func_on_slice, condition_on_slice, dim,
allow_over_condition, calculate_stats,
verbose=False):
'''
Splits data into subslices.
Parameters:
data, dim: input data
func_on_slice: data --> list of new_slices
Function for further splitting a current slice.
Can be used to recursively split on the remaining dimensions.
condition_on_slice: (data, return_from_func, stats) -> bool
Function returns a boolean split condition, after reaching
such condition a new slice should start.
allow_over_condition: allows to have slices with last element
included, which made the slice to not satisfy the condition.
calculate_stats: keeps calculating running_stats of each slice
Returns:
list of slices resulting from splitting the data based on condition.
'''
n, d = data.get_shape()
if n == 1:
# only one element is in the data, returning a single slice
# containing it
return [data]
if func_on_slice is None:
# No operations are done on a slice if the function is None.
# Transforming a slice into a single-element list containing
# itself, for use in the list.extend() method
func_on_slice = lambda data: [data]
# Ensuring that the data is sorted according to the current dimension.
data.set_curr_dim_and_sort(dim, stable=True)
n = data.size_on_dim()
# Iterating over blocks of elements of same value on given dimension
idx_start = idx_end = 0
is_curr_valid = is_prev_valid = False
result = []
rs = RunningStats() if calculate_stats else None
while idx_end < n:
# Current set of blocks of data
curr_data = data.get_slice(idx_start, idx_end + 1)
if calculate_stats:
incremented_data = data.get_slice(idx_end, idx_end + 1)
rs.add_slice(incremented_data.get_Y())
# Applying a tranformation on the currently considered data
func_result_curr = func_on_slice(curr_data)
# checking condition
condition = condition_on_slice(curr_data, func_result_curr, rs)
is_prev_valid = is_curr_valid
is_curr_valid = (condition is not None)
# helper variable to correctly determine over condition
curr_increment_len = 1
if condition:
if (allow_over_condition or idx_end == idx_start or
not is_prev_valid):
# if it is a slice of size 1, adding even if condition
# just happened but not if it is the last index
# (adding later in this case)
if idx_end + 1 != n:
result.extend(func_result_curr)
idx_start = idx_end + 1
if calculate_stats:
rs.clear()
else:
# previous index had false condition, adding it
#prev_slice = take(myslice, idx_start, idx_end , sortidx[dim])
prev_data = data.get_slice(idx_start, idx_end)
func_result_prev = func_on_slice(prev_data)
result.extend(func_result_prev)
idx_start = idx_end
if calculate_stats:
rs.clear()
curr_increment_len = 0
if idx_end + curr_increment_len == n and idx_start != n:
# this is the last index, adding a slice anyway
if is_curr_valid:
result.extend(func_result_curr)
idx_end += curr_increment_len
return result
def get_splits_valid(data, k, dim):
''' Splits the data using get_splits_condition where the condition
is a maximum number of valid elements in a slice '''
def condition_on_slice(data, ret, stats):
valid_count = data.valid.sum()
return None if valid_count == 0 else (valid_count > v / k)
v = data.valid.sum()
func_on_slice = None
slices = get_splits_condition(data,
func_on_slice, condition_on_slice, dim,
allow_over_condition=False,
calculate_stats=False)
for s in slices:
s.filter_valid()
return slices
def get_splits_variance(data, target_variance, dim):
''' Splits the data using get_splits_condition where the condition
is a maximum variance of a slice'''
def condition_on_slice(data, ret, stats):
return stats.s > target_variance
func_on_slice = None
return get_splits_condition(data,
func_on_slice, condition_on_slice, dim,
allow_over_condition=False,
calculate_stats=True)
def bicriteria(data, k):
''' Bicriteria approximation of an optimal k-segmentation '''
n, d = data.get_shape()
result = []
total_variance = 0
data.init_uid()
data.init_valid()
# While there are valid items; on each iteration half items are invalidated
while np.any(data.valid):
# Partition A into k^d block with equal number of valid items
# Go thtough each dimension and partition only 1 dimension at a time
Q = [data]
for dim in range(d):
P = []
# Take all current blocks for partitioning on dimension dim
for data_slice in Q:
new_slices = get_splits_valid(
data_slice, k, dim)
P.extend(new_slices)
Q = P
# compute variance of all blocks and add smallest half to Result
# mark all such blocks as invalid
# can use dense std_dev_valid
variances_raw = [(data_slice.std_dev_valid(), data_slice.uid)
for data_slice in Q]
variances = sorted(variances_raw, key=lambda x: x[0])
for variance, uid_slice in variances[:max(1, len(variances) // 2)]:
data.valid[uid_slice] = 0
result.append(uid_slice)
total_variance += variance
return result, total_variance
def balanced_partition_1d(data, gamma, sigma):
''' Balanced partition algorithm on a single dimension
If data is high-dimensional, splits are done on the last dimension in
the shape of the data. Splits on last dimension are done according
to condition of a maximum allowed variance of a slice '''
n, d = data.get_shape()
target_variance = gamma ** d * sigma
dim = d-1
new_slices = get_splits_variance(data, target_variance, dim)
return new_slices
def balanced_partition(data, gamma, sigma, d_start=0):
''' Balanced partition algorithm on high-dimensional data.
Splits on all dimensions are done according
to condition of a maximum allowed valid elements in a slice.
Only on the last dimension the 1-d splitting is done based on a
condition of maximum variance of a slice '''
n, d = data.get_shape()
needed = 1 / (gamma ** (d-d_start-1))
if n <= needed:
# no point in algorithmic splitting as we will get single points anyway
return data.get_single_points_split()
if d_start == d - 1:
return balanced_partition_1d(data, gamma, sigma)
func_on_slice = lambda data: balanced_partition(
data, gamma, sigma, d_start + 1)
condition_on_slice = (lambda data, ret, stats:
len(ret) > needed)
new_slices = get_splits_condition(
data, func_on_slice, condition_on_slice, d_start,
allow_over_condition=False, calculate_stats=False)
return new_slices
| '''
Optimized partitioning algorithms for high-dimensional signal data.
Optimized 2-dimensional algorithms are found in partition_2d.py
*******************************************************************************
MIT License
Copyright (c) 2021 <NAME>, <NAME>,
<NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************
'''
####################################### NOTES #################################
# - Please cite our paper when using the code:
# "Coresets for Decision Trees of Signals" (NeurIPS'21)
# <NAME>, <NAME>,
# <NAME>, <NAME>
#
###############################################################################
import numpy as np
from coreset.utils.running_stats import RunningStats
def get_splits_condition(data,
func_on_slice, condition_on_slice, dim,
allow_over_condition, calculate_stats,
verbose=False):
'''
Splits data into subslices.
Parameters:
data, dim: input data
func_on_slice: data --> list of new_slices
Function for further splitting a current slice.
Can be used to recursively split on the remaining dimensions.
condition_on_slice: (data, return_from_func, stats) -> bool
Function returns a boolean split condition, after reaching
such condition a new slice should start.
allow_over_condition: allows to have slices with last element
included, which made the slice to not satisfy the condition.
calculate_stats: keeps calculating running_stats of each slice
Returns:
list of slices resulting from splitting the data based on condition.
'''
n, d = data.get_shape()
if n == 1:
# only one element is in the data, returning a single slice
# containing it
return [data]
if func_on_slice is None:
# No operations are done on a slice if the function is None.
# Transforming a slice into a single-element list containing
# itself, for use in the list.extend() method
func_on_slice = lambda data: [data]
# Ensuring that the data is sorted according to the current dimension.
data.set_curr_dim_and_sort(dim, stable=True)
n = data.size_on_dim()
# Iterating over blocks of elements of same value on given dimension
idx_start = idx_end = 0
is_curr_valid = is_prev_valid = False
result = []
rs = RunningStats() if calculate_stats else None
while idx_end < n:
# Current set of blocks of data
curr_data = data.get_slice(idx_start, idx_end + 1)
if calculate_stats:
incremented_data = data.get_slice(idx_end, idx_end + 1)
rs.add_slice(incremented_data.get_Y())
# Applying a tranformation on the currently considered data
func_result_curr = func_on_slice(curr_data)
# checking condition
condition = condition_on_slice(curr_data, func_result_curr, rs)
is_prev_valid = is_curr_valid
is_curr_valid = (condition is not None)
# helper variable to correctly determine over condition
curr_increment_len = 1
if condition:
if (allow_over_condition or idx_end == idx_start or
not is_prev_valid):
# if it is a slice of size 1, adding even if condition
# just happened but not if it is the last index
# (adding later in this case)
if idx_end + 1 != n:
result.extend(func_result_curr)
idx_start = idx_end + 1
if calculate_stats:
rs.clear()
else:
# previous index had false condition, adding it
#prev_slice = take(myslice, idx_start, idx_end , sortidx[dim])
prev_data = data.get_slice(idx_start, idx_end)
func_result_prev = func_on_slice(prev_data)
result.extend(func_result_prev)
idx_start = idx_end
if calculate_stats:
rs.clear()
curr_increment_len = 0
if idx_end + curr_increment_len == n and idx_start != n:
# this is the last index, adding a slice anyway
if is_curr_valid:
result.extend(func_result_curr)
idx_end += curr_increment_len
return result
def get_splits_valid(data, k, dim):
''' Splits the data using get_splits_condition where the condition
is a maximum number of valid elements in a slice '''
def condition_on_slice(data, ret, stats):
valid_count = data.valid.sum()
return None if valid_count == 0 else (valid_count > v / k)
v = data.valid.sum()
func_on_slice = None
slices = get_splits_condition(data,
func_on_slice, condition_on_slice, dim,
allow_over_condition=False,
calculate_stats=False)
for s in slices:
s.filter_valid()
return slices
def get_splits_variance(data, target_variance, dim):
''' Splits the data using get_splits_condition where the condition
is a maximum variance of a slice'''
def condition_on_slice(data, ret, stats):
return stats.s > target_variance
func_on_slice = None
return get_splits_condition(data,
func_on_slice, condition_on_slice, dim,
allow_over_condition=False,
calculate_stats=True)
def bicriteria(data, k):
''' Bicriteria approximation of an optimal k-segmentation '''
n, d = data.get_shape()
result = []
total_variance = 0
data.init_uid()
data.init_valid()
# While there are valid items; on each iteration half items are invalidated
while np.any(data.valid):
# Partition A into k^d block with equal number of valid items
# Go thtough each dimension and partition only 1 dimension at a time
Q = [data]
for dim in range(d):
P = []
# Take all current blocks for partitioning on dimension dim
for data_slice in Q:
new_slices = get_splits_valid(
data_slice, k, dim)
P.extend(new_slices)
Q = P
# compute variance of all blocks and add smallest half to Result
# mark all such blocks as invalid
# can use dense std_dev_valid
variances_raw = [(data_slice.std_dev_valid(), data_slice.uid)
for data_slice in Q]
variances = sorted(variances_raw, key=lambda x: x[0])
for variance, uid_slice in variances[:max(1, len(variances) // 2)]:
data.valid[uid_slice] = 0
result.append(uid_slice)
total_variance += variance
return result, total_variance
def balanced_partition_1d(data, gamma, sigma):
''' Balanced partition algorithm on a single dimension
If data is high-dimensional, splits are done on the last dimension in
the shape of the data. Splits on last dimension are done according
to condition of a maximum allowed variance of a slice '''
n, d = data.get_shape()
target_variance = gamma ** d * sigma
dim = d-1
new_slices = get_splits_variance(data, target_variance, dim)
return new_slices
def balanced_partition(data, gamma, sigma, d_start=0):
''' Balanced partition algorithm on high-dimensional data.
Splits on all dimensions are done according
to condition of a maximum allowed valid elements in a slice.
Only on the last dimension the 1-d splitting is done based on a
condition of maximum variance of a slice '''
n, d = data.get_shape()
needed = 1 / (gamma ** (d-d_start-1))
if n <= needed:
# no point in algorithmic splitting as we will get single points anyway
return data.get_single_points_split()
if d_start == d - 1:
return balanced_partition_1d(data, gamma, sigma)
func_on_slice = lambda data: balanced_partition(
data, gamma, sigma, d_start + 1)
condition_on_slice = (lambda data, ret, stats:
len(ret) > needed)
new_slices = get_splits_condition(
data, func_on_slice, condition_on_slice, d_start,
allow_over_condition=False, calculate_stats=False)
return new_slices | en | 0.766961 | Optimized partitioning algorithms for high-dimensional signal data.
Optimized 2-dimensional algorithms are found in partition_2d.py
*******************************************************************************
MIT License
Copyright (c) 2021 <NAME>, <NAME>,
<NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
******************************************************************************* ####################################### NOTES ################################# # - Please cite our paper when using the code: # "Coresets for Decision Trees of Signals" (NeurIPS'21) # <NAME>, <NAME>, # <NAME>, <NAME> # ############################################################################### Splits data into subslices.
Parameters:
data, dim: input data
func_on_slice: data --> list of new_slices
Function for further splitting a current slice.
Can be used to recursively split on the remaining dimensions.
condition_on_slice: (data, return_from_func, stats) -> bool
Function returns a boolean split condition, after reaching
such condition a new slice should start.
allow_over_condition: allows to have slices with last element
included, which made the slice to not satisfy the condition.
calculate_stats: keeps calculating running_stats of each slice
Returns:
list of slices resulting from splitting the data based on condition. # only one element is in the data, returning a single slice # containing it # No operations are done on a slice if the function is None. # Transforming a slice into a single-element list containing # itself, for use in the list.extend() method # Ensuring that the data is sorted according to the current dimension. # Iterating over blocks of elements of same value on given dimension # Current set of blocks of data # Applying a tranformation on the currently considered data # checking condition # helper variable to correctly determine over condition # if it is a slice of size 1, adding even if condition # just happened but not if it is the last index # (adding later in this case) # previous index had false condition, adding it #prev_slice = take(myslice, idx_start, idx_end , sortidx[dim]) # this is the last index, adding a slice anyway Splits the data using get_splits_condition where the condition
is a maximum number of valid elements in a slice Splits the data using get_splits_condition where the condition
is a maximum variance of a slice Bicriteria approximation of an optimal k-segmentation # While there are valid items; on each iteration half items are invalidated # Partition A into k^d block with equal number of valid items # Go thtough each dimension and partition only 1 dimension at a time # Take all current blocks for partitioning on dimension dim # compute variance of all blocks and add smallest half to Result # mark all such blocks as invalid # can use dense std_dev_valid Balanced partition algorithm on a single dimension
If data is high-dimensional, splits are done on the last dimension in
the shape of the data. Splits on last dimension are done according
to condition of a maximum allowed variance of a slice Balanced partition algorithm on high-dimensional data.
Splits on all dimensions are done according
to condition of a maximum allowed valid elements in a slice.
Only on the last dimension the 1-d splitting is done based on a
condition of maximum variance of a slice # no point in algorithmic splitting as we will get single points anyway | 1.922037 | 2 |
smoke_test/constants.py | ZEROFAIL/zrpc_smoke_test | 0 | 6618137 | LOG_FORMAT = "%(asctime)s | %(process)6d | %(name)12s | %(levelname)8s | %(funcName)s : %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
| LOG_FORMAT = "%(asctime)s | %(process)6d | %(name)12s | %(levelname)8s | %(funcName)s : %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
| none | 1 | 1.389789 | 1 | |
sameproject/sdk/importer.py | SAME-Project/same-project | 8 | 6618138 | <filename>sameproject/sdk/importer.py
from johnnydep.lib import JohnnyDist
from .conda_env import CondaEnv
from typing import Tuple
from pathlib import Path
import pkg_resources
import sameproject
import subprocess
import importlib
import logging
import sys
default_conda = """
name: NULL_NAME
dependencies:
- NULL_PACKAGE
"""
def import_packages(packages_to_import, update_conda_env=False, conda_env_path="environment.yml", python_executable=None):
if python_executable is None:
python_executable = sys.executable
if isinstance(packages_to_import, str):
packages_to_import = [packages_to_import]
new_packages, already_installed_packages = get_packages_to_install(packages_to_import)
already_installed_list = build_already_installed_package_notice(already_installed_packages)
new_packages_list = build_new_packages_notice(python_executable, new_packages)
for package in already_installed_list + new_packages_list:
package_name = ""
try:
package_name, package_version = package.split("==")
except ValueError:
# not enough values
package_name, package_version = package, "" # noqa F841 - package version unused
importlib.import_module(package_name)
if update_conda_env:
_update_conda_env(conda_env_path)
def build_new_packages_notice(python_executable, new_packages) -> list:
new_packages_list = []
for package_name, package_version in new_packages.items():
new_package_string = ""
if package_version != "":
new_package_string = f"{package_name}=={package_version}"
else:
new_package_string = f"{package_name}"
_install_package(python_executable, new_package_string)
new_packages_list.append(new_package_string)
if len(new_packages_list) > 0:
print("Packages installed: %v", ", ".join(new_packages_list))
return new_packages_list
def build_already_installed_package_notice(already_installed_packages) -> list:
already_installed_list = []
for package_name, package_version in already_installed_packages.items():
if package_version != "":
already_installed_list.append(f"{package_name}=={package_version}")
else:
already_installed_list.append(f"{package_name}")
print(f'Packages skipped because they are already installed: {", ".join(already_installed_list)}')
return already_installed_list
def get_packages_to_install(packages_to_import) -> Tuple[dict, dict]:
new_packages = {}
already_installed_packages = {}
for package in packages_to_import:
try:
package_name, package_version = package.split("==")
except ValueError:
# not enough values
package_name, package_version = package, ""
installed_package_dist = JohnnyDist(package_name)
if package_version == "":
if installed_package_dist.version_latest == installed_package_dist.version_installed:
already_installed_packages[package_name] = installed_package_dist.version_latest
else:
new_packages[package_name] = installed_package_dist.version_latest
return (new_packages, already_installed_packages)
def _install_package(python_executable, package):
subprocess.check_call(
[python_executable, "-m", "pip", "install", package],
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
def _update_conda_env(file_path):
try:
with open(file_path, "rb") as f:
conda_env = CondaEnv(buffered_reader=f)
except FileNotFoundError:
conda_env_path = "environment.yaml"
conda_env_path_object = Path(conda_env_path).absolute
logging.info(f"No file found at '{conda_env_path_object}', creating one.")
conda_env = CondaEnv(content=default_conda)
conda_env.name = sameproject.sdk.helpers.ipy_nb_name()
conda_env.dependencies = []
conda_dependencies = {}
for dependency_line in conda_env.dependencies:
package_name = dependency_line.split(r"=+")
conda_dependencies[package_name] = dependency_line
installed_packages = pkg_resources.working_set
installed_packages_list = ["%s=%s" % (i.key, i.version) for i in installed_packages]
for package in installed_packages_list:
print(package)
# # This is pretty hacky, but not sure what to do in the alternative.
# # The problem is that I want there to be a same.yaml file if we write a conda file
# # But since this is the SDK, it shouldn't (?) be necessary to have one (yet).
# # So commenting out... for now.
# # try:
# # same_config_file_path = Path("same.yaml")
# # if not same_config_file_path.exists():
# # raise FileNotFoundError()
# # with open(same_config_file_path.absolute, "rb") as f:
# # same_config = SameConfig.from_yaml(f.read())
# # except FileNotFoundError:
# # logging.fatal("No SAME file found at 'same.yaml', please create one.")
| <filename>sameproject/sdk/importer.py
from johnnydep.lib import JohnnyDist
from .conda_env import CondaEnv
from typing import Tuple
from pathlib import Path
import pkg_resources
import sameproject
import subprocess
import importlib
import logging
import sys
default_conda = """
name: NULL_NAME
dependencies:
- NULL_PACKAGE
"""
def import_packages(packages_to_import, update_conda_env=False, conda_env_path="environment.yml", python_executable=None):
    """Install (when needed) and import the given packages at runtime.

    Args:
        packages_to_import: a single spec or a list of specs, each either
            'name' or 'name==version'.
        update_conda_env: when True, also refresh the conda environment file
            at *conda_env_path* after importing.
        conda_env_path: path of the conda environment file to update.
        python_executable: interpreter whose pip performs installs; defaults
            to the running interpreter (sys.executable).
    """
    if python_executable is None:
        python_executable = sys.executable
    # Accept a bare spec string as a convenience.
    if isinstance(packages_to_import, str):
        packages_to_import = [packages_to_import]
    new_packages, already_installed_packages = get_packages_to_install(packages_to_import)
    already_installed_list = build_already_installed_package_notice(already_installed_packages)
    new_packages_list = build_new_packages_notice(python_executable, new_packages)
    for package in already_installed_list + new_packages_list:
        package_name = ""
        try:
            package_name, package_version = package.split("==")
        except ValueError:
            # not enough values
            package_name, package_version = package, ""  # noqa F841 - package version unused
        # NOTE(review): the module is loaded into sys.modules but not bound in
        # the caller's namespace — confirm callers import it again themselves.
        importlib.import_module(package_name)
    if update_conda_env:
        _update_conda_env(conda_env_path)
def build_new_packages_notice(python_executable, new_packages) -> list:
    """Install every package in *new_packages* and return their pip specifiers.

    Args:
        python_executable: interpreter whose pip is used for installation.
        new_packages: mapping of package name -> version ('' means unpinned).

    Returns:
        List of 'name==version' (or bare 'name') strings that were installed.
    """
    new_packages_list = []
    for package_name, package_version in new_packages.items():
        if package_version != "":
            new_package_string = f"{package_name}=={package_version}"
        else:
            new_package_string = f"{package_name}"
        _install_package(python_executable, new_package_string)
        new_packages_list.append(new_package_string)
    if len(new_packages_list) > 0:
        # Bug fix: the original used Go-style "%v" with print(), which emitted
        # the literal '%v' and the joined list as two separate arguments.
        print("Packages installed: " + ", ".join(new_packages_list))
    return new_packages_list
def build_already_installed_package_notice(already_installed_packages) -> list:
    """Report and return specifiers for packages that were skipped because
    they are already installed.

    Args:
        already_installed_packages: mapping of name -> version ('' = no pin).

    Returns:
        List of 'name==version' (or bare 'name') strings.
    """
    skipped = [
        f"{name}=={version}" if version != "" else f"{name}"
        for name, version in already_installed_packages.items()
    ]
    print(f'Packages skipped because they are already installed: {", ".join(skipped)}')
    return skipped
def get_packages_to_install(packages_to_import) -> Tuple[dict, dict]:
    """Partition the requested specs into packages to install vs. already present.

    Args:
        packages_to_import: iterable of 'name' or 'name==version' specs.

    Returns:
        (new_packages, already_installed_packages) — two dicts mapping
        package name to the version that should be / already is installed.
    """
    new_packages = {}
    already_installed_packages = {}
    for package in packages_to_import:
        try:
            package_name, package_version = package.split("==")
        except ValueError:
            # not enough values: bare package name, no pinned version
            package_name, package_version = package, ""
        installed_package_dist = JohnnyDist(package_name)
        if package_version == "":
            if installed_package_dist.version_latest == installed_package_dist.version_installed:
                already_installed_packages[package_name] = installed_package_dist.version_latest
            else:
                new_packages[package_name] = installed_package_dist.version_latest
        else:
            # Bug fix: pinned specs ('name==x.y') previously fell through both
            # branches and were silently dropped (never installed or imported).
            if package_version == installed_package_dist.version_installed:
                already_installed_packages[package_name] = package_version
            else:
                new_packages[package_name] = package_version
    return (new_packages, already_installed_packages)
def _install_package(python_executable, package):
    """Install *package* via the pip of the given interpreter, silencing pip's output."""
    cmd = [python_executable, "-m", "pip", "install", package]
    subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
def _update_conda_env(file_path):
    """Refresh the in-memory conda environment description at *file_path*.

    Loads an existing conda env file, or creates a default one (named after
    the current notebook) when none exists, then indexes its dependency lines
    by bare package name and prints the packages installed in the running
    interpreter.

    NOTE(review): this function never writes the environment back to disk;
    the persistence logic is still commented out below — confirm intended.
    """
    import re

    try:
        with open(file_path, "rb") as f:
            conda_env = CondaEnv(buffered_reader=f)
    except FileNotFoundError:
        conda_env_path = "environment.yaml"
        # Bug fix: Path.absolute is a method; the original referenced it
        # without calling it, logging a bound-method repr instead of a path.
        conda_env_path_object = Path(conda_env_path).absolute()
        logging.info(f"No file found at '{conda_env_path_object}', creating one.")
        conda_env = CondaEnv(content=default_conda)
        conda_env.name = sameproject.sdk.helpers.ipy_nb_name()
        conda_env.dependencies = []
    conda_dependencies = {}
    for dependency_line in conda_env.dependencies:
        # Bug fix: str.split() treats "=+" literally; use a regex split so
        # both 'name=1.2' and 'name==1.2' map the bare name to its spec line.
        package_name = re.split(r"=+", dependency_line)[0]
        conda_dependencies[package_name] = dependency_line
    installed_packages = pkg_resources.working_set
    installed_packages_list = ["%s=%s" % (i.key, i.version) for i in installed_packages]
    for package in installed_packages_list:
        print(package)
    # # This is pretty hacky, but not sure what to do in the alternative.
    # # The problem is that I want there to be a same.yaml file if we write a conda file
    # # But since this is the SDK, it shouldn't (?) be necessary to have one (yet).
    # # So commenting out... for now.
    # # try:
    # #     same_config_file_path = Path("same.yaml")
    # #     if not same_config_file_path.exists():
    # #         raise FileNotFoundError()
    # #     with open(same_config_file_path.absolute, "rb") as f:
    # #         same_config = SameConfig.from_yaml(f.read())
    # # except FileNotFoundError:
    # #     logging.fatal("No SAME file found at 'same.yaml', please create one.")
| en | 0.796845 | name: NULL_NAME dependencies: - NULL_PACKAGE # not enough values # noqa F841 - package version unused # not enough values # # This is pretty hacky, but not sure what to do in the alternative. # # The problem is that I want there to be a same.yaml file if we write a conda file # # But since this is the SDK, it shouldn't (?) be necessary to have one (yet). # # So commenting out... for now. # # try: # # same_config_file_path = Path("same.yaml") # # if not same_config_file_path.exists(): # # raise FileNotFoundError() # # with open(same_config_file_path.absolute, "rb") as f: # # same_config = SameConfig.from_yaml(f.read()) # # except FileNotFoundError: # # logging.fatal("No SAME file found at 'same.yaml', please create one.") | 2.38656 | 2 |
strategy/SL/DualAttnRNN/DualAttnRNN.py | georgezouq/Personae | 1 | 6618139 | import rqalpha
import os
import tensorflow as tf
import numpy as np
from rqalpha.api import *
from sklearn.preprocessing import MinMaxScaler
from strategy import config
from base.env.market import Market
from algorithm import config as alg_config
from algorithm.SL.DualAttnRNN import Algorithm
from checkpoints import CHECKPOINTS_DIR
# 在这个方法中编写任何的初始化逻辑。context对象将会在你的算法策略的任何方法之间做传递。
def init(context):
    """Strategy initialization hook (runs once before trading starts).

    Registers the traded instrument, builds the Market data environment with
    min-max scaled sequential state, and constructs the DualAttnRNN Algorithm
    in 'run' mode, pointing it at its checkpoint and summary directories.
    """
    context.s1 = '600036.XSHG'
    update_universe(context.s1)
    context.has_save_data = False
    mode = 'run'
    market = 'stock'
    training_data_ratio = 0.9
    train_steps = 30000
    base = config.get('base')
    codes = ['600036']
    # Market environment: sequential, scaled state mixed with index data.
    env = Market(codes, start_date=base.get('start_date'), end_date=base.get('end_date'), **{
        "market": market,
        "use_sequence": True,
        "scaler": MinMaxScaler,
        "mix_index_state": True,
        "training_data_ratio": training_data_ratio
    })
    model_name = 'DualAttnRNN'  # os.path.basename(__file__).split('.')[0]
    # Rolling buffers of raw and scaled OHLCV bars, filled in handle_bar.
    context.bar_list_origin = []
    context.bar_list = []
    context.scale = MinMaxScaler()
    context.algorithm = Algorithm(
        tf.Session(config=alg_config), env, env.seq_length, env.data_dim, env.code_count, **{
            "mode": mode,
            "hidden_size": 5,
            "enable_saver": True,
            "train_steps": train_steps,
            "enable_summary_writer": True,
            "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"),
            "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"),
        }
    )
# before_trading此函数会在每天策略交易开始前被调用,当天只会被调用一次
def before_trading(context):
    """rqalpha hook: called once per day before trading; no per-day setup needed."""
    pass
# 你选择的证券的数据更新将会触发此段逻辑,例如日或分钟历史数据切片或者是实时数据切片更新
def handle_bar(context, bar_dict):
    """Per-bar handler: scale the OHLCV history and run a model prediction.

    Appends the raw bar, re-fits the scaler on the full history, and once two
    full sequence windows are available feeds them to the model and inverse-
    transforms the prediction back to price space (printed only; no orders).
    """
    s1 = bar_dict[context.s1]
    price = [s1.open, s1.high, s1.low, s1.close, s1.volume]
    context.bar_list_origin.append(price)
    # Re-fit on the whole raw history so scaling covers the observed range.
    scale = context.scale.fit(context.bar_list_origin)
    price_scaled = scale.transform([price])
    context.bar_list.append(price_scaled[0])
    # Not enough bars yet to build two model input windows.
    if len(context.bar_list) < context.algorithm.seq_length * 2 + 2:
        return
    x1 = context.bar_list[-context.algorithm.seq_length * 2:-context.algorithm.seq_length]
    x2 = context.bar_list[-context.algorithm.seq_length:]
    x = [x1, x2]
    c, a, _ = context.algorithm.predict(x)
    # Pad the prediction back to the scaler's feature width before inverting.
    res = np.append(_, [0, 0, 0, 0])
    # Bug fix: the inverse transform was computed twice; once is enough.
    predict = scale.inverse_transform([res])
    print('predict', predict)
# after_trading函数会在每天交易结束后被调用,当天只会被调用一次
def after_trading(context):
    """rqalpha hook: called once per day after the close; nothing to clean up."""
    pass
# Launch the rqalpha backtest, wiring in the lifecycle hooks defined above.
rqalpha.run_func(init=init,
                 before_trading=before_trading,
                 handle_bar=handle_bar,
                 after_trading=after_trading,
                 config=config)
| import rqalpha
import os
import tensorflow as tf
import numpy as np
from rqalpha.api import *
from sklearn.preprocessing import MinMaxScaler
from strategy import config
from base.env.market import Market
from algorithm import config as alg_config
from algorithm.SL.DualAttnRNN import Algorithm
from checkpoints import CHECKPOINTS_DIR
# 在这个方法中编写任何的初始化逻辑。context对象将会在你的算法策略的任何方法之间做传递。
def init(context):
    """Strategy initialization hook (runs once before trading starts).

    Registers the traded instrument, builds the Market data environment with
    min-max scaled sequential state, and constructs the DualAttnRNN Algorithm
    in 'run' mode, pointing it at its checkpoint and summary directories.
    """
    context.s1 = '600036.XSHG'
    update_universe(context.s1)
    context.has_save_data = False
    mode = 'run'
    market = 'stock'
    training_data_ratio = 0.9
    train_steps = 30000
    base = config.get('base')
    codes = ['600036']
    # Market environment: sequential, scaled state mixed with index data.
    env = Market(codes, start_date=base.get('start_date'), end_date=base.get('end_date'), **{
        "market": market,
        "use_sequence": True,
        "scaler": MinMaxScaler,
        "mix_index_state": True,
        "training_data_ratio": training_data_ratio
    })
    model_name = 'DualAttnRNN'  # os.path.basename(__file__).split('.')[0]
    # Rolling buffers of raw and scaled OHLCV bars, filled in handle_bar.
    context.bar_list_origin = []
    context.bar_list = []
    context.scale = MinMaxScaler()
    context.algorithm = Algorithm(
        tf.Session(config=alg_config), env, env.seq_length, env.data_dim, env.code_count, **{
            "mode": mode,
            "hidden_size": 5,
            "enable_saver": True,
            "train_steps": train_steps,
            "enable_summary_writer": True,
            "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"),
            "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"),
        }
    )
# before_trading此函数会在每天策略交易开始前被调用,当天只会被调用一次
def before_trading(context):
    """rqalpha hook: called once per day before trading; no per-day setup needed."""
    pass
# 你选择的证券的数据更新将会触发此段逻辑,例如日或分钟历史数据切片或者是实时数据切片更新
def handle_bar(context, bar_dict):
    """Per-bar handler: scale the OHLCV history and run a model prediction.

    Appends the raw bar, re-fits the scaler on the full history, and once two
    full sequence windows are available feeds them to the model and inverse-
    transforms the prediction back to price space (printed only; no orders).
    """
    s1 = bar_dict[context.s1]
    price = [s1.open, s1.high, s1.low, s1.close, s1.volume]
    context.bar_list_origin.append(price)
    # Re-fit on the whole raw history so scaling covers the observed range.
    scale = context.scale.fit(context.bar_list_origin)
    price_scaled = scale.transform([price])
    context.bar_list.append(price_scaled[0])
    # Not enough bars yet to build two model input windows.
    if len(context.bar_list) < context.algorithm.seq_length * 2 + 2:
        return
    x1 = context.bar_list[-context.algorithm.seq_length * 2:-context.algorithm.seq_length]
    x2 = context.bar_list[-context.algorithm.seq_length:]
    x = [x1, x2]
    c, a, _ = context.algorithm.predict(x)
    # Pad the prediction back to the scaler's feature width before inverting.
    res = np.append(_, [0, 0, 0, 0])
    # Bug fix: the inverse transform was computed twice; once is enough.
    predict = scale.inverse_transform([res])
    print('predict', predict)
# after_trading函数会在每天交易结束后被调用,当天只会被调用一次
def after_trading(context):
    """rqalpha hook: called once per day after the close; nothing to clean up."""
    pass
# Launch the rqalpha backtest, wiring in the lifecycle hooks defined above.
rqalpha.run_func(init=init,
                 before_trading=before_trading,
                 handle_bar=handle_bar,
                 after_trading=after_trading,
                 config=config)
| zh | 0.848051 | # 在这个方法中编写任何的初始化逻辑。context对象将会在你的算法策略的任何方法之间做传递。 # os.path.basename(__file__).split('.')[0] # before_trading此函数会在每天策略交易开始前被调用,当天只会被调用一次 # 你选择的证券的数据更新将会触发此段逻辑,例如日或分钟历史数据切片或者是实时数据切片更新 # if not enough bar # frm = len(context.bar_list)-context.algorithm.seq_length # after_trading函数会在每天交易结束后被调用,当天只会被调用一次 | 1.994143 | 2 |
src/utils/logging.py | dumpmemory/MADE | 58 | 6618140 | import logging
from pathlib import Path
import sys
def get_handler(fn=None, stream=None):
    """Build an INFO-level handler: a FileHandler (mode 'w') when *fn* is
    given, otherwise a StreamHandler writing to *stream*."""
    if fn:
        handler = logging.FileHandler(fn, mode="w")
    else:
        handler = logging.StreamHandler(stream=stream)
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        logging.Formatter(
            fmt="%(levelname).1s %(asctime)s %(name)s:%(lineno)d: %(message)s",
            datefmt="%Y-%m-%dT%H:%M:%S",
        )
    )
    return handler
def get_resume_name(output_dir, s="output.log"):
    """Return a non-clobbering log path inside *output_dir*.

    The first run gets '<s>'; later runs get '<s>.<n>' where n is one past
    the highest numeric suffix already present.
    """
    existing = list(Path(output_dir).glob(f"{s}*"))
    if not existing:
        return Path(output_dir) / s
    next_idx = 1
    for path in existing:
        text = str(path)
        if text[-1].isdigit():
            next_idx = max(next_idx, int(text.split(".")[-1]) + 1)
    return Path(output_dir) / f"{s}.{next_idx}"
def initialize(output_dir, resume=False):
    """Configure root logging to stdout plus a file inside *output_dir*.

    With resume=True the file name is chosen so that earlier logs are kept
    (see get_resume_name); otherwise 'output.log' is (over)written.
    """
    if resume:
        log_file = get_resume_name(output_dir)
    else:
        log_file = Path(output_dir) / "output.log"
    handlers = (get_handler(stream=sys.stdout), get_handler(fn=log_file))
    logging.basicConfig(handlers=handlers, force=True, level=logging.INFO)
def get_logger(name):
    """Return the named logger, ensuring a default stream handler exists."""
    default_handlers = [get_handler(stream=None)]
    logging.basicConfig(handlers=default_handlers, level=logging.INFO)
    return logging.getLogger(name)
| import logging
from pathlib import Path
import sys
def get_handler(fn=None, stream=None):
    """Build an INFO-level handler: a FileHandler (mode 'w') when *fn* is
    given, otherwise a StreamHandler writing to *stream*."""
    if fn:
        handler = logging.FileHandler(fn, mode="w")
    else:
        handler = logging.StreamHandler(stream=stream)
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        logging.Formatter(
            fmt="%(levelname).1s %(asctime)s %(name)s:%(lineno)d: %(message)s",
            datefmt="%Y-%m-%dT%H:%M:%S",
        )
    )
    return handler
def get_resume_name(output_dir, s="output.log"):
    """Return a non-clobbering log path inside *output_dir*.

    The first run gets '<s>'; later runs get '<s>.<n>' where n is one past
    the highest numeric suffix already present.
    """
    existing = list(Path(output_dir).glob(f"{s}*"))
    if not existing:
        return Path(output_dir) / s
    next_idx = 1
    for path in existing:
        text = str(path)
        if text[-1].isdigit():
            next_idx = max(next_idx, int(text.split(".")[-1]) + 1)
    return Path(output_dir) / f"{s}.{next_idx}"
def initialize(output_dir, resume=False):
    """Configure root logging to stdout plus a file inside *output_dir*.

    With resume=True the file name is chosen so that earlier logs are kept
    (see get_resume_name); otherwise 'output.log' is (over)written.
    """
    if resume:
        log_file = get_resume_name(output_dir)
    else:
        log_file = Path(output_dir) / "output.log"
    handlers = (get_handler(stream=sys.stdout), get_handler(fn=log_file))
    logging.basicConfig(handlers=handlers, force=True, level=logging.INFO)
def get_logger(name):
    """Return the named logger, ensuring a default stream handler exists."""
    default_handlers = [get_handler(stream=None)]
    logging.basicConfig(handlers=default_handlers, level=logging.INFO)
    return logging.getLogger(name)
| none | 1 | 2.554532 | 3 | |
ample/constants.py | fsimkovic/ample | 6 | 6618141 | import os
if "CCP4" not in os.environ.keys():
msg = "Cannot find CCP4 root directory"
raise RuntimeError(msg)
__all__ = ["AMPLE_DIR", "SHARE_DIR", "AMPLE_CONFIG_FILE"]
AMPLE_DIR = os.path.join(os.environ["CCP4"], "lib", "py2", "ample")
SHARE_DIR = os.path.join(os.environ["CCP4"], "share", "ample")
AMPLE_CONFIG_FILE = os.path.join(SHARE_DIR, "include", "ample.ini")
AMPLE_LOGGER_CONFIG = os.path.join(SHARE_DIR, "include", "logging.json")
AMPLE_PKL = 'resultsd.pkl'
AMPLEDIR = 'AMPLE_'
I2DIR = 'AMPLEI2'
import os

# Fail fast when the CCP4 suite is not installed/configured: every path
# below is derived from the CCP4 root directory.
if "CCP4" not in os.environ:  # idiomatic membership test (was os.environ.keys())
    msg = "Cannot find CCP4 root directory"
    raise RuntimeError(msg)

__all__ = ["AMPLE_DIR", "SHARE_DIR", "AMPLE_CONFIG_FILE"]

# Locations of AMPLE code and shared data inside the CCP4 installation.
AMPLE_DIR = os.path.join(os.environ["CCP4"], "lib", "py2", "ample")
SHARE_DIR = os.path.join(os.environ["CCP4"], "share", "ample")
AMPLE_CONFIG_FILE = os.path.join(SHARE_DIR, "include", "ample.ini")
AMPLE_LOGGER_CONFIG = os.path.join(SHARE_DIR, "include", "logging.json")

# Pickled results file name and standard work-directory prefixes.
AMPLE_PKL = 'resultsd.pkl'
AMPLEDIR = 'AMPLE_'
I2DIR = 'AMPLEI2'
| none | 1 | 2.010483 | 2 | |
preprocess/BioParser.py | thautwarm/BioInfoPlus | 0 | 6618142 | <reponame>thautwarm/BioInfoPlus<gh_stars>0
# coding: utf-8
# In[11]:
from typing import List
from .tools.Dict import Dict as UDict
from copy import deepcopy
from itertools import repeat
import re
tokenize = re.compile("[\w\.,]+")
class Preprocessing:
    """Static helpers for locating fixed-width columns in a header row and
    normalising cell text."""

    @staticmethod
    def getColumnsAndIndex(row):
        """Return a list of UDict(inf, sup) spans — inclusive character index
        ranges — one per whitespace-separated column label in *row*."""
        res = []
        status = False
        begin = -1
        for i, ch in enumerate(row):
            if status:
                if ch != " ":
                    continue
                else:
                    status = False
                    res.append(UDict(inf=begin, sup=(i - 1)))
            else:
                if ch != " ":
                    status = True
                    begin = i
        # Bug fix: a label running to the end of the row (no trailing space)
        # was previously dropped; close the final span here.
        if status:
            res.append(UDict(inf=begin, sup=len(row) - 1))
        return res

    @staticmethod
    def resolveSpace(x):
        """Strip leading spaces from *x*: returns *x* unchanged when it has no
        leading space, the left-trimmed text otherwise, and None when *x* is
        non-empty but all spaces."""
        if x.startswith(" "):
            for i, ch in enumerate(x):
                if ch != ' ':
                    return x[i:]
            return None
        return x
class Parser:
    """Fixed-width table parser: locates the header row via *colNamePart* and
    slices every subsequent line according to the header's column spans."""

    def __init__(self, colNamePart: str, backend: str):
        # Substring that uniquely identifies the column-header row.
        self.colNamePart = colNamePart
        self.backend = backend

    def explain(self, article: str) -> List[List[str]]:
        """Split *article* into rows of cell strings (header row included).

        NOTE(review): `lines` is only assigned inside the cache-miss branch,
        so calling explain() twice on the same instance would raise NameError
        — confirm instances are single-use.
        """
        if not hasattr(self, 'res'):
            # Cache the column spans computed from the header row.
            lines: List[str] = article.splitlines()
            part: str = self.colNamePart
            colNames: str = [line for line in lines if part in line][0]
            self.res: List[UDict] = Preprocessing.getColumnsAndIndex(colNames)
            self.columns_row: str = colNames

        def cellunit(line, tups):
            # Slice one cell; left-trim when it starts flush with the column.
            tupLast, tup = tups
            if line[tup.inf] == ' ':
                return line[tup.inf:tup.sup + 1]
            return Preprocessing.resolveSpace(line[tupLast.sup + 1:tup.sup + 1])

        columns_row: str = self.columns_row
        # Everything from the header row down is treated as table content.
        source: List[str] = lines[lines.index(columns_row):]
        # Pair consecutive spans; the first column serves only as the left
        # boundary of the second.
        tupsS = list(zip(self.res[:-1], self.res[1:]))
        retIter = map(lambda line: map(cellunit, repeat(line), tupsS), source)
        return [list(row) for row in retIter]
# In[12]:
import pandas as pd
backend_sign = dict(dssp = " # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O ",
# part of the row which defines the attributes of our datas.
)
def bio_parse(filename, backend='dssp'):
    """Parse a fixed-width bioinformatics file into a pandas DataFrame.

    Args:
        filename: path to the input file.
        backend: format key into *backend_sign* (currently only 'dssp').

    Raises:
        SyntaxError: when *backend* is not a supported format.
    """
    # Bug fix: the original raised only AFTER the return statement
    # (unreachable code) and an unknown backend produced a bare KeyError;
    # validate the backend up front instead.
    if backend not in backend_sign:
        raise SyntaxError("Unsupported backend: {backend}".format(backend=backend))
    with open(filename) as toRead:
        string = toRead.read()
    parser = Parser(colNamePart=backend_sign[backend], backend=backend)
    results = parser.explain(string)
    # First parsed row is the header; the rest are data rows.
    return pd.DataFrame(results[1:], columns=tuple(results[0]))
# In[13]:
if __name__ == '__main__':
import pandas as pd
df = bio_parse('../dssp/1a00.dssp')
print(df)
# In[14]:
if __name__ == '__main__':
print(df.columns)
# In[15]:
if __name__ == '__main__':
print( (len(set(df.AA)), len(set(df.STRUCTURE))) )
| # coding: utf-8
# In[11]:
from typing import List
from .tools.Dict import Dict as UDict
from copy import deepcopy
from itertools import repeat
import re
tokenize = re.compile("[\w\.,]+")
class Preprocessing:
    """Static helpers for locating fixed-width columns in a header row and
    normalising cell text."""

    @staticmethod
    def getColumnsAndIndex(row):
        """Return a list of UDict(inf, sup) spans — inclusive character index
        ranges — one per whitespace-separated column label in *row*."""
        res = []
        status = False
        begin = -1
        for i, ch in enumerate(row):
            if status:
                if ch != " ":
                    continue
                else:
                    status = False
                    res.append(UDict(inf=begin, sup=(i - 1)))
            else:
                if ch != " ":
                    status = True
                    begin = i
        # Bug fix: a label running to the end of the row (no trailing space)
        # was previously dropped; close the final span here.
        if status:
            res.append(UDict(inf=begin, sup=len(row) - 1))
        return res

    @staticmethod
    def resolveSpace(x):
        """Strip leading spaces from *x*: returns *x* unchanged when it has no
        leading space, the left-trimmed text otherwise, and None when *x* is
        non-empty but all spaces."""
        if x.startswith(" "):
            for i, ch in enumerate(x):
                if ch != ' ':
                    return x[i:]
            return None
        return x
class Parser:
    """Fixed-width table parser: locates the header row via *colNamePart* and
    slices every subsequent line according to the header's column spans."""

    def __init__(self, colNamePart: str, backend: str):
        # Substring that uniquely identifies the column-header row.
        self.colNamePart = colNamePart
        self.backend = backend

    def explain(self, article: str) -> List[List[str]]:
        """Split *article* into rows of cell strings (header row included).

        NOTE(review): `lines` is only assigned inside the cache-miss branch,
        so calling explain() twice on the same instance would raise NameError
        — confirm instances are single-use.
        """
        if not hasattr(self, 'res'):
            # Cache the column spans computed from the header row.
            lines: List[str] = article.splitlines()
            part: str = self.colNamePart
            colNames: str = [line for line in lines if part in line][0]
            self.res: List[UDict] = Preprocessing.getColumnsAndIndex(colNames)
            self.columns_row: str = colNames

        def cellunit(line, tups):
            # Slice one cell; left-trim when it starts flush with the column.
            tupLast, tup = tups
            if line[tup.inf] == ' ':
                return line[tup.inf:tup.sup + 1]
            return Preprocessing.resolveSpace(line[tupLast.sup + 1:tup.sup + 1])

        columns_row: str = self.columns_row
        # Everything from the header row down is treated as table content.
        source: List[str] = lines[lines.index(columns_row):]
        # Pair consecutive spans; the first column serves only as the left
        # boundary of the second.
        tupsS = list(zip(self.res[:-1], self.res[1:]))
        retIter = map(lambda line: map(cellunit, repeat(line), tupsS), source)
        return [list(row) for row in retIter]
# In[12]:
import pandas as pd
backend_sign = dict(dssp = " # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O ",
# part of the row which defines the attributes of our datas.
)
def bio_parse(filename, backend='dssp'):
    """Parse a fixed-width bioinformatics file into a pandas DataFrame.

    Args:
        filename: path to the input file.
        backend: format key into *backend_sign* (currently only 'dssp').

    Raises:
        SyntaxError: when *backend* is not a supported format.
    """
    # Bug fix: the original raised only AFTER the return statement
    # (unreachable code) and an unknown backend produced a bare KeyError;
    # validate the backend up front instead.
    if backend not in backend_sign:
        raise SyntaxError("Unsupported backend: {backend}".format(backend=backend))
    with open(filename) as toRead:
        string = toRead.read()
    parser = Parser(colNamePart=backend_sign[backend], backend=backend)
    results = parser.explain(string)
    # First parsed row is the header; the rest are data rows.
    return pd.DataFrame(results[1:], columns=tuple(results[0]))
# In[13]:
if __name__ == '__main__':
import pandas as pd
df = bio_parse('../dssp/1a00.dssp')
print(df)
# In[14]:
if __name__ == '__main__':
print(df.columns)
# In[15]:
if __name__ == '__main__':
print( (len(set(df.AA)), len(set(df.STRUCTURE))) ) | en | 0.48772 | # coding: utf-8 # In[11]: # In[12]: # RESIDUE AA STRUCTURE BP1 BP2 ACC N-H-->O ", # part of the row which defines the attributes of our datas. # In[13]: # In[14]: # In[15]: | 3.060603 | 3 |
app/routes.py | chrsstrm/rallycall | 0 | 6618143 | <reponame>chrsstrm/rallycall<gh_stars>0
"""
Rally Call
This app is designed to be a backup communication hub which allows persons to call in and
either leave a voice memo or listen to voice memos from other members of their group.
Copyright 2021 <NAME> <<EMAIL>>
This application is licensed under the terms of BSD-3.
Please refer to LICENSE in the project repository for details.
"""
from app import app
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import exc, or_, desc
from app import db
from flask import jsonify, request, render_template, url_for, redirect, flash, session, Response, send_file, send_from_directory
from flask_migrate import Migrate
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, current_user, user_registered
from flask_security.utils import encrypt_password, verify_password, hash_password, login_user, send_mail, logout_user
from flask_security.decorators import roles_required, roles_accepted, login_required
import os
from app.models import Users, Role, Crews, Messages, AppSafeConfig
from app.forms import CrewSettings, CrewDelete
from twilio.twiml.voice_response import VoiceResponse, Say, Gather, Record, Play, Hangup
from twilio.rest import Client
import arrow
basedir = os.path.abspath(os.path.dirname(__file__))
user_datastore = SQLAlchemyUserDatastore(db, Users, Role)
security = Security(app, user_datastore)
@app.before_first_request
def bootstrap_app():
    """Bootstrap the Flask app.

    On first request we make sure the app is properly bootstrapped: create
    the database schema, the three user roles and a pre-generated admin
    user, and point the Twilio inbound number's voice webhook at this app.
    (Set up your Twilio account and buy a number before booting the app.)

    Roles:
    1. System Admin - controls the entire system, unlimited permissions.
    2. Account Admin (crew_admin) - owner/administrator of a Crew.
    3. Basic User - a member of a Crew, least privileges.

    *Runs on the first request, not at app start.* To bootstrap, start the
    app, visit / or /login, then promptly use Reset Password to change the
    System Admin password.
    """
    db.create_all()
    app.logger.debug('doing bootstrap')
    bootstrap_email = app.config['BOOTSTRAP_ADMIN_EMAIL']
    # Check for roles and create any that are missing.
    admin = user_datastore.find_role('admin')
    if not admin:
        user_datastore.create_role(name='admin', description='admin user role')
        db.session.commit()
        admin = user_datastore.find_role('admin')
    crew_admin = user_datastore.find_role('crew_admin')
    if not crew_admin:
        user_datastore.create_role(name='crew_admin', description='crew_admin user role')
        db.session.commit()
    basic_user = user_datastore.find_role('basic_user')
    if not basic_user:
        user_datastore.create_role(name='basic_user', description='basic user role')
        db.session.commit()
    # Check for the system admin user; create it and grant 'admin' if absent.
    admin_user = user_datastore.find_user(email=bootstrap_email)
    if not admin_user:
        user_datastore.create_user(email=bootstrap_email, password=hash_password(app.config['BOOTSTRAP_ADMIN_PASS']))
        db.session.commit()
        admin_user = user_datastore.find_user(email=bootstrap_email)
        user_datastore.add_role_to_user(admin_user, admin)
        db.session.commit()
    # Update our Twilio account to make sure the inbound number we're using
    # for this app has a proper webhook setting.
    number_sid = ''
    try:
        client = Client(app.config['TWILIO_ACCOUNT_SID'], app.config['TWILIO_AUTH_TOKEN'])
        incoming_phone_numbers = client.incoming_phone_numbers.list(phone_number=app.config['TWILIO_INBOUND_NUMBER'], limit=1)
        for record in incoming_phone_numbers:
            number_sid = record.sid
    except Exception as _:
        app.logger.debug("Attempt to find given Twilio number failed. Check account credentials and Twilio Inbound Number and try again.")
        # Bug fix: if Client() itself raised, `client` is unbound and the
        # update below would mask a NameError; bail out instead.
        return
    if not number_sid:
        # Bug fix: previously the update was attempted with an empty SID,
        # which could only fail; skip it when the number was not found.
        app.logger.debug("Twilio inbound number not found; webhook not updated.")
        return
    try:
        # Point the inbound number's voice webhook at 'account_lookup_route',
        # keeping Twilio in sync with wherever this app is deployed without
        # manual changes. The SMS URL is deliberately the empty string to
        # reject any SMS sent to the number.
        _ = client \
            .incoming_phone_numbers(number_sid) \
            .update(
                sms_url='',
                voice_method='POST',
                voice_url=url_for('account_lookup_route', _external=True, _scheme='https')
            )
    except Exception as _:
        app.logger.debug("Could not update Twilio inbound number properties")
def friendly_date(timestamp):
    """
    Template filter: render a Message creation timestamp as a relative,
    human-friendly phrase (e.g. "2 hours ago").
    """
    return arrow.get(timestamp).humanize()
def utc_date(timestamp):
    """
    Template filter: format a timestamp as a cookie-style UTC date string.
    """
    return arrow.get(timestamp).format(arrow.FORMAT_COOKIE)
def currency_format(value):
    """Template filter: render *value* as a dollar amount with thousands
    separators and two decimal places."""
    return f"${float(value):,.2f}"
app.add_template_filter(friendly_date)
app.add_template_filter(utc_date)
app.add_template_filter(currency_format)
@app.context_processor
def inject_crew():
    """Make sure to include the crew object in each route
    for use in the template.
    """
    user = current_user
    # Only users attached to a Crew (crew admins or basic members) expose it;
    # system admins and anonymous visitors get crew=None.
    if user.has_role('crew_admin') or user.has_role('basic_user'):
        return dict(crew=Crews.query.get(user.crew_id))
    else:
        return dict(crew=None)
@app.context_processor
def inject_config():
    """Make sure to include the global configs in each route
    for use in the template.
    """
    # A fresh AppSafeConfig per rendered request, exposed as `config`.
    config = AppSafeConfig()
    return dict(config=config)
@app.route('/')
def index_route():
    '''
    The public homepage.
    Renders the static landing page; no authentication required.
    '''
    return render_template('root-index.html')
@user_registered.connect_via(app)
def user_registered_sighandler(sender, user, confirm_token):
    '''
    We're going to use the Flask-Security built in /register endpoint to register
    a user, but we will also need to assign this user a role and bootstrap their
    Crew (any user using /register will be a Crew admin, basic users do not need to register).
    Hook into the user_registered Signal to make these changes when someone registers.
    1. Set user's role to 'crew_admin'
    2. Bootstrap the Crew
    '''
    # this next piece should maybe be wrapped in a try block.
    # if the user role wasn't found or couldn't be added to the user,
    # what would we do? delete the user and tell them to register again?
    # we wouldn't want a random user in the system with no role or crew.
    crew_admin = user_datastore.find_role('crew_admin')
    user_datastore.add_role_to_user(user, crew_admin)
    db.session.commit()
    crew = Crews()
    # NOTE(review): crew.id is read before the Crew is added/committed; this
    # only works if Crews() assigns its own id in __init__ (e.g. a UUID) —
    # confirm against the model.
    user.crew_id = crew.id
    user.crew_admin = True
    db.session.add(crew)
    db.session.commit()
    # Log the new crew admin straight in after registration.
    login_user(user)
@app.route('/home')
@login_required
@roles_accepted('crew_admin', 'basic_user', 'admin')
def home_route():
    """Dashboard.

    System admins see global message/crew counts plus Twilio usage records;
    crew admins and basic users get the plain dashboard (all stats None).
    """
    if current_user.has_role('admin'):
        message_count = Messages.query.count()
        crew_count = Crews.query.count()
        # Bug fix: the original contained a redundant double assignment
        # (`client = client = Client(...)`).
        client = Client(app.config['TWILIO_ACCOUNT_SID'], app.config['TWILIO_AUTH_TOKEN'])
        inbound_calls = client.usage.records.list(category="calls-inbound")[0]
        recordings = client.usage.records.list(category="calls-recordings")[0]
        totalprice = client.usage.records.list(category="totalprice")[0]
    else:
        message_count = None
        crew_count = None
        inbound_calls = None
        recordings = None
        totalprice = None
    return render_template('home.html', message_count=message_count, crew_count=crew_count, recordings=recordings, inbound_calls=inbound_calls, totalprice=totalprice)
@app.route('/home/members', methods=['GET'])
@login_required
@roles_accepted('crew_admin')
def home_members_route():
    """Crew-admin view of the Crew's members; member data is rendered from
    the `crew` object injected by the inject_crew context processor."""
    return render_template('home_members.html')
@app.route('/home/messages', methods=['GET'])
@login_required
@roles_accepted('crew_admin', 'basic_user')
def home_messages_route():
    """
    Display all Messages for the Crew that are not deleted.
    Paginated, newest first; soft-deleted messages are filtered out.
    """
    crew = Crews.query.get(current_user.crew_id)
    page = request.args.get("page", 1, type=int)
    messages = Messages.\
        query.\
        filter_by(crew_id=crew.id).\
        filter(~Messages.status.in_(['deleted'])).\
        order_by(Messages.created.desc()).\
        paginate(page, app.config['POSTS_PER_PAGE'], True)
    # Pagination links for the template (None when at either end).
    next_url = url_for('home_messages_route', page=messages.next_num) if messages.has_next else None
    prev_url = url_for('home_messages_route', page=messages.prev_num) if messages.has_prev else None
    return render_template('home_messages.html', messages=messages.items, next=next_url, prev=prev_url)
@app.route('/home/messages/<id>', methods=['DELETE'])
@login_required
@roles_accepted('crew_admin')
def home_del_message_route(id):
    """
    Route just for deleting Messages.
    We don't actually do a physical delete though, just set status to 'deleted'.
    Returns JSON indicating whether the soft-delete succeeded.
    """
    try:
        message = Messages.query.get(id)
        if message:
            message.status = 'deleted'
            db.session.commit()
            flash('Your message was deleted.', category='success')
        else:
            flash('This message does not exist.', category='error')
            # Bug fix: previously reported success even when no message
            # existed; signal the failure to the calling client instead.
            return jsonify(success=False), 404, {'ContentType': 'application/json'}
    except Exception as _:
        flash('This message could not be deleted.', category='error')
        # Bug fix: previously reported success even after an exception.
        return jsonify(success=False), 500, {'ContentType': 'application/json'}
    return jsonify(success=True), 200, {'ContentType': 'application/json'}
@app.route('/home/settings', methods=['GET', 'POST'])
@login_required
@roles_accepted('crew_admin', 'admin')
def home_settings_route():
    """
    Option for the Crew Admin to add a Crew Name and
    'protect' the account by adding an Access Code.
    Can also delete the Crew from this view.

    NOTE(review): system admins reach this view with crew=None, so their
    form submissions are silently ignored — confirm intended.
    """
    if current_user.has_role('crew_admin'):
        crew = Crews.query.get(current_user.crew_id)
    else:
        crew = None
    form = CrewSettings(obj=crew)
    del_form = CrewDelete()
    # Only crew admins may persist settings changes.
    if current_user.has_role('crew_admin') and form.validate_on_submit():
        try:
            crew.name = form.name.data
            # An empty access code clears the protection entirely.
            if form.access_code.data == '':
                crew.access_code = None
            else:
                crew.access_code = form.access_code.data
            db.session.commit()
            flash('Your crew settings have been saved.', category='success')
        except Exception as _:
            flash('Your settings were not saved, an error ocurred.', category='error')
    return render_template('home_settings.html', form=form, del_form=del_form)
@app.route('/home/crews', methods=['GET','POST'])
@login_required
@roles_accepted('admin')
def home_admin_crews_route():
    """
    System Admin route for viewing all Crews in the system (paginated).
    """
    page = request.args.get("page", 1, type=int)
    crews = Crews.query.paginate(page, app.config['POSTS_PER_PAGE'], True)
    next_url = url_for('home_admin_crews_route', page=crews.next_num) if crews.has_next else None
    prev_url = url_for('home_admin_crews_route', page=crews.prev_num) if crews.has_prev else None
    # Bug fix: next/prev URLs were computed but never handed to the template,
    # so pagination links could not render (cf. home_messages_route).
    return render_template('home_crews.html', crews=crews.items, next=next_url, prev=prev_url)
@app.route('/home/crews/<id>', methods=['GET','POST', 'DELETE'])
@login_required
@roles_accepted('admin')
def home_admin_crew_route(id):
    """
    System Admin route for viewing details of a single Crew.
    Can delete or suspend Crew from this view.
    GET renders the detail page; POST ?action=suspend|restore toggles the
    Crew's status; DELETE soft-deletes the Crew and all of its members.
    """
    try:
        crew = Crews.query.get(id)
        if not crew:
            raise Exception('No crew found.')
    except Exception as _:
        flash('Could not access this Crew.', category='error')
        return redirect(url_for('home_admin_crews_route'))
    if request.method == 'DELETE':
        """
        System admin handler for 'deleting' a Crew.
        TODO: This can be refactored along with the crew-admin delete endpoint by
        adding Crew methods for deleting users and the Crew itself instead of
        repeating code in multiple endpoint handlers.
        """
        try:
            """
            Deactivate all Crew users and change their status.
            active=False will not allow them to log in.
            """
            users = Users.query.filter_by(crew_id=id).all()
            for user in users:
                user.active = False
                user.status = 'deleted'
            db.session.commit()
        except Exception as _:
            flash('Could not remove Crew members, please try again.', category='error')
            return redirect(url_for('home_admin_crew_route', id=id))
        try:
            """
            Change the Crew status to deleted
            """
            crew.status = 'deleted'
            db.session.commit()
        except Exception as _:
            flash('Could not remove Crew, please try again.', category='error')
            return redirect(url_for('home_admin_crew_route', id=id))
        flash('Crew deleted.', category='success')
        return redirect(url_for('home_admin_crews_route'))
    if request.method == 'POST':
        """
        This handler will look for the 'action' param to determine
        whether we are going to restore or suspend the account.
        """
        if 'action' in request.args:
            if request.args.get('action') == 'suspend':
                """
                Set status of the Crew to suspended.
                This will not prevent the admin or users from
                logging in, but they cannot listen to or record messages.
                """
                crew.status = 'suspended'
                db.session.commit()
                flash('Crew suspended.', category='success')
                return redirect(url_for('home_admin_crew_route', id=id))
            if request.args.get('action') == 'restore':
                """
                Restore a Crew.
                This will change the status of the Crew and restore login
                access for all users.
                This could conflict with a user that was previously removed from
                a Crew, but I'm not sure how to solve this given the structure we
                currently have. YOLO.
                """
                crew.status = 'active'
                db.session.commit()
                users = Users.query.filter_by(crew_id=id).all()
                for user in users:
                    user.active = True
                    user.status = 'active'
                db.session.commit()
                flash('Crew restored.', category='success')
                return redirect(url_for('home_admin_crew_route', id=id))
        else:
            flash('Invalid request.', category='error')
            return redirect(url_for('home_admin_crew_route', id=id))
    # GET: render the crew detail page with its admin users.
    admins = crew.members.filter_by(crew_admin = True).all()
    return render_template('home_crew.html', crew=crew, admins=admins)
@app.route('/crews', methods=['POST'])
@login_required
@roles_accepted('crew_admin')
def crew_delete_route():
    """
    Crews route that is currently strictly for deleting the entire
    account.
    Will prompt for the admin's password and then:
    - deactivate all members
    - deactivate the admin
    - set the status of the Crew to deleted

    NOTE(review): when form validation fails, execution falls through
    without an explicit response — confirm intended.
    """
    form = CrewDelete()
    crew = Crews.query.get(current_user.crew_id)
    if form.validate_on_submit():
        try:
            if verify_password(form.password.data, current_user.password):
                """
                Request the admin's password before we destroy the account
                """
                pass
            else:
                flash('Your password was not correct.', category='error')
                return redirect(url_for('home_settings_route'))
        except Exception as _:
            flash('There was an error deleting this account.', category='error')
            return redirect(url_for('home_settings_route'))
        try:
            """
            Deactivate all Crew users and change their status.
            active=False will not allow them to log in.
            """
            users = Users.query.filter_by(crew_id=crew.id).all()
            for user in users:
                user.active = False
                user.status = 'deleted'
            db.session.commit()
        except Exception as _:
            flash('Could not remove Crew members, please try again.', category='error')
            return redirect(url_for('home_settings_route'))
        try:
            """
            Change the Crew status to deleted
            """
            crew.status = 'deleted'
            db.session.commit()
        except Exception as _:
            flash('Could not remove Crew, please try again.', category='error')
            return redirect(url_for('home_settings_route'))
        # finally, log out the user
        logout_user()
        return redirect(url_for('index_route'))
@app.route('/account-lookup', methods=["POST"])
def account_lookup_route():
    '''
    This is the Twilio entrypoint for all incoming new calls.
    This endpoint will play a welcome message and then gather the Account PIN to identify the account.
    We keep a failure counter in the session and end the call after repeated bad PINs (just
    basic brute force protection).
    If this request contains the 'Digits' var, then we have collected the Account PIN from the user and must verify it.
    Successful verification will send the call to the 'main_menu_route' to offer the user the main menu.
    If the Crew account contains an Access Code that is not None, we must send the call to the 'check_pin_route' endpoint.
    Every branch returns TwiML XML as a string (or a 307 redirect into
    another TwiML-producing route); Twilio consumes the response.

    NOTE(review): an earlier docstring referenced a 'Protected_Session'
    session flag, but nothing in this file sets one — the access-code gate
    is enforced by redirecting to check_pin_route instead.
    '''
    resp = VoiceResponse()
    # if the request contains 'Digits' we know this is the result of a Gather and must check for the account pin
    if 'Digits' in request.values:
        provided_pin = str(request.values['Digits'])
        crew = Crews.query.filter_by(account_pin=provided_pin).first()
        # brute force guard: once five failed attempts are recorded, reject any further submission
        if 'account_pin_tries' in session and session['account_pin_tries'] == 5:
            return redirect(url_for('hangup_route'), code=307)
        if crew is None:
            # crew not found or bad pin: count the failure and re-prompt
            if not 'account_pin_tries' in session:
                session['account_pin_tries'] = 1
            else:
                session['account_pin_tries'] += 1
            gather = Gather(action=app.config['APP_BASE_URL'] + url_for('account_lookup_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
            gather.say(f"Your input was not correct. Please input your account pin and then press the pound key.", voice=app.config['TWILIO_VOICE_SETTING'])
            resp.append(gather)
            return str(resp)
        elif crew.status in ['suspended', 'deleted']:
            # crew is deleted or deactivated by an admin, kill call
            resp.say(f"This account is deactivated. Goodbye.", voice=app.config['TWILIO_VOICE_SETTING'])
            hangup = Hangup()
            resp.append(hangup)
            return str(resp)
        elif crew.access_code is not None:
            # crew is using an access code, send to the check pin route
            session['sessionCrewId'] = crew.id
            return redirect(url_for('check_pin_route'), code=307)
        else:
            # crew exists and is in good standing, proceed
            session['sessionCrewId'] = crew.id
            return redirect(url_for('main_menu_route'), code=307)
    # this block will be run during the very first POST to this route, signaling a new incoming call
    gather = Gather(action=app.config['APP_BASE_URL'] + url_for('account_lookup_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
    gather.say(f"You have reached { app.config['APP_NAME'] }. Please enter your account pin and then press the pound key.", voice=app.config['TWILIO_VOICE_SETTING'])
    resp.append(gather)
    return str(resp)
@app.route('/check-pin', methods=['GET', 'POST'])
def check_pin_route():
    '''
    If the Crew account is "protected" then we must direct to this endpoint for collection of the account Access Code.
    Since the user may use either the Crew access code or their individual access code, we have a Crews func that will
    gather up all possible codes and load them in a list. We just check the list to see if what the user input matches any
    of the entries. Again, not super secure, this is just a best intentions effort to protect messages.
    A brute force guard rejects further submissions once five failed
    attempts have been recorded in the session.
    Returns TwiML XML as a string, or a 307 redirect into the menu /
    hangup flow.
    '''
    resp = VoiceResponse()
    # a crew must already have been identified by account_lookup_route
    if not 'sessionCrewId' in session:
        return redirect(url_for('hangup_route'), code=307)
    else:
        crew = Crews.query.get(session['sessionCrewId'])
        # all acceptable codes for this crew (per the docstring: the crew-wide
        # code plus each member's individual code)
        codes_list = crew.gather_access_codes()
    if 'Digits' in request.values:
        # brute force guard: once five failed attempts are recorded, reject any further submission
        if 'access_code_tries' in session and session['access_code_tries'] == 5:
            return redirect(url_for('hangup_route'), code=307)
        provided_code = str(request.values['Digits'])
        if provided_code in codes_list:
            # all is good, send to the menu
            return redirect(url_for('main_menu_route'), code=307)
        else:
            # wrong code: count the failure and re-prompt
            if not 'access_code_tries' in session:
                session['access_code_tries'] = 1
            else:
                session['access_code_tries'] += 1
            gather = Gather(action=app.config['APP_BASE_URL'] + url_for('check_pin_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
            gather.say(f"Your input was not correct. Please input your access code and then press the pound key.", voice=app.config['TWILIO_VOICE_SETTING'])
            resp.append(gather)
            return str(resp)
    # first visit to this route: prompt for the access code
    gather = Gather(action=app.config['APP_BASE_URL'] + url_for('check_pin_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
    gather.say("This account is protected. Please enter the access code, followed by the pound sign.", voice=app.config['TWILIO_VOICE_SETTING'])
    resp.append(gather)
    return str(resp)
@app.route('/main-menu', methods=['GET', 'POST'])
def main_menu_route():
    '''
    The primary route for our Twilio voice menu.
    Hitting this route with no request params means we are starting fresh with a new call.
    At this stage we should provide a Message count and offer to play Messages or record a new Message.
    We will use 1 to listen to Messages and 2 to record a Message.
    The presence of 'Digits' in our request vars will indicate which route we will redirect to:
    1 - redirect to the 'play_route'
    2 - redirect to the 'record_route'
    3 - fall through and replay this menu
    Returns TwiML XML as a string, or a 307 redirect.
    '''
    # Guard: the caller must already have been identified. session.get avoids
    # the KeyError the old session['sessionCrewId'] lookup raised when the
    # key was never set (e.g. a direct POST to this URL).
    if session.get('sessionCrewId') is None:
        return redirect(url_for('hangup_route'), code=307)
    crew = Crews.query.get(session['sessionCrewId'])
    # Exclude soft-deleted messages from both the announced count and the
    # playable list (fixes the long-standing TODO; matches the dashboard
    # filter in home_messages_route).
    message_query = Messages.query.\
        filter_by(crew_id=crew.id).\
        filter(~Messages.status.in_(['deleted']))
    message_count = message_query.count()
    # Rebuild the playable list on every visit — it can change between visits
    # (new recordings, admin deletions).
    messages = message_query.order_by(Messages.created.desc()).all()
    session['messages'] = Messages.make_ordered_list(messages)
    resp = VoiceResponse()
    # if the request contains 'Digits' this is the result of a Gather: dispatch on the choice
    if 'Digits' in request.values:
        choice = str(request.values['Digits'])
        if choice == '1':
            # user wants to listen, redirect to play route
            return redirect(url_for('play_route'), code=307)
        elif choice == '2':
            # user wants to record, redirect to record route
            return redirect(url_for('record_route'), code=307)
        elif choice == '3':
            # explicit return to the main menu: fall through to the prompt below
            pass
        else:
            # invalid choice, try again
            gather = Gather(action=app.config['APP_BASE_URL'] + url_for('main_menu_route'), method='POST', input="dtmf", num_digits=1, timeout=4)
            gather.say(f"That was not a valid selection. To listen to messages, press 1. To record a message, press 2.", voice=app.config['TWILIO_VOICE_SETTING'])
            resp.append(gather)
            return str(resp)
    # this block runs on the very first POST to this route (or after choice 3):
    # announce the message count and offer the menu
    gather = Gather(action=app.config['APP_BASE_URL'] + url_for('main_menu_route'), method='POST', input="dtmf", num_digits=1, timeout=3, finishOnKey='#')
    gather.say(f"Main menu. You have {message_count} messages. To listen to messages, press 1. To record a message, press 2. If you are finished, you may hang up.", voice=app.config['TWILIO_VOICE_SETTING'])
    resp.append(gather)
    return str(resp)
@app.route('/record', methods=['GET', 'POST'])
def record_route():
    '''
    The route responsible for handling recording a new Message.
    From a fresh state we simply prompt a Record action.
    If both the 'RecordingSid' and 'RecordingUrl' vars are present, we are returning to this
    menu directly after recording a Message (so handle creating a new Message).
    After the new Message has been created and committed, we then redirect back to the 'main_menu_route'.
    '''
    resp = VoiceResponse()
    # Bug fix: the original condition was
    #   if 'RecordingSid' and 'RecordingUrl' in request.values:
    # which only tested 'RecordingUrl' — the bare string 'RecordingSid' is
    # always truthy. Test both keys explicitly.
    if 'RecordingSid' in request.values and 'RecordingUrl' in request.values:
        # we have a recording from Twilio, create a new Message and save it. Redirect to main menu.
        url = request.values['RecordingUrl']
        message = Messages(crew_id=session['sessionCrewId'], url=url)
        db.session.add(message)
        # if someone is recording messages, we can assume this is an active crew
        crew = Crews.query.get(session['sessionCrewId'])
        crew.status = 'active'
        db.session.add(crew)
        db.session.commit()
        return redirect(url_for('main_menu_route'), code=307)
    # fresh visit: prompt the caller and start a Twilio <Record> verb
    record = Record(action=app.config['APP_BASE_URL'] + url_for('record_route'), method='POST', maxLength=app.config['TWILIO_RECORDING_MAXLENGTH'], playBeep=True, timeout=3, finishOnKey='#')
    say = Say(f"Record your message after the beep. Press the pound key when you are finished.", voice=app.config['TWILIO_VOICE_SETTING'])
    resp.append(say)
    resp.append(record)
    return str(resp)
@app.route('/play', methods=['GET', 'POST'])
def play_route():
    '''
    This route is responsible for playing existing Messages.
    Messages play in reverse chronological order (newest first); the list of
    playable message dicts is placed in the session by main_menu_route.
    Instead of trying to keep a cursor for our position in the list, we pop
    each Message off the front as it is played. This means we cannot go
    backwards — the caller returns to the main menu and starts again.
    '''
    resp = VoiceResponse()
    # session.get avoids a KeyError if this URL is reached without passing
    # through main_menu_route first.
    message_list = session.get('messages')
    if not message_list:
        # no messages to play, send back to main menu
        gather = Gather(action=app.config['APP_BASE_URL'] + url_for('main_menu_route'), method='POST', input="dtmf", num_digits=1, timeout=3)
        gather.say(f"There are no more messages. Press three to return to the main menu, or simply hang up.", voice=app.config['TWILIO_VOICE_SETTING'])
        resp.append(gather)
        return str(resp)
    if 'Digits' in request.values:
        """
        We really only need to look for a prompt for the main menu.
        If the user wants to skip, this endpoint gets reloaded and the
        next message is popped automatically.
        """
        choice = str(request.values['Digits'])
        if choice == '3':
            # user wants to go to main menu
            return redirect(url_for('main_menu_route'), code=307)
    message_to_play = message_list.pop(0)
    # Bug fix: reassign the list so Flask marks the cookie session as
    # modified. Mutating the nested list in place is not detected by the
    # session machinery, so the pop never persisted across requests and the
    # same message replayed on every skip.
    session['messages'] = message_list
    gather = Gather(action=app.config['APP_BASE_URL'] + url_for('play_route'), method='POST', input="dtmf", num_digits=1)
    gather.say(f"Playing message. To skip to the next message, press 1. To go back to the main menu, press 3", voice=app.config['TWILIO_VOICE_SETTING'])
    gather.play(f"{ message_to_play['url']}.mp3")
    resp.append(gather)
    return str(resp)
@app.route('/hangup', methods=['GET', 'POST'])
def hangup_route():
    """
    Terminate the call cleanly: drop all session state (PIN counters,
    crew id, message list), then emit a TwiML <Hangup> verb so Twilio
    ends the connection.
    """
    session.clear()
    response = VoiceResponse()
    response.append(Hangup())
    return str(response)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page for any URL that does not exist."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def system_err(e):
    """
    Render a friendly 500 page instead of debugger output or a
    browser-generated error.
    A system admin should ideally be alerted when this handler fires, but
    monitoring and error collection is out of scope for this iteration of
    the project.
    """
    body = render_template('500.html')
    return body, 500
'''
TODO items
TODO - build out crew members dashboard
TODO - change crew status to active after any message activity
TODO - fix dashboard mobile nav
TODO - fix messages mobile nav
TODO - the JS location.assign after message deletion loses the flashed success message.
TODO - crew invitation system
''' | """
Rally Call
This app is designed to be a backup communication hub which allows persons to call in and
either leave a voice memo or listen to voice memos from other members of their group.
Copyright 2021 <NAME> <<EMAIL>>
This application is licensed under the terms of BSD-3.
Please refer to LICENSE in the project repository for details.
"""
from app import app
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import exc, or_, desc
from app import db
from flask import jsonify, request, render_template, url_for, redirect, flash, session, Response, send_file, send_from_directory
from flask_migrate import Migrate
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, current_user, user_registered
from flask_security.utils import encrypt_password, verify_password, hash_password, login_user, send_mail, logout_user
from flask_security.decorators import roles_required, roles_accepted, login_required
import os
from app.models import Users, Role, Crews, Messages, AppSafeConfig
from app.forms import CrewSettings, CrewDelete
from twilio.twiml.voice_response import VoiceResponse, Say, Gather, Record, Play, Hangup
from twilio.rest import Client
import arrow
# Absolute path of the directory containing this module; useful for building
# file paths relative to the application source.
basedir = os.path.abspath(os.path.dirname(__file__))
# Wire Flask-Security to our SQLAlchemy models for users and roles.
user_datastore = SQLAlchemyUserDatastore(db, Users, Role)
security = Security(app, user_datastore)
@app.before_first_request
def bootstrap_app():
    """Bootstrap the Flask app.
    On first request we want to make sure our app is properly bootstrapped.
    This involves setting up our user roles and pre-generating an admin user.
    We also want to make sure our Twilio inbound number webhook is properly set (you will have
    to set up your Twilio account and buy a number prior to booting this app.)
    We have three roles:
    1. System Admin - The super admin who controls the entire system - unlimited permissions.
    2. Account Admin - The owner of a Crew. These permissions allow administering the Crew and its members.
    3. Basic User - A member of a Crew, least priviledges.
    *This func will only run when the first _request_ is made, not when the app starts*
    To properly bootstrap the app, start the app and then visit / or /login, then promptly use the Reset Password function
    to change the System Admin password.
    """
    # Create any missing tables before inserting bootstrap rows.
    db.create_all()
    app.logger.debug('doing bootstrap')
    bootstrap_email = app.config['BOOTSTRAP_ADMIN_EMAIL']
    # check for roles — each is created only if missing, so re-running this
    # bootstrap is idempotent
    admin = user_datastore.find_role('admin')
    if not admin:
        user_datastore.create_role(name='admin', description='admin user role')
        db.session.commit()
        # re-fetch so the role object is available for add_role_to_user below
        admin = user_datastore.find_role('admin')
    crew_admin = user_datastore.find_role('crew_admin')
    if not crew_admin:
        user_datastore.create_role(name='crew_admin', description='crew_admin user role')
        db.session.commit()
    basic_user = user_datastore.find_role('basic_user')
    if not basic_user:
        user_datastore.create_role(name='basic_user', description='basic user role')
        db.session.commit()
    # check for system admin user; created with the bootstrap password from config
    admin_user = user_datastore.find_user(email=bootstrap_email)
    if not admin_user:
        user_datastore.create_user(email=bootstrap_email, password=hash_password(app.config['BOOTSTRAP_ADMIN_PASS']))
        db.session.commit()
        admin_user = user_datastore.find_user(email=bootstrap_email)
        user_datastore.add_role_to_user(admin_user, admin)
        db.session.commit()
    # Update our Twilio account to make sure the inbound number we're using for this app
    # has a proper webhook setting.
    number_sid = ''
    try:
        client = Client(app.config['TWILIO_ACCOUNT_SID'], app.config['TWILIO_AUTH_TOKEN'])
        incoming_phone_numbers = client.incoming_phone_numbers.list(phone_number=app.config['TWILIO_INBOUND_NUMBER'], limit=1)
        for record in incoming_phone_numbers:
            number_sid = record.sid
    except Exception as _:
        app.logger.debug("Attempt to find given Twilio number failed. Check account credentials and Twilio Inbound Number and try again.")
    try:
        # Set the webhook for our inbound number to the 'account_lookup_route' entrypoint for our inbound calls.
        # This block will make sure that our Twilio account is always up to date in terms of what the fqd
        # endpoint is. This makes it easy to port this app from dev to prod and even to new domains without
        # having to log into Twilio and make changes manually.
        # The SMS URL is deliberately set to the empty string to reject any SMS sent to the number.
        # NOTE(review): if the previous try block failed, `client` is unbound here
        # and this raises NameError, silently swallowed by the broad except —
        # confirm this fallback behavior is intended.
        _ = client \
            .incoming_phone_numbers(number_sid) \
            .update(
                sms_url='',
                voice_method='POST',
                voice_url=url_for('account_lookup_route', _external=True, _scheme='https')
            )
    except Exception as _:
        app.logger.debug("Could not update Twilio inbound number properties")
def friendly_date(timestamp):
    """
    Jinja template filter: render *timestamp* as a relative,
    human-friendly phrase (arrow's humanize(), e.g. "2 hours ago").
    """
    return arrow.get(timestamp).humanize()
def utc_date(timestamp):
    """
    Jinja template filter: render *timestamp* as a UTC date string using
    arrow's verbose COOKIE format.
    """
    return arrow.get(timestamp).format(arrow.FORMAT_COOKIE)
def currency_format(value):
    """
    Jinja template filter: render *value* as US dollars with a thousands
    separator and two decimals, e.g. 1234.5 -> "$1,234.50".
    Accepts anything float() accepts (int, float, numeric string).
    """
    amount = float(value)
    return f"${amount:,.2f}"
# Register the custom Jinja filters defined above so templates can write
# {{ value|friendly_date }}, {{ value|utc_date }} and {{ value|currency_format }}.
app.add_template_filter(friendly_date)
app.add_template_filter(utc_date)
app.add_template_filter(currency_format)
@app.context_processor
def inject_crew():
    """
    Make the current user's Crew available to every template as ``crew``.
    System admins (and any user without a crew role) get ``crew=None``.
    """
    crew = None
    if current_user.has_role('crew_admin') or current_user.has_role('basic_user'):
        crew = Crews.query.get(current_user.crew_id)
    return {'crew': crew}
@app.context_processor
def inject_config():
    """
    Make the app-safe global configuration available to every template
    as ``config``.
    """
    return {'config': AppSafeConfig()}
@app.route('/')
def index_route():
    """Render the public homepage."""
    return render_template('root-index.html')
@user_registered.connect_via(app)
def user_registered_sighandler(sender, user, confirm_token):
    '''
    We're going to use the Flask-Security built in /register endpoint to register
    a user, but we will also need to assign this user a role and bootstrap their
    Crew (any user using /register will be a Crew admin, basic users do not need to register).
    Hook into the user_registered Signal to make these changes when someone registers.
    1. Set user's role to 'crew_admin'
    2. Bootstrap the Crew
    The new user is logged in at the end of the handler.
    '''
    # this next piece should maybe be wrapped in a try block.
    # if the user role wasn't found or couldn't be added to the user,
    # what would we do? delete the user and tell them to register again?
    # we wouldn't want a random user in the system with no role or crew.
    crew_admin = user_datastore.find_role('crew_admin')
    user_datastore.add_role_to_user(user, crew_admin)
    db.session.commit()
    # Bootstrap a fresh Crew and attach the new user to it as its admin.
    crew = Crews()
    # NOTE(review): crew.id is read before the INSERT is committed — this assumes
    # the Crews model assigns its id client-side (e.g. a UUID default set in
    # __init__). Confirm against app.models.
    user.crew_id = crew.id
    user.crew_admin = True
    db.session.add(crew)
    db.session.commit()
    # Log the freshly registered admin straight in.
    login_user(user)
@app.route('/home')
@login_required
@roles_accepted('crew_admin', 'basic_user', 'admin')
def home_route():
    """
    Dashboard landing page.
    System admins see global stats: message and crew counts plus Twilio
    usage records (inbound calls, recordings, total price). Crew admins
    and basic users get the plain dashboard with every stat set to None.
    """
    if current_user.has_role('admin'):
        message_count = Messages.query.count()
        crew_count = Crews.query.count()
        # (fixed a duplicated-assignment typo: was `client = client = Client(...)`)
        client = Client(app.config['TWILIO_ACCOUNT_SID'], app.config['TWILIO_AUTH_TOKEN'])
        # NOTE(review): [0] assumes Twilio returns at least one usage record per
        # category — confirm behavior for a brand-new Twilio account.
        inbound_calls = client.usage.records.list(category="calls-inbound")[0]
        recordings = client.usage.records.list(category="calls-recordings")[0]
        totalprice = client.usage.records.list(category="totalprice")[0]
    else:
        # non-admin users: the template hides the stats panel
        message_count = None
        crew_count = None
        inbound_calls = None
        recordings = None
        totalprice = None
    return render_template('home.html', message_count=message_count, crew_count=crew_count, recordings=recordings, inbound_calls=inbound_calls, totalprice=totalprice)
@app.route('/home/members', methods=['GET'])
@login_required
@roles_accepted('crew_admin')
def home_members_route():
    """Crew-admin view listing the members of the current Crew."""
    return render_template('home_members.html')
@app.route('/home/messages', methods=['GET'])
@login_required
@roles_accepted('crew_admin', 'basic_user')
def home_messages_route():
    """
    Paginated list of the Crew's Messages, newest first, hiding any
    message whose status is 'deleted'. Prev/next page URLs are passed to
    the template for pagination links.
    """
    crew = Crews.query.get(current_user.crew_id)
    page = request.args.get("page", 1, type=int)
    query = (Messages.query
             .filter_by(crew_id=crew.id)
             .filter(~Messages.status.in_(['deleted']))
             .order_by(Messages.created.desc()))
    messages = query.paginate(page, app.config['POSTS_PER_PAGE'], True)
    next_url = url_for('home_messages_route', page=messages.next_num) if messages.has_next else None
    prev_url = url_for('home_messages_route', page=messages.prev_num) if messages.has_prev else None
    return render_template('home_messages.html', messages=messages.items, next=next_url, prev=prev_url)
@app.route('/home/messages/<id>', methods=['DELETE'])
@login_required
@roles_accepted('crew_admin')
def home_del_message_route(id):
    """
    Route just for deleting Messages.
    We don't actually do a physical delete though, just set status to 'deleted'.
    This route does not return a view; it responds with JSON for the
    dashboard's JS caller, and user feedback is delivered via flashed
    messages on the next page load.
    """
    try:
        message = Messages.query.get(id)
        if message:
            message.status = 'deleted'
            db.session.commit()
            flash('Your message was deleted.', category='success')
        else:
            flash('This message does not exist.', category='error')
    except Exception as _:
        flash('This message could not be deleted.', category='error')
    # NOTE(review): success=True / HTTP 200 is returned even when the delete
    # failed above — the JS caller apparently relies only on the flashed
    # messages. Confirm before tightening this to an error status.
    return jsonify(success=True), 200, {'ContentType':'application/json'}
@app.route('/home/settings', methods=['GET', 'POST'])
@login_required
@roles_accepted('crew_admin', 'admin')
def home_settings_route():
    """
    Option for the Crew Admin to add a Crew Name and
    'protect' the account by adding an Access Code.
    Can also delete the Crew from this view (the delete form posts to
    crew_delete_route).
    System admins may view the page but have no crew to edit.
    """
    if current_user.has_role('crew_admin'):
        crew = Crews.query.get(current_user.crew_id)
    else:
        # system admin: no crew, form renders without a bound object
        crew = None
    form = CrewSettings(obj=crew)
    del_form = CrewDelete()
    # only crew admins may save settings; admins just view the page
    if current_user.has_role('crew_admin') and form.validate_on_submit():
        try:
            crew.name = form.name.data
            # an empty access code clears the account protection entirely
            if form.access_code.data == '':
                crew.access_code = None
            else:
                crew.access_code = form.access_code.data
            db.session.commit()
            flash('Your crew settings have been saved.', category='success')
        except Exception as _:
            flash('Your settings were not saved, an error ocurred.', category='error')
    return render_template('home_settings.html', form=form, del_form=del_form)
@app.route('/home/crews', methods=['GET','POST'])
@login_required
@roles_accepted('admin')
def home_admin_crews_route():
    """
    System Admin route for viewing all Crews in the system.
    Paginated; prev/next page URLs are now passed to the template — they
    were previously computed and then dropped, so pagination links could
    never render (matches home_messages_route's behavior).
    """
    page = request.args.get("page", 1, type=int)
    crews = Crews.query.paginate(page, app.config['POSTS_PER_PAGE'], True)
    next_url = url_for('home_admin_crews_route', page=crews.next_num) if crews.has_next else None
    prev_url = url_for('home_admin_crews_route', page=crews.prev_num) if crews.has_prev else None
    return render_template('home_crews.html', crews=crews.items, next=next_url, prev=prev_url)
@app.route('/home/crews/<id>', methods=['GET','POST', 'DELETE'])
@login_required
@roles_accepted('admin')
def home_admin_crew_route(id):
    """
    System Admin route for viewing details of a single Crew.
    Can delete or suspend Crew from this view.

    GET renders the detail page; DELETE soft-deletes the Crew and all of
    its users; POST with ?action=suspend or ?action=restore toggles the
    Crew's status (restore also reactivates every member).
    """
    try:
        crew = Crews.query.get(id)
        if not crew:
            raise Exception('No crew found.')
    except Exception as _:
        flash('Could not access this Crew.', category='error')
        return redirect(url_for('home_admin_crews_route'))
    if request.method == 'DELETE':
        """
        System admin handler for 'deleting' a Crew.
        TODO: This can be refactored along with the crew-admin delete endpoint by
        adding Crew methods for deleting users and the Crew itself instead of
        repeating code in multiple endpoint handlers.
        """
        try:
            """
            Deactivate all Crew users and change their status.
            active=False will not allow them to log in.
            """
            users = Users.query.filter_by(crew_id=id).all()
            for user in users:
                user.active = False
                user.status = 'deleted'
                db.session.commit()
        except Exception as _:
            flash('Could not remove Crew members, please try again.', category='error')
            return redirect(url_for('home_admin_crew_route', id=id))
        try:
            """
            Change the Crew status to deleted
            """
            crew.status = 'deleted'
            db.session.commit()
        except Exception as _:
            flash('Could not remove Crew, please try again.', category='error')
            return redirect(url_for('home_admin_crew_route', id=id))
        flash('Crew deleted.', category='success')
        return redirect(url_for('home_admin_crews_route'))
    if request.method == 'POST':
        """
        This handler will look for the 'action' param to determine
        whether we are going to restore or suspend the account.
        """
        if 'action' in request.args:
            if request.args.get('action') == 'suspend':
                """
                Set status of the Crew to suspended.
                This will not prevent the admin or users from
                logging in, but they cannot listen to or record messages.
                """
                crew.status = 'suspended'
                db.session.commit()
                flash('Crew suspended.', category='success')
                return redirect(url_for('home_admin_crew_route', id=id))
            if request.args.get('action') == 'restore':
                """
                Restore a Crew.
                This will change the status of the Crew and restore login
                access for all users.
                This could conflict with a user that was previously removed from
                a Crew, but I'm not sure how to solve this given the structure we
                currently have. YOLO.
                """
                crew.status = 'active'
                db.session.commit()
                users = Users.query.filter_by(crew_id=id).all()
                for user in users:
                    user.active = True
                    user.status = 'active'
                    db.session.commit()
                flash('Crew restored.', category='success')
                return redirect(url_for('home_admin_crew_route', id=id))
        else:
            flash('Invalid request.', category='error')
            return redirect(url_for('home_admin_crew_route', id=id))
    # GET: show the detail page along with the Crew's admin users
    admins = crew.members.filter_by(crew_admin = True).all()
    return render_template('home_crew.html', crew=crew, admins=admins)
@app.route('/crews', methods=['POST'])
@login_required
@roles_accepted('crew_admin')
def crew_delete_route():
    """
    Crews route that is currently strictly for deleting the entire
    account.
    Will prompt for the admin's password and then:
    - deactivate all members
    - deactivate the admin
    - set the status of the Crew to deleted

    Deletion is "soft": rows are kept, only the active flag and status
    fields change. On success the admin is logged out and redirected to
    the public index; any failure flashes an error and redirects back to
    the settings page.
    """
    form = CrewDelete()
    crew = Crews.query.get(current_user.crew_id)
    if form.validate_on_submit():
        try:
            if verify_password(form.password.data, current_user.password):
                """
                Request the admin's password before we destroy the account
                """
                pass
            else:
                flash('Your password was not correct.', category='error')
                return redirect(url_for('home_settings_route'))
        except Exception as _:
            flash('There was an error deleting this account.', category='error')
            return redirect(url_for('home_settings_route'))
        try:
            """
            Deactivate all Crew users and change their status.
            active=False will not allow them to log in.
            """
            users = Users.query.filter_by(crew_id=crew.id).all()
            for user in users:
                user.active = False
                user.status = 'deleted'
                # NOTE(review): one commit per user; a single commit after the
                # loop would be cheaper — confirm before changing.
                db.session.commit()
        except Exception as _:
            flash('Could not remove Crew members, please try again.', category='error')
            return redirect(url_for('home_settings_route'))
        try:
            """
            Change the Crew status to deleted
            """
            crew.status = 'deleted'
            db.session.commit()
        except Exception as _:
            flash('Could not remove Crew, please try again.', category='error')
            return redirect(url_for('home_settings_route'))
        # finally, log out the user
        logout_user()
    return redirect(url_for('index_route'))
@app.route('/account-lookup', methods=["POST"])
def account_lookup_route():
    '''
    This is the Twilio entrypoint for all incoming new calls.
    This endpoint will play a welcome message and then gather the Account PIN to identify the account.
    We keep a failure counter in the session and end the call after repeated bad PINs (just
    basic brute force protection).
    If this request contains the 'Digits' var, then we have collected the Account PIN from the user and must verify it.
    Successful verification will send the call to the 'main_menu_route' to offer the user the main menu.
    If the Crew account contains an Access Code that is not None, we must send the call to the 'check_pin_route' endpoint.
    Every branch returns TwiML XML as a string (or a 307 redirect into
    another TwiML-producing route); Twilio consumes the response.

    NOTE(review): an earlier docstring referenced a 'Protected_Session'
    session flag, but nothing in this file sets one — the access-code gate
    is enforced by redirecting to check_pin_route instead.
    '''
    resp = VoiceResponse()
    # if the request contains 'Digits' we know this is the result of a Gather and must check for the account pin
    if 'Digits' in request.values:
        provided_pin = str(request.values['Digits'])
        crew = Crews.query.filter_by(account_pin=provided_pin).first()
        # brute force guard: once five failed attempts are recorded, reject any further submission
        if 'account_pin_tries' in session and session['account_pin_tries'] == 5:
            return redirect(url_for('hangup_route'), code=307)
        if crew is None:
            # crew not found or bad pin: count the failure and re-prompt
            if not 'account_pin_tries' in session:
                session['account_pin_tries'] = 1
            else:
                session['account_pin_tries'] += 1
            gather = Gather(action=app.config['APP_BASE_URL'] + url_for('account_lookup_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
            gather.say(f"Your input was not correct. Please input your account pin and then press the pound key.", voice=app.config['TWILIO_VOICE_SETTING'])
            resp.append(gather)
            return str(resp)
        elif crew.status in ['suspended', 'deleted']:
            # crew is deleted or deactivated by an admin, kill call
            resp.say(f"This account is deactivated. Goodbye.", voice=app.config['TWILIO_VOICE_SETTING'])
            hangup = Hangup()
            resp.append(hangup)
            return str(resp)
        elif crew.access_code is not None:
            # crew is using an access code, send to the check pin route
            session['sessionCrewId'] = crew.id
            return redirect(url_for('check_pin_route'), code=307)
        else:
            # crew exists and is in good standing, proceed
            session['sessionCrewId'] = crew.id
            return redirect(url_for('main_menu_route'), code=307)
    # this block will be run during the very first POST to this route, signaling a new incoming call
    gather = Gather(action=app.config['APP_BASE_URL'] + url_for('account_lookup_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
    gather.say(f"You have reached { app.config['APP_NAME'] }. Please enter your account pin and then press the pound key.", voice=app.config['TWILIO_VOICE_SETTING'])
    resp.append(gather)
    return str(resp)
@app.route('/check-pin', methods=['GET', 'POST'])
def check_pin_route():
    '''
    If the Crew account is "protected" then we must direct to this endpoint for collection of the account Access Code.
    Since the user may use either the Crew access code or their individual access code, we have a Crews func that will
    gather up all possible codes and load them in a list. We just check the list to see if what the user input matches any
    of the entries. Again, not super secure, this is just a best intentions effort to protect messages.
    A brute force guard rejects further submissions once five failed
    attempts have been recorded in the session.
    Returns TwiML XML as a string, or a 307 redirect into the menu /
    hangup flow.
    '''
    resp = VoiceResponse()
    # a crew must already have been identified by account_lookup_route
    if not 'sessionCrewId' in session:
        return redirect(url_for('hangup_route'), code=307)
    else:
        crew = Crews.query.get(session['sessionCrewId'])
        # all acceptable codes for this crew (per the docstring: the crew-wide
        # code plus each member's individual code)
        codes_list = crew.gather_access_codes()
    if 'Digits' in request.values:
        # brute force guard: once five failed attempts are recorded, reject any further submission
        if 'access_code_tries' in session and session['access_code_tries'] == 5:
            return redirect(url_for('hangup_route'), code=307)
        provided_code = str(request.values['Digits'])
        if provided_code in codes_list:
            # all is good, send to the menu
            return redirect(url_for('main_menu_route'), code=307)
        else:
            # wrong code: count the failure and re-prompt
            if not 'access_code_tries' in session:
                session['access_code_tries'] = 1
            else:
                session['access_code_tries'] += 1
            gather = Gather(action=app.config['APP_BASE_URL'] + url_for('check_pin_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
            gather.say(f"Your input was not correct. Please input your access code and then press the pound key.", voice=app.config['TWILIO_VOICE_SETTING'])
            resp.append(gather)
            return str(resp)
    # first visit to this route: prompt for the access code
    gather = Gather(action=app.config['APP_BASE_URL'] + url_for('check_pin_route'), method='POST', input="dtmf", timeout=3, finishOnKey='#')
    gather.say("This account is protected. Please enter the access code, followed by the pound sign.", voice=app.config['TWILIO_VOICE_SETTING'])
    resp.append(gather)
    return str(resp)
@app.route('/main-menu', methods=['GET', 'POST'])
def main_menu_route():
'''
The primary route for our Twilio voice menu.
Hitting this route with no request params means we are starting fresh with a new call.
At this stage we should provide a Message count and offer to play Messages or record a new Message.
We will use 1 to listen to Messages and 2 to record a Message.
The presence of 'Digits' in our request vars will indicate which route we will redirect to:
1 - redirect to the 'play_route'
2 - redirect to the 'record_route'
'''
message_count = 0
session['messages'] = None
# at this point sessionCrew should be set, if not, kill
if session['sessionCrewId'] is None:
return redirect(url_for('hangup_route'), code=307)
else:
crew = Crews.query.get(session['sessionCrewId'])
# need to adjust this to exclude 'deleted' status messages
message_count = Messages.query.filter_by(crew_id=crew.id).count()
# grab all Messages and put them in the session
# we will do this every time as this can change each time the user hits this endpoint
messages = Messages.query.filter_by(crew_id=session['sessionCrewId']).order_by(Messages.created.desc()).all()
session['messages'] = Messages.make_ordered_list(messages)
resp = VoiceResponse()
# if the request contains 'Digits' we know this is the result of a Gather and must check for the account pin
if 'Digits' in request.values:
choice = str(request.values['Digits'])
if choice == '1':
# user wants to listen, redirect to play route
return redirect(url_for('play_route'), code=307)
elif choice == '2':
# user wants to record, redirect to record route
return redirect(url_for('record_route'), code=307)
elif choice == '3':
# catching a return to main menu
pass
else:
# invalid choice, try again
gather = Gather(action=app.config['APP_BASE_URL'] + url_for('main_menu_route'), method='POST', input="dtmf", num_digits=1, timeout=4)
gather.say(f"That was not a valid selection. To listen to messages, press 1. To record a message, press 2.", voice=app.config['TWILIO_VOICE_SETTING'])
resp.append(gather)
return str(resp)
# this block will be run during the very first POST to this route, signaling a new incoming call
gather = Gather(action=app.config['APP_BASE_URL'] + url_for('main_menu_route'), method='POST', input="dtmf", num_digits=1, timeout=3, finishOnKey='#')
gather.say(f"Main menu. You have {message_count} messages. To listen to messages, press 1. To record a message, press 2. If you are finished, you may hang up.", voice=app.config['TWILIO_VOICE_SETTING'])
resp.append(gather)
return str(resp)
@app.route('/record', methods=['GET', 'POST'])
def record_route():
'''
The route responsible for handling recording a new Message.
From a fresh state we simply prompt a Record action.
If the 'RecordingSid' and 'RecordingUrl' and 'RecordingStatus' vars are present, we are returning to this
menu directly after recording a Message (so handle creating a new Message).
After the new Message has been created and committed, we then redirect back to the 'main_menu_route'
'''
resp = VoiceResponse()
if 'RecordingSid' and 'RecordingUrl' in request.values:
# we have a recording from Twilio, create a new Message and save it. Redirect to main menu.
url = request.values['RecordingUrl']
message = Messages(crew_id=session['sessionCrewId'], url=url)
db.session.add(message)
# if someone is recording messages, we can assume this is an active crew
crew = Crews.query.get(session['sessionCrewId'])
crew.status = 'active'
db.session.add(crew)
db.session.commit()
return redirect(url_for('main_menu_route'), code=307)
record = Record(action=app.config['APP_BASE_URL'] + url_for('record_route'), method='POST', maxLength=app.config['TWILIO_RECORDING_MAXLENGTH'], playBeep=True, timeout=3, finishOnKey='#')
say = Say(f"Record your message after the beep. Press the pound key when you are finished.", voice=app.config['TWILIO_VOICE_SETTING'])
resp.append(say)
resp.append(record)
return str(resp)
@app.route('/play', methods=['GET', 'POST'])
def play_route():
'''
This route is responsible for playing existing Messages.
We want to list Messages in reverse chronological order, so newest Message plays first.
As we will allow the user to skip through to the next Message at any time by hitting 1, we will want
to keep the Message list in a session; this list will contain the UUID of the Messages available.
Instead of trying to keep a cursor for our position in the list, we will pop off each Message as it is
played. This of course means we cannot go backwards in our list - we will just prompt the user to go back
to the main menu and start again.
'''
resp = VoiceResponse()
message_list = session['messages']
if not message_list:
# no messages to play, send back to main menu
gather = Gather(action=app.config['APP_BASE_URL'] + url_for('main_menu_route'), method='POST', input="dtmf", num_digits=1, timeout=3)
gather.say(f"There are no more messages. Press three to return to the main menu, or simply hang up.", voice=app.config['TWILIO_VOICE_SETTING'])
resp.append(gather)
return str(resp)
if 'Digits' in request.values:
"""
We really only need to look for a prompt for the main menu.
If the user wants to skip, this endpoint gets reloaded and the session will pop off
the next message automatically.
"""
choice = str(request.values['Digits'])
if choice == '3':
# user wants to go to main menu
return redirect(url_for('main_menu_route'), code=307)
message_list = session['messages']
message_to_play = message_list.pop(0)
gather = Gather(action=app.config['APP_BASE_URL'] + url_for('play_route'), method='POST', input="dtmf", num_digits=1)
gather.say(f"Playing message. To skip to the next message, press 1. To go back to the main menu, press 3", voice=app.config['TWILIO_VOICE_SETTING'])
gather.play(f"{ message_to_play['url']}.mp3")
resp.append(gather)
return str(resp)
@app.route('/hangup', methods=['GET', 'POST'])
def hangup_route():
'''
We don't need a hangup route, but having one would be nice so we can clean up
the sessions and end the call properly.
'''
session.clear()
resp = VoiceResponse()
hangup = Hangup()
resp.append(hangup)
return str(resp)
@app.errorhandler(404)
def page_not_found(e):
'''
Handle any pages not found.
'''
return render_template('404.html'), 404
@app.errorhandler(500)
def system_err(e):
"""
Handling a 500 error is simply to prevent display of debugging info or a browser-generated
error.
You should attempt to alert the system admin if this endpoint is ever reached, but monitoring
and error collection is out of scope for this iteration of the project.
"""
return render_template('500.html'), 500
'''
TODO items
TODO - build out crew members dashboard
TODO - change crew status to active after any message activity
TODO - fix dashboard mobile nav
TODO - fix messages mobile nav
TODO - the JS location.assign after message del is losing the flash success message.
TODO - crew invitation system
''' | en | 0.906088 | Rally Call This app is designed to be a backup communication hub which allows persons to call in and either leave a voice memo or listen to voice memos from other members of their group. Copyright 2021 <NAME> <<EMAIL>> This application is licensed under the terms of BSD-3. Please refer to LICENSE in the project repository for details. Bootstrap the Flask app. On first request we want to make sure our app is properly bootstrapped. This involves setting up our user roles and pre-generating an admin user. We also want to make sure our Twilio inbound number webhook is properly set (you will have to set up your Twilio account and buy a number prior to booting this app.) We have three roles: 1. System Admin - The super admin who controls the entire system - unlimited permissions. 2. Account Admin - The owner of a Crew. These permissions allow administering the Crew and its members. 3. Basic User - A member of a Crew, least priviledges. *This func will only run when the first _request_ is made, not when the app starts* To properly bootstrap the app, start the app and then visit / or /login, then promptly use the Reset Password function to change the System Admin password. #check for roles #check for system admin user # Update our Twilio account to make sure the inbound number we're using for this app # has a proper webhook setting. # Set the webhook for our inbound number to the 'account_lookup_route' entrypoint for our inbound calls. # This block will make sure that our Twilio account is always up to date in terms of what the fqd # endpoint is. This makes it easy to port this app from dev to prod and even to new domains without # having to log into Twilio and make changes manually. # The SMS URL is deliberately set to the empty string to reject any SMS sent to the number. Define a template filter to display the Message creation date in a more human-friendly form. Then make sure to register the new filter. 
Define a template filter to display UTC dates Make sure to include the crew object in each route for use in the template. Make sure to include the global configs in each route for use in the template. The public homepage. We're going to use the Flask-Security built in /register endpoint to register a user, but we will also need to assign this user a role and bootstrap their Crew (any user using /register will be a Crew admin, basic users do not need to register). Hook into the user_registered Signal to make these changes when someone registers. 1. Set user's role to 'crew_admin' 2. Bootstrap the Crew # this next piece should maybe be wrapped in a try block. # if the user role wasn't found or couldn't be added to the user, # what would we do? delete the user and tell them to register again? # we wouldn't want a random user in the system with no role or crew. Display all Messages for the Crew that are not deleted. Route just for deleting Messages. We don't actually do a physical delete though, just set status to 'deleted' This route does not return a view, it redirects to the messages route. Option for the Crew Admin to add a Crew Name and 'protect' the account by adding an Access Code. Can also delete the Crew from this view. System Admin route for viewing all Crews in the system. System Admin route for viewing details of a single Crew. Can delete or suspend Crew from this view. System admin handler for 'deleting' a Crew. TODO: This can be refactored along with the crew-admin delete endpoint by adding Crew methods for deleting users and the Crew itself instead of repeating code in multiple endpoint handlers. Deactivate all Crew users and change their status. active=False will not allow them to log in. Change the Crew status to deleted This handler will look for the 'action' param to determine whether we are going to restore or suspend the account. Set status of the Crew to suspended. 
This will not prevent the admin or users from logging in, but they cannot listen to or record messages. Restore a Crew. This will change the status of the Crew and restore login access for all users. This could conflict with a user that was previously removed from a Crew, but I'm not sure how to solve this given the structure we currently have. YOLO. Crews route that is currently strictly for deleting the entire account. Will prompt for the admin's password and then: - deactivate all members - deactivate the admin - set the status of the Crew to deleted Request the admin's password before we destroy the account Deactivate all Crew users and change their status. active=False will not allow them to log in. Change the Crew status to deleted # finally, log out the user This is the Twilio entrypoint for all incoming new calls. This endpoint will play a welcome message and then gather the Account PIN to identify the account. We will set a session var and allow 4 tries to get the Account PIN correct, otherwise we end the call (just basic brute force protection). If this request contains the 'Digits' var, then we have collected the Account PIN from the user and must verify it. Successful verification will send the call to the 'main_menu_route' to offer the user the main menu. If the Crew account contains an Access Code that is not None, we must send the call to the 'check_pin_route' endpoint. Our check for whether or not this step is required will be determined by setting a session var 'Protected_Session'. This var will be instantiated to False when created and must be changed to True in order to allow playing or recording of Messages. 
# if the request contains 'Digits' we know this is the result of a Gather and must check for the account pin # basic pin brute force protection that limits tries to 4 # crew not found or bad pin # crew is deleted or deactivated by an admin, kill call # crew is using an access code, send to the check pin route # crew exists and is in good standing, proceed # this block will be run during the very first POST to this route, signaling a new incoming call If the Crew account is "protected" then we must direct to this endpoint for collection of the account Access Code. Since the user may use either the Crew acccess code or their individual access code, we have a Crews func that will gather up all possible codes and load them in a list. We just check the list to see if what the user input matches any of the entries. Again, not super secure, this is just a best intentions effort to protect messages. This endpoint contains a basic brute force protection which will only allow 4 attempts before ending the call. # basic pin brute force protection that limits tries to 4 # all is good, send to the menu The primary route for our Twilio voice menu. Hitting this route with no request params means we are starting fresh with a new call. At this stage we should provide a Message count and offer to play Messages or record a new Message. We will use 1 to listen to Messages and 2 to record a Message. 
The presence of 'Digits' in our request vars will indicate which route we will redirect to: 1 - redirect to the 'play_route' 2 - redirect to the 'record_route' # at this point sessionCrew should be set, if not, kill # need to adjust this to exclude 'deleted' status messages # grab all Messages and put them in the session # we will do this every time as this can change each time the user hits this endpoint # if the request contains 'Digits' we know this is the result of a Gather and must check for the account pin # user wants to listen, redirect to play route # user wants to record, redirect to record route # catching a return to main menu # invalid choice, try again # this block will be run during the very first POST to this route, signaling a new incoming call The route responsible for handling recording a new Message. From a fresh state we simply prompt a Record action. If the 'RecordingSid' and 'RecordingUrl' and 'RecordingStatus' vars are present, we are returning to this menu directly after recording a Message (so handle creating a new Message). After the new Message has been created and committed, we then redirect back to the 'main_menu_route' # we have a recording from Twilio, create a new Message and save it. Redirect to main menu. # if someone is recording messages, we can assume this is an active crew This route is responsible for playing existing Messages. We want to list Messages in reverse chronological order, so newest Message plays first. As we will allow the user to skip through to the next Message at any time by hitting 1, we will want to keep the Message list in a session; this list will contain the UUID of the Messages available. Instead of trying to keep a cursor for our position in the list, we will pop off each Message as it is played. This of course means we cannot go backwards in our list - we will just prompt the user to go back to the main menu and start again. 
# no messages to play, send back to main menu We really only need to look for a prompt for the main menu. If the user wants to skip, this endpoint gets reloaded and the session will pop off the next message automatically. # user wants to go to main menu We don't need a hangup route, but having one would be nice so we can clean up the sessions and end the call properly. Handle any pages not found. Handling a 500 error is simply to prevent display of debugging info or a browser-generated error. You should attempt to alert the system admin if this endpoint is ever reached, but monitoring and error collection is out of scope for this iteration of the project. TODO items TODO - build out crew members dashboard TODO - change crew status to active after any message activity TODO - fix dashboard mobile nav TODO - fix messages mobile nav TODO - the JS location.assign after message del is losing the flash success message. TODO - crew invitation system | 2.219039 | 2 |
ooobuild/lo/accessibility/x_accessible_text_markup.py | Amourspirit/ooo_uno_tmpl | 0 | 6618144 | <filename>ooobuild/lo/accessibility/x_accessible_text_markup.py
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.accessibility
import typing
from abc import abstractmethod
from .x_accessible_text import XAccessibleText as XAccessibleText_5b77105b
if typing.TYPE_CHECKING:
from .text_segment import TextSegment as TextSegment_1e5b0ee8
class XAccessibleTextMarkup(XAccessibleText_5b77105b):
"""
Implement this interface to expose the text markups of a text.
The XAccessibleTextMarkup interface is the main interface to expose text markups in a text, typically of a text document, that are used to reference other (parts of) documents. For supporting the XAccessibleTextMarkup.getTextMarkupIndex() method of this interface and other character related methods of the XAccessibleTextMarkup interface, it is necessary to also support the XAccessibleText interface.
**since**
OOo 3.0
See Also:
`API XAccessibleTextMarkup <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1accessibility_1_1XAccessibleTextMarkup.html>`_
"""
__ooo_ns__: str = 'com.sun.star.accessibility'
__ooo_full_ns__: str = 'com.sun.star.accessibility.XAccessibleTextMarkup'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.accessibility.XAccessibleTextMarkup'
@abstractmethod
def getTextMarkup(self, TextMarkupIndex: int, TextMarkupType: int) -> 'TextSegment_1e5b0ee8':
"""
Returns the text segment of the text markup of the given index and of the given text mark type.
Throws IndexOutOfBoundsException, if given index is out of valid range.
Throws IllegalArgumentException, if given text markup type is out of valid range.
Raises:
com.sun.star.lang.IndexOutOfBoundsException: ``IndexOutOfBoundsException``
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def getTextMarkupAtIndex(self, CharIndex: int, TextMarkupType: int) -> 'typing.Tuple[TextSegment_1e5b0ee8, ...]':
"""
returns a sequence of the text segments of the text markups at the given character index and of the given text markup type.
Throws IndexOutOfBoundsException, if given character index is out of range [0..number of characters in the text).
Throws IllegalArgumentException, if given text markup type is out of valid range.
Raises:
com.sun.star.lang.IndexOutOfBoundsException: ``IndexOutOfBoundsException``
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def getTextMarkupCount(self, TextMarkupType: int) -> int:
"""
Returns the number of text markup of the given text markup type of a text.
Throws IllegalArgumentException, if given text markup type is out of valid range.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
__all__ = ['XAccessibleTextMarkup']
| <filename>ooobuild/lo/accessibility/x_accessible_text_markup.py
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.accessibility
import typing
from abc import abstractmethod
from .x_accessible_text import XAccessibleText as XAccessibleText_5b77105b
if typing.TYPE_CHECKING:
from .text_segment import TextSegment as TextSegment_1e5b0ee8
class XAccessibleTextMarkup(XAccessibleText_5b77105b):
"""
Implement this interface to expose the text markups of a text.
The XAccessibleTextMarkup interface is the main interface to expose text markups in a text, typically of a text document, that are used to reference other (parts of) documents. For supporting the XAccessibleTextMarkup.getTextMarkupIndex() method of this interface and other character related methods of the XAccessibleTextMarkup interface, it is necessary to also support the XAccessibleText interface.
**since**
OOo 3.0
See Also:
`API XAccessibleTextMarkup <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1accessibility_1_1XAccessibleTextMarkup.html>`_
"""
__ooo_ns__: str = 'com.sun.star.accessibility'
__ooo_full_ns__: str = 'com.sun.star.accessibility.XAccessibleTextMarkup'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.accessibility.XAccessibleTextMarkup'
@abstractmethod
def getTextMarkup(self, TextMarkupIndex: int, TextMarkupType: int) -> 'TextSegment_1e5b0ee8':
"""
Returns the text segment of the text markup of the given index and of the given text mark type.
Throws IndexOutOfBoundsException, if given index is out of valid range.
Throws IllegalArgumentException, if given text markup type is out of valid range.
Raises:
com.sun.star.lang.IndexOutOfBoundsException: ``IndexOutOfBoundsException``
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def getTextMarkupAtIndex(self, CharIndex: int, TextMarkupType: int) -> 'typing.Tuple[TextSegment_1e5b0ee8, ...]':
"""
returns a sequence of the text segments of the text markups at the given character index and of the given text markup type.
Throws IndexOutOfBoundsException, if given character index is out of range [0..number of characters in the text).
Throws IllegalArgumentException, if given text markup type is out of valid range.
Raises:
com.sun.star.lang.IndexOutOfBoundsException: ``IndexOutOfBoundsException``
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def getTextMarkupCount(self, TextMarkupType: int) -> int:
"""
Returns the number of text markup of the given text markup type of a text.
Throws IllegalArgumentException, if given text markup type is out of valid range.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
__all__ = ['XAccessibleTextMarkup']
| en | 0.549384 | # coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Interface Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.accessibility Implement this interface to expose the text markups of a text. The XAccessibleTextMarkup interface is the main interface to expose text markups in a text, typically of a text document, that are used to reference other (parts of) documents. For supporting the XAccessibleTextMarkup.getTextMarkupIndex() method of this interface and other character related methods of the XAccessibleTextMarkup interface, it is necessary to also support the XAccessibleText interface. **since** OOo 3.0 See Also: `API XAccessibleTextMarkup <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1accessibility_1_1XAccessibleTextMarkup.html>`_ Returns the text segment of the text markup of the given index and of the given text mark type. Throws IndexOutOfBoundsException, if given index is out of valid range. Throws IllegalArgumentException, if given text markup type is out of valid range. Raises: com.sun.star.lang.IndexOutOfBoundsException: ``IndexOutOfBoundsException`` com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException`` returns a sequence of the text segments of the text markups at the given character index and of the given text markup type. 
Throws IndexOutOfBoundsException, if given character index is out of range [0..number of characters in the text). Throws IllegalArgumentException, if given text markup type is out of valid range. Raises: com.sun.star.lang.IndexOutOfBoundsException: ``IndexOutOfBoundsException`` com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException`` Returns the number of text markup of the given text markup type of a text. Throws IllegalArgumentException, if given text markup type is out of valid range. Raises: com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException`` | 1.617185 | 2 |
py/tl_pb2_grpc.py | FedorZaytsev/mtproto | 146 | 6618145 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
import tl_pb2 as tl__pb2
class MtprotoStub(object):
"""Procedures
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.InvokeAfterMsg = channel.unary_unary(
'/mtproto.Mtproto/InvokeAfterMsg',
request_serializer=tl__pb2.ReqInvokeAfterMsg.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.InvokeAfterMsgs = channel.unary_unary(
'/mtproto.Mtproto/InvokeAfterMsgs',
request_serializer=tl__pb2.ReqInvokeAfterMsgs.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.AuthCheckPhone = channel.unary_unary(
'/mtproto.Mtproto/AuthCheckPhone',
request_serializer=tl__pb2.ReqAuthCheckPhone.SerializeToString,
response_deserializer=tl__pb2.TypeAuthCheckedPhone.FromString,
)
self.AuthSendCode = channel.unary_unary(
'/mtproto.Mtproto/AuthSendCode',
request_serializer=tl__pb2.ReqAuthSendCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AuthSignUp = channel.unary_unary(
'/mtproto.Mtproto/AuthSignUp',
request_serializer=tl__pb2.ReqAuthSignUp.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.AuthSignIn = channel.unary_unary(
'/mtproto.Mtproto/AuthSignIn',
request_serializer=tl__pb2.ReqAuthSignIn.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.AuthLogOut = channel.unary_unary(
'/mtproto.Mtproto/AuthLogOut',
request_serializer=tl__pb2.ReqAuthLogOut.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthResetAuthorizations = channel.unary_unary(
'/mtproto.Mtproto/AuthResetAuthorizations',
request_serializer=tl__pb2.ReqAuthResetAuthorizations.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthSendInvites = channel.unary_unary(
'/mtproto.Mtproto/AuthSendInvites',
request_serializer=tl__pb2.ReqAuthSendInvites.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthExportAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AuthExportAuthorization',
request_serializer=tl__pb2.ReqAuthExportAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeAuthExportedAuthorization.FromString,
)
self.AuthImportAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AuthImportAuthorization',
request_serializer=tl__pb2.ReqAuthImportAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.AccountRegisterDevice = channel.unary_unary(
'/mtproto.Mtproto/AccountRegisterDevice',
request_serializer=tl__pb2.ReqAccountRegisterDevice.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUnregisterDevice = channel.unary_unary(
'/mtproto.Mtproto/AccountUnregisterDevice',
request_serializer=tl__pb2.ReqAccountUnregisterDevice.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUpdateNotifySettings = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateNotifySettings',
request_serializer=tl__pb2.ReqAccountUpdateNotifySettings.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetNotifySettings = channel.unary_unary(
'/mtproto.Mtproto/AccountGetNotifySettings',
request_serializer=tl__pb2.ReqAccountGetNotifySettings.SerializeToString,
response_deserializer=tl__pb2.TypePeerNotifySettings.FromString,
)
self.AccountResetNotifySettings = channel.unary_unary(
'/mtproto.Mtproto/AccountResetNotifySettings',
request_serializer=tl__pb2.ReqAccountResetNotifySettings.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUpdateProfile = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateProfile',
request_serializer=tl__pb2.ReqAccountUpdateProfile.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.AccountUpdateStatus = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateStatus',
request_serializer=tl__pb2.ReqAccountUpdateStatus.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetWallPapers = channel.unary_unary(
'/mtproto.Mtproto/AccountGetWallPapers',
request_serializer=tl__pb2.ReqAccountGetWallPapers.SerializeToString,
response_deserializer=tl__pb2.TypeVectorWallPaper.FromString,
)
self.UsersGetUsers = channel.unary_unary(
'/mtproto.Mtproto/UsersGetUsers',
request_serializer=tl__pb2.ReqUsersGetUsers.SerializeToString,
response_deserializer=tl__pb2.TypeVectorUser.FromString,
)
self.UsersGetFullUser = channel.unary_unary(
'/mtproto.Mtproto/UsersGetFullUser',
request_serializer=tl__pb2.ReqUsersGetFullUser.SerializeToString,
response_deserializer=tl__pb2.TypeUserFull.FromString,
)
self.ContactsGetStatuses = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetStatuses',
request_serializer=tl__pb2.ReqContactsGetStatuses.SerializeToString,
response_deserializer=tl__pb2.TypeVectorContactStatus.FromString,
)
self.ContactsGetContacts = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetContacts',
request_serializer=tl__pb2.ReqContactsGetContacts.SerializeToString,
response_deserializer=tl__pb2.TypeContactsContacts.FromString,
)
self.ContactsImportContacts = channel.unary_unary(
'/mtproto.Mtproto/ContactsImportContacts',
request_serializer=tl__pb2.ReqContactsImportContacts.SerializeToString,
response_deserializer=tl__pb2.TypeContactsImportedContacts.FromString,
)
self.ContactsSearch = channel.unary_unary(
'/mtproto.Mtproto/ContactsSearch',
request_serializer=tl__pb2.ReqContactsSearch.SerializeToString,
response_deserializer=tl__pb2.TypeContactsFound.FromString,
)
self.ContactsDeleteContact = channel.unary_unary(
'/mtproto.Mtproto/ContactsDeleteContact',
request_serializer=tl__pb2.ReqContactsDeleteContact.SerializeToString,
response_deserializer=tl__pb2.TypeContactsLink.FromString,
)
self.ContactsDeleteContacts = channel.unary_unary(
'/mtproto.Mtproto/ContactsDeleteContacts',
request_serializer=tl__pb2.ReqContactsDeleteContacts.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsBlock = channel.unary_unary(
'/mtproto.Mtproto/ContactsBlock',
request_serializer=tl__pb2.ReqContactsBlock.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsUnblock = channel.unary_unary(
'/mtproto.Mtproto/ContactsUnblock',
request_serializer=tl__pb2.ReqContactsUnblock.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsGetBlocked = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetBlocked',
request_serializer=tl__pb2.ReqContactsGetBlocked.SerializeToString,
response_deserializer=tl__pb2.TypeContactsBlocked.FromString,
)
self.MessagesGetMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMessages',
request_serializer=tl__pb2.ReqMessagesGetMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesGetDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetDialogs',
request_serializer=tl__pb2.ReqMessagesGetDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesDialogs.FromString,
)
self.MessagesGetHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetHistory',
request_serializer=tl__pb2.ReqMessagesGetHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesSearch = channel.unary_unary(
'/mtproto.Mtproto/MessagesSearch',
request_serializer=tl__pb2.ReqMessagesSearch.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesReadHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadHistory',
request_serializer=tl__pb2.ReqMessagesReadHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.MessagesDeleteHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesDeleteHistory',
request_serializer=tl__pb2.ReqMessagesDeleteHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedHistory.FromString,
)
self.MessagesDeleteMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesDeleteMessages',
request_serializer=tl__pb2.ReqMessagesDeleteMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.MessagesReceivedMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesReceivedMessages',
request_serializer=tl__pb2.ReqMessagesReceivedMessages.SerializeToString,
response_deserializer=tl__pb2.TypeVectorReceivedNotifyMessage.FromString,
)
self.MessagesSetTyping = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetTyping',
request_serializer=tl__pb2.ReqMessagesSetTyping.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSendMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendMessage',
request_serializer=tl__pb2.ReqMessagesSendMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesSendMedia = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendMedia',
request_serializer=tl__pb2.ReqMessagesSendMedia.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesForwardMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesForwardMessages',
request_serializer=tl__pb2.ReqMessagesForwardMessages.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesGetChats = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetChats',
request_serializer=tl__pb2.ReqMessagesGetChats.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.MessagesGetFullChat = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetFullChat',
request_serializer=tl__pb2.ReqMessagesGetFullChat.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChatFull.FromString,
)
self.MessagesEditChatTitle = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditChatTitle',
request_serializer=tl__pb2.ReqMessagesEditChatTitle.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesEditChatPhoto = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditChatPhoto',
request_serializer=tl__pb2.ReqMessagesEditChatPhoto.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesAddChatUser = channel.unary_unary(
'/mtproto.Mtproto/MessagesAddChatUser',
request_serializer=tl__pb2.ReqMessagesAddChatUser.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesDeleteChatUser = channel.unary_unary(
'/mtproto.Mtproto/MessagesDeleteChatUser',
request_serializer=tl__pb2.ReqMessagesDeleteChatUser.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesCreateChat = channel.unary_unary(
'/mtproto.Mtproto/MessagesCreateChat',
request_serializer=tl__pb2.ReqMessagesCreateChat.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.UpdatesGetState = channel.unary_unary(
'/mtproto.Mtproto/UpdatesGetState',
request_serializer=tl__pb2.ReqUpdatesGetState.SerializeToString,
response_deserializer=tl__pb2.TypeUpdatesState.FromString,
)
self.UpdatesGetDifference = channel.unary_unary(
'/mtproto.Mtproto/UpdatesGetDifference',
request_serializer=tl__pb2.ReqUpdatesGetDifference.SerializeToString,
response_deserializer=tl__pb2.TypeUpdatesDifference.FromString,
)
self.PhotosUpdateProfilePhoto = channel.unary_unary(
'/mtproto.Mtproto/PhotosUpdateProfilePhoto',
request_serializer=tl__pb2.ReqPhotosUpdateProfilePhoto.SerializeToString,
response_deserializer=tl__pb2.TypeUserProfilePhoto.FromString,
)
self.PhotosUploadProfilePhoto = channel.unary_unary(
'/mtproto.Mtproto/PhotosUploadProfilePhoto',
request_serializer=tl__pb2.ReqPhotosUploadProfilePhoto.SerializeToString,
response_deserializer=tl__pb2.TypePhotosPhoto.FromString,
)
self.UploadSaveFilePart = channel.unary_unary(
'/mtproto.Mtproto/UploadSaveFilePart',
request_serializer=tl__pb2.ReqUploadSaveFilePart.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.UploadGetFile = channel.unary_unary(
'/mtproto.Mtproto/UploadGetFile',
request_serializer=tl__pb2.ReqUploadGetFile.SerializeToString,
response_deserializer=tl__pb2.TypeUploadFile.FromString,
)
self.HelpGetConfig = channel.unary_unary(
'/mtproto.Mtproto/HelpGetConfig',
request_serializer=tl__pb2.ReqHelpGetConfig.SerializeToString,
response_deserializer=tl__pb2.TypeConfig.FromString,
)
self.HelpGetNearestDc = channel.unary_unary(
'/mtproto.Mtproto/HelpGetNearestDc',
request_serializer=tl__pb2.ReqHelpGetNearestDc.SerializeToString,
response_deserializer=tl__pb2.TypeNearestDc.FromString,
)
self.HelpGetAppUpdate = channel.unary_unary(
'/mtproto.Mtproto/HelpGetAppUpdate',
request_serializer=tl__pb2.ReqHelpGetAppUpdate.SerializeToString,
response_deserializer=tl__pb2.TypeHelpAppUpdate.FromString,
)
self.HelpSaveAppLog = channel.unary_unary(
'/mtproto.Mtproto/HelpSaveAppLog',
request_serializer=tl__pb2.ReqHelpSaveAppLog.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.HelpGetInviteText = channel.unary_unary(
'/mtproto.Mtproto/HelpGetInviteText',
request_serializer=tl__pb2.ReqHelpGetInviteText.SerializeToString,
response_deserializer=tl__pb2.TypeHelpInviteText.FromString,
)
self.PhotosDeletePhotos = channel.unary_unary(
'/mtproto.Mtproto/PhotosDeletePhotos',
request_serializer=tl__pb2.ReqPhotosDeletePhotos.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLong.FromString,
)
self.PhotosGetUserPhotos = channel.unary_unary(
'/mtproto.Mtproto/PhotosGetUserPhotos',
request_serializer=tl__pb2.ReqPhotosGetUserPhotos.SerializeToString,
response_deserializer=tl__pb2.TypePhotosPhotos.FromString,
)
self.MessagesForwardMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesForwardMessage',
request_serializer=tl__pb2.ReqMessagesForwardMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesGetDhConfig = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetDhConfig',
request_serializer=tl__pb2.ReqMessagesGetDhConfig.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesDhConfig.FromString,
)
self.MessagesRequestEncryption = channel.unary_unary(
'/mtproto.Mtproto/MessagesRequestEncryption',
request_serializer=tl__pb2.ReqMessagesRequestEncryption.SerializeToString,
response_deserializer=tl__pb2.TypeEncryptedChat.FromString,
)
self.MessagesAcceptEncryption = channel.unary_unary(
'/mtproto.Mtproto/MessagesAcceptEncryption',
request_serializer=tl__pb2.ReqMessagesAcceptEncryption.SerializeToString,
response_deserializer=tl__pb2.TypeEncryptedChat.FromString,
)
self.MessagesDiscardEncryption = channel.unary_unary(
'/mtproto.Mtproto/MessagesDiscardEncryption',
request_serializer=tl__pb2.ReqMessagesDiscardEncryption.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSetEncryptedTyping = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetEncryptedTyping',
request_serializer=tl__pb2.ReqMessagesSetEncryptedTyping.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReadEncryptedHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadEncryptedHistory',
request_serializer=tl__pb2.ReqMessagesReadEncryptedHistory.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSendEncrypted = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendEncrypted',
request_serializer=tl__pb2.ReqMessagesSendEncrypted.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSentEncryptedMessage.FromString,
)
self.MessagesSendEncryptedFile = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendEncryptedFile',
request_serializer=tl__pb2.ReqMessagesSendEncryptedFile.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSentEncryptedMessage.FromString,
)
self.MessagesSendEncryptedService = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendEncryptedService',
request_serializer=tl__pb2.ReqMessagesSendEncryptedService.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSentEncryptedMessage.FromString,
)
self.MessagesReceivedQueue = channel.unary_unary(
'/mtproto.Mtproto/MessagesReceivedQueue',
request_serializer=tl__pb2.ReqMessagesReceivedQueue.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLong.FromString,
)
self.UploadSaveBigFilePart = channel.unary_unary(
'/mtproto.Mtproto/UploadSaveBigFilePart',
request_serializer=tl__pb2.ReqUploadSaveBigFilePart.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.InitConnection = channel.unary_unary(
'/mtproto.Mtproto/InitConnection',
request_serializer=tl__pb2.ReqInitConnection.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.HelpGetSupport = channel.unary_unary(
'/mtproto.Mtproto/HelpGetSupport',
request_serializer=tl__pb2.ReqHelpGetSupport.SerializeToString,
response_deserializer=tl__pb2.TypeHelpSupport.FromString,
)
self.AuthBindTempAuthKey = channel.unary_unary(
'/mtproto.Mtproto/AuthBindTempAuthKey',
request_serializer=tl__pb2.ReqAuthBindTempAuthKey.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsExportCard = channel.unary_unary(
'/mtproto.Mtproto/ContactsExportCard',
request_serializer=tl__pb2.ReqContactsExportCard.SerializeToString,
response_deserializer=tl__pb2.TypeVectorInt.FromString,
)
self.ContactsImportCard = channel.unary_unary(
'/mtproto.Mtproto/ContactsImportCard',
request_serializer=tl__pb2.ReqContactsImportCard.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.MessagesReadMessageContents = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadMessageContents',
request_serializer=tl__pb2.ReqMessagesReadMessageContents.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.AccountCheckUsername = channel.unary_unary(
'/mtproto.Mtproto/AccountCheckUsername',
request_serializer=tl__pb2.ReqAccountCheckUsername.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUpdateUsername = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateUsername',
request_serializer=tl__pb2.ReqAccountUpdateUsername.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.AccountGetPrivacy = channel.unary_unary(
'/mtproto.Mtproto/AccountGetPrivacy',
request_serializer=tl__pb2.ReqAccountGetPrivacy.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPrivacyRules.FromString,
)
self.AccountSetPrivacy = channel.unary_unary(
'/mtproto.Mtproto/AccountSetPrivacy',
request_serializer=tl__pb2.ReqAccountSetPrivacy.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPrivacyRules.FromString,
)
self.AccountDeleteAccount = channel.unary_unary(
'/mtproto.Mtproto/AccountDeleteAccount',
request_serializer=tl__pb2.ReqAccountDeleteAccount.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetAccountTTL = channel.unary_unary(
'/mtproto.Mtproto/AccountGetAccountTTL',
request_serializer=tl__pb2.ReqAccountGetAccountTTL.SerializeToString,
response_deserializer=tl__pb2.TypeAccountDaysTTL.FromString,
)
self.AccountSetAccountTTL = channel.unary_unary(
'/mtproto.Mtproto/AccountSetAccountTTL',
request_serializer=tl__pb2.ReqAccountSetAccountTTL.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.InvokeWithLayer = channel.unary_unary(
'/mtproto.Mtproto/InvokeWithLayer',
request_serializer=tl__pb2.ReqInvokeWithLayer.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.ContactsResolveUsername = channel.unary_unary(
'/mtproto.Mtproto/ContactsResolveUsername',
request_serializer=tl__pb2.ReqContactsResolveUsername.SerializeToString,
response_deserializer=tl__pb2.TypeContactsResolvedPeer.FromString,
)
self.AccountSendChangePhoneCode = channel.unary_unary(
'/mtproto.Mtproto/AccountSendChangePhoneCode',
request_serializer=tl__pb2.ReqAccountSendChangePhoneCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AccountChangePhone = channel.unary_unary(
'/mtproto.Mtproto/AccountChangePhone',
request_serializer=tl__pb2.ReqAccountChangePhone.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.MessagesGetAllStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAllStickers',
request_serializer=tl__pb2.ReqMessagesGetAllStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAllStickers.FromString,
)
self.AccountUpdateDeviceLocked = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateDeviceLocked',
request_serializer=tl__pb2.ReqAccountUpdateDeviceLocked.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetPassword = channel.unary_unary(
'/mtproto.Mtproto/AccountGetPassword',
request_serializer=tl__pb2.ReqAccountGetPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPassword.FromString,
)
self.AuthCheckPassword = channel.unary_unary(
'/mtproto.Mtproto/AuthCheckPassword',
request_serializer=tl__pb2.ReqAuthCheckPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.MessagesGetWebPagePreview = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetWebPagePreview',
request_serializer=tl__pb2.ReqMessagesGetWebPagePreview.SerializeToString,
response_deserializer=tl__pb2.TypeMessageMedia.FromString,
)
self.AccountGetAuthorizations = channel.unary_unary(
'/mtproto.Mtproto/AccountGetAuthorizations',
request_serializer=tl__pb2.ReqAccountGetAuthorizations.SerializeToString,
response_deserializer=tl__pb2.TypeAccountAuthorizations.FromString,
)
self.AccountResetAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AccountResetAuthorization',
request_serializer=tl__pb2.ReqAccountResetAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetPasswordSettings = channel.unary_unary(
'/mtproto.Mtproto/AccountGetPasswordSettings',
request_serializer=tl__pb2.ReqAccountGetPasswordSettings.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPasswordSettings.FromString,
)
self.AccountUpdatePasswordSettings = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdatePasswordSettings',
request_serializer=tl__pb2.ReqAccountUpdatePasswordSettings.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthRequestPasswordRecovery = channel.unary_unary(
'/mtproto.Mtproto/AuthRequestPasswordRecovery',
request_serializer=tl__pb2.ReqAuthRequestPasswordRecovery.SerializeToString,
response_deserializer=tl__pb2.TypeAuthPasswordRecovery.FromString,
)
self.AuthRecoverPassword = channel.unary_unary(
'/mtproto.Mtproto/AuthRecoverPassword',
request_serializer=tl__pb2.ReqAuthRecoverPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.InvokeWithoutUpdates = channel.unary_unary(
'/mtproto.Mtproto/InvokeWithoutUpdates',
request_serializer=tl__pb2.ReqInvokeWithoutUpdates.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.MessagesExportChatInvite = channel.unary_unary(
'/mtproto.Mtproto/MessagesExportChatInvite',
request_serializer=tl__pb2.ReqMessagesExportChatInvite.SerializeToString,
response_deserializer=tl__pb2.TypeExportedChatInvite.FromString,
)
self.MessagesCheckChatInvite = channel.unary_unary(
'/mtproto.Mtproto/MessagesCheckChatInvite',
request_serializer=tl__pb2.ReqMessagesCheckChatInvite.SerializeToString,
response_deserializer=tl__pb2.TypeChatInvite.FromString,
)
self.MessagesImportChatInvite = channel.unary_unary(
'/mtproto.Mtproto/MessagesImportChatInvite',
request_serializer=tl__pb2.ReqMessagesImportChatInvite.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesGetStickerSet = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetStickerSet',
request_serializer=tl__pb2.ReqMessagesGetStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.MessagesInstallStickerSet = channel.unary_unary(
'/mtproto.Mtproto/MessagesInstallStickerSet',
request_serializer=tl__pb2.ReqMessagesInstallStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSetInstallResult.FromString,
)
self.MessagesUninstallStickerSet = channel.unary_unary(
'/mtproto.Mtproto/MessagesUninstallStickerSet',
request_serializer=tl__pb2.ReqMessagesUninstallStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthImportBotAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AuthImportBotAuthorization',
request_serializer=tl__pb2.ReqAuthImportBotAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.MessagesStartBot = channel.unary_unary(
'/mtproto.Mtproto/MessagesStartBot',
request_serializer=tl__pb2.ReqMessagesStartBot.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.HelpGetAppChangelog = channel.unary_unary(
'/mtproto.Mtproto/HelpGetAppChangelog',
request_serializer=tl__pb2.ReqHelpGetAppChangelog.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesReportSpam = channel.unary_unary(
'/mtproto.Mtproto/MessagesReportSpam',
request_serializer=tl__pb2.ReqMessagesReportSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetMessagesViews = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMessagesViews',
request_serializer=tl__pb2.ReqMessagesGetMessagesViews.SerializeToString,
response_deserializer=tl__pb2.TypeVectorInt.FromString,
)
self.UpdatesGetChannelDifference = channel.unary_unary(
'/mtproto.Mtproto/UpdatesGetChannelDifference',
request_serializer=tl__pb2.ReqUpdatesGetChannelDifference.SerializeToString,
response_deserializer=tl__pb2.TypeUpdatesChannelDifference.FromString,
)
self.ChannelsReadHistory = channel.unary_unary(
'/mtproto.Mtproto/ChannelsReadHistory',
request_serializer=tl__pb2.ReqChannelsReadHistory.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsDeleteMessages = channel.unary_unary(
'/mtproto.Mtproto/ChannelsDeleteMessages',
request_serializer=tl__pb2.ReqChannelsDeleteMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.ChannelsDeleteUserHistory = channel.unary_unary(
'/mtproto.Mtproto/ChannelsDeleteUserHistory',
request_serializer=tl__pb2.ReqChannelsDeleteUserHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedHistory.FromString,
)
self.ChannelsReportSpam = channel.unary_unary(
'/mtproto.Mtproto/ChannelsReportSpam',
request_serializer=tl__pb2.ReqChannelsReportSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsGetMessages = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetMessages',
request_serializer=tl__pb2.ReqChannelsGetMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.ChannelsGetParticipants = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetParticipants',
request_serializer=tl__pb2.ReqChannelsGetParticipants.SerializeToString,
response_deserializer=tl__pb2.TypeChannelsChannelParticipants.FromString,
)
self.ChannelsGetParticipant = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetParticipant',
request_serializer=tl__pb2.ReqChannelsGetParticipant.SerializeToString,
response_deserializer=tl__pb2.TypeChannelsChannelParticipant.FromString,
)
self.ChannelsGetChannels = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetChannels',
request_serializer=tl__pb2.ReqChannelsGetChannels.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.ChannelsGetFullChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetFullChannel',
request_serializer=tl__pb2.ReqChannelsGetFullChannel.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChatFull.FromString,
)
self.ChannelsCreateChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsCreateChannel',
request_serializer=tl__pb2.ReqChannelsCreateChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsEditAbout = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditAbout',
request_serializer=tl__pb2.ReqChannelsEditAbout.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsEditAdmin = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditAdmin',
request_serializer=tl__pb2.ReqChannelsEditAdmin.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsEditTitle = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditTitle',
request_serializer=tl__pb2.ReqChannelsEditTitle.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsEditPhoto = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditPhoto',
request_serializer=tl__pb2.ReqChannelsEditPhoto.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsCheckUsername = channel.unary_unary(
'/mtproto.Mtproto/ChannelsCheckUsername',
request_serializer=tl__pb2.ReqChannelsCheckUsername.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsUpdateUsername = channel.unary_unary(
'/mtproto.Mtproto/ChannelsUpdateUsername',
request_serializer=tl__pb2.ReqChannelsUpdateUsername.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsJoinChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsJoinChannel',
request_serializer=tl__pb2.ReqChannelsJoinChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsLeaveChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsLeaveChannel',
request_serializer=tl__pb2.ReqChannelsLeaveChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsInviteToChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsInviteToChannel',
request_serializer=tl__pb2.ReqChannelsInviteToChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsExportInvite = channel.unary_unary(
'/mtproto.Mtproto/ChannelsExportInvite',
request_serializer=tl__pb2.ReqChannelsExportInvite.SerializeToString,
response_deserializer=tl__pb2.TypeExportedChatInvite.FromString,
)
self.ChannelsDeleteChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsDeleteChannel',
request_serializer=tl__pb2.ReqChannelsDeleteChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesToggleChatAdmins = channel.unary_unary(
'/mtproto.Mtproto/MessagesToggleChatAdmins',
request_serializer=tl__pb2.ReqMessagesToggleChatAdmins.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesEditChatAdmin = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditChatAdmin',
request_serializer=tl__pb2.ReqMessagesEditChatAdmin.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesMigrateChat = channel.unary_unary(
'/mtproto.Mtproto/MessagesMigrateChat',
request_serializer=tl__pb2.ReqMessagesMigrateChat.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesSearchGlobal = channel.unary_unary(
'/mtproto.Mtproto/MessagesSearchGlobal',
request_serializer=tl__pb2.ReqMessagesSearchGlobal.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.AccountReportPeer = channel.unary_unary(
'/mtproto.Mtproto/AccountReportPeer',
request_serializer=tl__pb2.ReqAccountReportPeer.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReorderStickerSets = channel.unary_unary(
'/mtproto.Mtproto/MessagesReorderStickerSets',
request_serializer=tl__pb2.ReqMessagesReorderStickerSets.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.HelpGetTermsOfService = channel.unary_unary(
'/mtproto.Mtproto/HelpGetTermsOfService',
request_serializer=tl__pb2.ReqHelpGetTermsOfService.SerializeToString,
response_deserializer=tl__pb2.TypeHelpTermsOfService.FromString,
)
self.MessagesGetDocumentByHash = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetDocumentByHash',
request_serializer=tl__pb2.ReqMessagesGetDocumentByHash.SerializeToString,
response_deserializer=tl__pb2.TypeDocument.FromString,
)
self.MessagesSearchGifs = channel.unary_unary(
'/mtproto.Mtproto/MessagesSearchGifs',
request_serializer=tl__pb2.ReqMessagesSearchGifs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesFoundGifs.FromString,
)
self.MessagesGetSavedGifs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetSavedGifs',
request_serializer=tl__pb2.ReqMessagesGetSavedGifs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSavedGifs.FromString,
)
self.MessagesSaveGif = channel.unary_unary(
'/mtproto.Mtproto/MessagesSaveGif',
request_serializer=tl__pb2.ReqMessagesSaveGif.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetInlineBotResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetInlineBotResults',
request_serializer=tl__pb2.ReqMessagesGetInlineBotResults.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesBotResults.FromString,
)
self.MessagesSetInlineBotResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetInlineBotResults',
request_serializer=tl__pb2.ReqMessagesSetInlineBotResults.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSendInlineBotResult = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendInlineBotResult',
request_serializer=tl__pb2.ReqMessagesSendInlineBotResult.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsToggleInvites = channel.unary_unary(
'/mtproto.Mtproto/ChannelsToggleInvites',
request_serializer=tl__pb2.ReqChannelsToggleInvites.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsExportMessageLink = channel.unary_unary(
'/mtproto.Mtproto/ChannelsExportMessageLink',
request_serializer=tl__pb2.ReqChannelsExportMessageLink.SerializeToString,
response_deserializer=tl__pb2.TypeExportedMessageLink.FromString,
)
self.ChannelsToggleSignatures = channel.unary_unary(
'/mtproto.Mtproto/ChannelsToggleSignatures',
request_serializer=tl__pb2.ReqChannelsToggleSignatures.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesHideReportSpam = channel.unary_unary(
'/mtproto.Mtproto/MessagesHideReportSpam',
request_serializer=tl__pb2.ReqMessagesHideReportSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetPeerSettings = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetPeerSettings',
request_serializer=tl__pb2.ReqMessagesGetPeerSettings.SerializeToString,
response_deserializer=tl__pb2.TypePeerSettings.FromString,
)
self.ChannelsUpdatePinnedMessage = channel.unary_unary(
'/mtproto.Mtproto/ChannelsUpdatePinnedMessage',
request_serializer=tl__pb2.ReqChannelsUpdatePinnedMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.AuthResendCode = channel.unary_unary(
'/mtproto.Mtproto/AuthResendCode',
request_serializer=tl__pb2.ReqAuthResendCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AuthCancelCode = channel.unary_unary(
'/mtproto.Mtproto/AuthCancelCode',
request_serializer=tl__pb2.ReqAuthCancelCode.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetMessageEditData = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMessageEditData',
request_serializer=tl__pb2.ReqMessagesGetMessageEditData.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessageEditData.FromString,
)
self.MessagesEditMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditMessage',
request_serializer=tl__pb2.ReqMessagesEditMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesEditInlineBotMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditInlineBotMessage',
request_serializer=tl__pb2.ReqMessagesEditInlineBotMessage.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetBotCallbackAnswer = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetBotCallbackAnswer',
request_serializer=tl__pb2.ReqMessagesGetBotCallbackAnswer.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesBotCallbackAnswer.FromString,
)
self.MessagesSetBotCallbackAnswer = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetBotCallbackAnswer',
request_serializer=tl__pb2.ReqMessagesSetBotCallbackAnswer.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsGetTopPeers = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetTopPeers',
request_serializer=tl__pb2.ReqContactsGetTopPeers.SerializeToString,
response_deserializer=tl__pb2.TypeContactsTopPeers.FromString,
)
self.ContactsResetTopPeerRating = channel.unary_unary(
'/mtproto.Mtproto/ContactsResetTopPeerRating',
request_serializer=tl__pb2.ReqContactsResetTopPeerRating.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetPeerDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetPeerDialogs',
request_serializer=tl__pb2.ReqMessagesGetPeerDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesPeerDialogs.FromString,
)
self.MessagesSaveDraft = channel.unary_unary(
'/mtproto.Mtproto/MessagesSaveDraft',
request_serializer=tl__pb2.ReqMessagesSaveDraft.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetAllDrafts = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAllDrafts',
request_serializer=tl__pb2.ReqMessagesGetAllDrafts.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.AccountSendConfirmPhoneCode = channel.unary_unary(
'/mtproto.Mtproto/AccountSendConfirmPhoneCode',
request_serializer=tl__pb2.ReqAccountSendConfirmPhoneCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AccountConfirmPhone = channel.unary_unary(
'/mtproto.Mtproto/AccountConfirmPhone',
request_serializer=tl__pb2.ReqAccountConfirmPhone.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetFeaturedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetFeaturedStickers',
request_serializer=tl__pb2.ReqMessagesGetFeaturedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesFeaturedStickers.FromString,
)
self.MessagesReadFeaturedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadFeaturedStickers',
request_serializer=tl__pb2.ReqMessagesReadFeaturedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetRecentStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetRecentStickers',
request_serializer=tl__pb2.ReqMessagesGetRecentStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesRecentStickers.FromString,
)
self.MessagesSaveRecentSticker = channel.unary_unary(
'/mtproto.Mtproto/MessagesSaveRecentSticker',
request_serializer=tl__pb2.ReqMessagesSaveRecentSticker.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesClearRecentStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesClearRecentStickers',
request_serializer=tl__pb2.ReqMessagesClearRecentStickers.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetArchivedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetArchivedStickers',
request_serializer=tl__pb2.ReqMessagesGetArchivedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesArchivedStickers.FromString,
)
self.ChannelsGetAdminedPublicChannels = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetAdminedPublicChannels',
request_serializer=tl__pb2.ReqChannelsGetAdminedPublicChannels.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.AuthDropTempAuthKeys = channel.unary_unary(
'/mtproto.Mtproto/AuthDropTempAuthKeys',
request_serializer=tl__pb2.ReqAuthDropTempAuthKeys.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSetGameScore = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetGameScore',
request_serializer=tl__pb2.ReqMessagesSetGameScore.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesSetInlineGameScore = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetInlineGameScore',
request_serializer=tl__pb2.ReqMessagesSetInlineGameScore.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetMaskStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMaskStickers',
request_serializer=tl__pb2.ReqMessagesGetMaskStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAllStickers.FromString,
)
self.MessagesGetAttachedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAttachedStickers',
request_serializer=tl__pb2.ReqMessagesGetAttachedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeVectorStickerSetCovered.FromString,
)
self.MessagesGetGameHighScores = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetGameHighScores',
request_serializer=tl__pb2.ReqMessagesGetGameHighScores.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesHighScores.FromString,
)
self.MessagesGetInlineGameHighScores = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetInlineGameHighScores',
request_serializer=tl__pb2.ReqMessagesGetInlineGameHighScores.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesHighScores.FromString,
)
self.MessagesGetCommonChats = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetCommonChats',
request_serializer=tl__pb2.ReqMessagesGetCommonChats.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.MessagesGetAllChats = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAllChats',
request_serializer=tl__pb2.ReqMessagesGetAllChats.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.HelpSetBotUpdatesStatus = channel.unary_unary(
'/mtproto.Mtproto/HelpSetBotUpdatesStatus',
request_serializer=tl__pb2.ReqHelpSetBotUpdatesStatus.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetWebPage = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetWebPage',
request_serializer=tl__pb2.ReqMessagesGetWebPage.SerializeToString,
response_deserializer=tl__pb2.TypeWebPage.FromString,
)
self.MessagesToggleDialogPin = channel.unary_unary(
'/mtproto.Mtproto/MessagesToggleDialogPin',
request_serializer=tl__pb2.ReqMessagesToggleDialogPin.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReorderPinnedDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesReorderPinnedDialogs',
request_serializer=tl__pb2.ReqMessagesReorderPinnedDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetPinnedDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetPinnedDialogs',
request_serializer=tl__pb2.ReqMessagesGetPinnedDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesPeerDialogs.FromString,
)
self.PhoneRequestCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneRequestCall',
request_serializer=tl__pb2.ReqPhoneRequestCall.SerializeToString,
response_deserializer=tl__pb2.TypePhonePhoneCall.FromString,
)
self.PhoneAcceptCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneAcceptCall',
request_serializer=tl__pb2.ReqPhoneAcceptCall.SerializeToString,
response_deserializer=tl__pb2.TypePhonePhoneCall.FromString,
)
self.PhoneDiscardCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneDiscardCall',
request_serializer=tl__pb2.ReqPhoneDiscardCall.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.PhoneReceivedCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneReceivedCall',
request_serializer=tl__pb2.ReqPhoneReceivedCall.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReportEncryptedSpam = channel.unary_unary(
'/mtproto.Mtproto/MessagesReportEncryptedSpam',
request_serializer=tl__pb2.ReqMessagesReportEncryptedSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.PaymentsGetPaymentForm = channel.unary_unary(
'/mtproto.Mtproto/PaymentsGetPaymentForm',
request_serializer=tl__pb2.ReqPaymentsGetPaymentForm.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsPaymentForm.FromString,
)
self.PaymentsSendPaymentForm = channel.unary_unary(
'/mtproto.Mtproto/PaymentsSendPaymentForm',
request_serializer=tl__pb2.ReqPaymentsSendPaymentForm.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsPaymentResult.FromString,
)
self.AccountGetTmpPassword = channel.unary_unary(
'/mtproto.Mtproto/AccountGetTmpPassword',
request_serializer=tl__pb2.ReqAccountGetTmpPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAccountTmpPassword.FromString,
)
self.MessagesSetBotShippingResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetBotShippingResults',
request_serializer=tl__pb2.ReqMessagesSetBotShippingResults.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSetBotPrecheckoutResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetBotPrecheckoutResults',
request_serializer=tl__pb2.ReqMessagesSetBotPrecheckoutResults.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.UploadGetWebFile = channel.unary_unary(
'/mtproto.Mtproto/UploadGetWebFile',
request_serializer=tl__pb2.ReqUploadGetWebFile.SerializeToString,
response_deserializer=tl__pb2.TypeUploadWebFile.FromString,
)
self.BotsSendCustomRequest = channel.unary_unary(
'/mtproto.Mtproto/BotsSendCustomRequest',
request_serializer=tl__pb2.ReqBotsSendCustomRequest.SerializeToString,
response_deserializer=tl__pb2.TypeDataJSON.FromString,
)
self.BotsAnswerWebhookJSONQuery = channel.unary_unary(
'/mtproto.Mtproto/BotsAnswerWebhookJSONQuery',
request_serializer=tl__pb2.ReqBotsAnswerWebhookJSONQuery.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.PaymentsGetPaymentReceipt = channel.unary_unary(
'/mtproto.Mtproto/PaymentsGetPaymentReceipt',
request_serializer=tl__pb2.ReqPaymentsGetPaymentReceipt.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsPaymentReceipt.FromString,
)
self.PaymentsValidateRequestedInfo = channel.unary_unary(
'/mtproto.Mtproto/PaymentsValidateRequestedInfo',
request_serializer=tl__pb2.ReqPaymentsValidateRequestedInfo.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsValidatedRequestedInfo.FromString,
)
self.PaymentsGetSavedInfo = channel.unary_unary(
'/mtproto.Mtproto/PaymentsGetSavedInfo',
request_serializer=tl__pb2.ReqPaymentsGetSavedInfo.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsSavedInfo.FromString,
)
self.PaymentsClearSavedInfo = channel.unary_unary(
'/mtproto.Mtproto/PaymentsClearSavedInfo',
request_serializer=tl__pb2.ReqPaymentsClearSavedInfo.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.PhoneGetCallConfig = channel.unary_unary(
'/mtproto.Mtproto/PhoneGetCallConfig',
request_serializer=tl__pb2.ReqPhoneGetCallConfig.SerializeToString,
response_deserializer=tl__pb2.TypeDataJSON.FromString,
)
self.PhoneConfirmCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneConfirmCall',
request_serializer=tl__pb2.ReqPhoneConfirmCall.SerializeToString,
response_deserializer=tl__pb2.TypePhonePhoneCall.FromString,
)
self.PhoneSetCallRating = channel.unary_unary(
'/mtproto.Mtproto/PhoneSetCallRating',
request_serializer=tl__pb2.ReqPhoneSetCallRating.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.PhoneSaveCallDebug = channel.unary_unary(
'/mtproto.Mtproto/PhoneSaveCallDebug',
request_serializer=tl__pb2.ReqPhoneSaveCallDebug.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.UploadGetCdnFile = channel.unary_unary(
'/mtproto.Mtproto/UploadGetCdnFile',
request_serializer=tl__pb2.ReqUploadGetCdnFile.SerializeToString,
response_deserializer=tl__pb2.TypeUploadCdnFile.FromString,
)
self.UploadReuploadCdnFile = channel.unary_unary(
'/mtproto.Mtproto/UploadReuploadCdnFile',
request_serializer=tl__pb2.ReqUploadReuploadCdnFile.SerializeToString,
response_deserializer=tl__pb2.TypeVectorCdnFileHash.FromString,
)
self.HelpGetCdnConfig = channel.unary_unary(
'/mtproto.Mtproto/HelpGetCdnConfig',
request_serializer=tl__pb2.ReqHelpGetCdnConfig.SerializeToString,
response_deserializer=tl__pb2.TypeCdnConfig.FromString,
)
self.MessagesUploadMedia = channel.unary_unary(
'/mtproto.Mtproto/MessagesUploadMedia',
request_serializer=tl__pb2.ReqMessagesUploadMedia.SerializeToString,
response_deserializer=tl__pb2.TypeMessageMedia.FromString,
)
self.StickersCreateStickerSet = channel.unary_unary(
'/mtproto.Mtproto/StickersCreateStickerSet',
request_serializer=tl__pb2.ReqStickersCreateStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.LangpackGetLangPack = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetLangPack',
request_serializer=tl__pb2.ReqLangpackGetLangPack.SerializeToString,
response_deserializer=tl__pb2.TypeLangPackDifference.FromString,
)
self.LangpackGetStrings = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetStrings',
request_serializer=tl__pb2.ReqLangpackGetStrings.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLangPackString.FromString,
)
self.LangpackGetDifference = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetDifference',
request_serializer=tl__pb2.ReqLangpackGetDifference.SerializeToString,
response_deserializer=tl__pb2.TypeLangPackDifference.FromString,
)
self.LangpackGetLanguages = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetLanguages',
request_serializer=tl__pb2.ReqLangpackGetLanguages.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLangPackLanguage.FromString,
)
self.ChannelsEditBanned = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditBanned',
request_serializer=tl__pb2.ReqChannelsEditBanned.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsGetAdminLog = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetAdminLog',
request_serializer=tl__pb2.ReqChannelsGetAdminLog.SerializeToString,
response_deserializer=tl__pb2.TypeChannelsAdminLogResults.FromString,
)
self.StickersRemoveStickerFromSet = channel.unary_unary(
'/mtproto.Mtproto/StickersRemoveStickerFromSet',
request_serializer=tl__pb2.ReqStickersRemoveStickerFromSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.StickersChangeStickerPosition = channel.unary_unary(
'/mtproto.Mtproto/StickersChangeStickerPosition',
request_serializer=tl__pb2.ReqStickersChangeStickerPosition.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.StickersAddStickerToSet = channel.unary_unary(
'/mtproto.Mtproto/StickersAddStickerToSet',
request_serializer=tl__pb2.ReqStickersAddStickerToSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.MessagesSendScreenshotNotification = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendScreenshotNotification',
request_serializer=tl__pb2.ReqMessagesSendScreenshotNotification.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.UploadGetCdnFileHashes = channel.unary_unary(
'/mtproto.Mtproto/UploadGetCdnFileHashes',
request_serializer=tl__pb2.ReqUploadGetCdnFileHashes.SerializeToString,
response_deserializer=tl__pb2.TypeVectorCdnFileHash.FromString,
)
self.MessagesGetUnreadMentions = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetUnreadMentions',
request_serializer=tl__pb2.ReqMessagesGetUnreadMentions.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesFaveSticker = channel.unary_unary(
'/mtproto.Mtproto/MessagesFaveSticker',
request_serializer=tl__pb2.ReqMessagesFaveSticker.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsSetStickers = channel.unary_unary(
'/mtproto.Mtproto/ChannelsSetStickers',
request_serializer=tl__pb2.ReqChannelsSetStickers.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsResetSaved = channel.unary_unary(
'/mtproto.Mtproto/ContactsResetSaved',
request_serializer=tl__pb2.ReqContactsResetSaved.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetFavedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetFavedStickers',
request_serializer=tl__pb2.ReqMessagesGetFavedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesFavedStickers.FromString,
)
self.ChannelsReadMessageContents = channel.unary_unary(
'/mtproto.Mtproto/ChannelsReadMessageContents',
request_serializer=tl__pb2.ReqChannelsReadMessageContents.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
class MtprotoServicer(object):
"""Procedures
"""
def InvokeAfterMsg(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InvokeAfterMsgs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthCheckPhone(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSendCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSignUp(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSignIn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthLogOut(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthResetAuthorizations(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSendInvites(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthExportAuthorization(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthImportAuthorization(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountRegisterDevice(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountUnregisterDevice(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountUpdateNotifySettings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetNotifySettings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountResetNotifySettings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountUpdateProfile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountUpdateStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetWallPapers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UsersGetUsers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UsersGetFullUser(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsGetStatuses(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsGetContacts(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsImportContacts(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsSearch(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsDeleteContact(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsDeleteContacts(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsBlock(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsUnblock(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsGetBlocked(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetMessages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetDialogs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetHistory(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSearch(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReadHistory(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesDeleteHistory(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesDeleteMessages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReceivedMessages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetTyping(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendMedia(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesForwardMessages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetChats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetFullChat(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesEditChatTitle(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesEditChatPhoto(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesAddChatUser(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesDeleteChatUser(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesCreateChat(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdatesGetState(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdatesGetDifference(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhotosUpdateProfilePhoto(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhotosUploadProfilePhoto(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadSaveFilePart(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadGetFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetNearestDc(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetAppUpdate(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpSaveAppLog(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetInviteText(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhotosDeletePhotos(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhotosGetUserPhotos(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesForwardMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetDhConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesRequestEncryption(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesAcceptEncryption(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesDiscardEncryption(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetEncryptedTyping(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReadEncryptedHistory(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendEncrypted(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendEncryptedFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendEncryptedService(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReceivedQueue(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadSaveBigFilePart(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InitConnection(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetSupport(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthBindTempAuthKey(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsExportCard(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsImportCard(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReadMessageContents(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountCheckUsername(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountUpdateUsername(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetPrivacy(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountSetPrivacy(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountDeleteAccount(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetAccountTTL(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountSetAccountTTL(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InvokeWithLayer(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsResolveUsername(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountSendChangePhoneCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountChangePhone(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetAllStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountUpdateDeviceLocked(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetPassword(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthCheckPassword(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetWebPagePreview(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetAuthorizations(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountResetAuthorization(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetPasswordSettings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountUpdatePasswordSettings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthRequestPasswordRecovery(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthRecoverPassword(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InvokeWithoutUpdates(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesExportChatInvite(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesCheckChatInvite(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesImportChatInvite(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetStickerSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesInstallStickerSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesUninstallStickerSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthImportBotAuthorization(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesStartBot(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetAppChangelog(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReportSpam(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetMessagesViews(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdatesGetChannelDifference(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsReadHistory(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsDeleteMessages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsDeleteUserHistory(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsReportSpam(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetMessages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetParticipants(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetParticipant(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetChannels(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetFullChannel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsCreateChannel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsEditAbout(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsEditAdmin(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsEditTitle(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsEditPhoto(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsCheckUsername(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsUpdateUsername(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsJoinChannel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsLeaveChannel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsInviteToChannel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsExportInvite(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsDeleteChannel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesToggleChatAdmins(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesEditChatAdmin(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesMigrateChat(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSearchGlobal(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountReportPeer(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReorderStickerSets(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetTermsOfService(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetDocumentByHash(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSearchGifs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetSavedGifs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSaveGif(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetInlineBotResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetInlineBotResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendInlineBotResult(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsToggleInvites(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsExportMessageLink(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsToggleSignatures(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesHideReportSpam(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetPeerSettings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsUpdatePinnedMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthResendCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthCancelCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetMessageEditData(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesEditMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesEditInlineBotMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetBotCallbackAnswer(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetBotCallbackAnswer(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsGetTopPeers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsResetTopPeerRating(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetPeerDialogs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSaveDraft(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetAllDrafts(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountSendConfirmPhoneCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountConfirmPhone(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetFeaturedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReadFeaturedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetRecentStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSaveRecentSticker(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesClearRecentStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetArchivedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetAdminedPublicChannels(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthDropTempAuthKeys(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetGameScore(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetInlineGameScore(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetMaskStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetAttachedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetGameHighScores(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetInlineGameHighScores(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetCommonChats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetAllChats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpSetBotUpdatesStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetWebPage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesToggleDialogPin(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReorderPinnedDialogs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetPinnedDialogs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneRequestCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneAcceptCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneDiscardCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneReceivedCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReportEncryptedSpam(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsGetPaymentForm(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsSendPaymentForm(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetTmpPassword(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetBotShippingResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetBotPrecheckoutResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadGetWebFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BotsSendCustomRequest(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BotsAnswerWebhookJSONQuery(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsGetPaymentReceipt(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsValidateRequestedInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsGetSavedInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsClearSavedInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneGetCallConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneConfirmCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneSetCallRating(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneSaveCallDebug(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadGetCdnFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadReuploadCdnFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetCdnConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesUploadMedia(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersCreateStickerSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetLangPack(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetStrings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetDifference(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetLanguages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsEditBanned(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetAdminLog(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersRemoveStickerFromSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersChangeStickerPosition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersAddStickerToSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendScreenshotNotification(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadGetCdnFileHashes(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetUnreadMentions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesFaveSticker(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsSetStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsResetSaved(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetFavedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsReadMessageContents(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MtprotoServicer_to_server(servicer, server):
rpc_method_handlers = {
'InvokeAfterMsg': grpc.unary_unary_rpc_method_handler(
servicer.InvokeAfterMsg,
request_deserializer=tl__pb2.ReqInvokeAfterMsg.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'InvokeAfterMsgs': grpc.unary_unary_rpc_method_handler(
servicer.InvokeAfterMsgs,
request_deserializer=tl__pb2.ReqInvokeAfterMsgs.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'AuthCheckPhone': grpc.unary_unary_rpc_method_handler(
servicer.AuthCheckPhone,
request_deserializer=tl__pb2.ReqAuthCheckPhone.FromString,
response_serializer=tl__pb2.TypeAuthCheckedPhone.SerializeToString,
),
'AuthSendCode': grpc.unary_unary_rpc_method_handler(
servicer.AuthSendCode,
request_deserializer=tl__pb2.ReqAuthSendCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AuthSignUp': grpc.unary_unary_rpc_method_handler(
servicer.AuthSignUp,
request_deserializer=tl__pb2.ReqAuthSignUp.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'AuthSignIn': grpc.unary_unary_rpc_method_handler(
servicer.AuthSignIn,
request_deserializer=tl__pb2.ReqAuthSignIn.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'AuthLogOut': grpc.unary_unary_rpc_method_handler(
servicer.AuthLogOut,
request_deserializer=tl__pb2.ReqAuthLogOut.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthResetAuthorizations': grpc.unary_unary_rpc_method_handler(
servicer.AuthResetAuthorizations,
request_deserializer=tl__pb2.ReqAuthResetAuthorizations.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthSendInvites': grpc.unary_unary_rpc_method_handler(
servicer.AuthSendInvites,
request_deserializer=tl__pb2.ReqAuthSendInvites.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthExportAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AuthExportAuthorization,
request_deserializer=tl__pb2.ReqAuthExportAuthorization.FromString,
response_serializer=tl__pb2.TypeAuthExportedAuthorization.SerializeToString,
),
'AuthImportAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AuthImportAuthorization,
request_deserializer=tl__pb2.ReqAuthImportAuthorization.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'AccountRegisterDevice': grpc.unary_unary_rpc_method_handler(
servicer.AccountRegisterDevice,
request_deserializer=tl__pb2.ReqAccountRegisterDevice.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUnregisterDevice': grpc.unary_unary_rpc_method_handler(
servicer.AccountUnregisterDevice,
request_deserializer=tl__pb2.ReqAccountUnregisterDevice.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUpdateNotifySettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateNotifySettings,
request_deserializer=tl__pb2.ReqAccountUpdateNotifySettings.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetNotifySettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetNotifySettings,
request_deserializer=tl__pb2.ReqAccountGetNotifySettings.FromString,
response_serializer=tl__pb2.TypePeerNotifySettings.SerializeToString,
),
'AccountResetNotifySettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountResetNotifySettings,
request_deserializer=tl__pb2.ReqAccountResetNotifySettings.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUpdateProfile': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateProfile,
request_deserializer=tl__pb2.ReqAccountUpdateProfile.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'AccountUpdateStatus': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateStatus,
request_deserializer=tl__pb2.ReqAccountUpdateStatus.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetWallPapers': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetWallPapers,
request_deserializer=tl__pb2.ReqAccountGetWallPapers.FromString,
response_serializer=tl__pb2.TypeVectorWallPaper.SerializeToString,
),
'UsersGetUsers': grpc.unary_unary_rpc_method_handler(
servicer.UsersGetUsers,
request_deserializer=tl__pb2.ReqUsersGetUsers.FromString,
response_serializer=tl__pb2.TypeVectorUser.SerializeToString,
),
'UsersGetFullUser': grpc.unary_unary_rpc_method_handler(
servicer.UsersGetFullUser,
request_deserializer=tl__pb2.ReqUsersGetFullUser.FromString,
response_serializer=tl__pb2.TypeUserFull.SerializeToString,
),
'ContactsGetStatuses': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetStatuses,
request_deserializer=tl__pb2.ReqContactsGetStatuses.FromString,
response_serializer=tl__pb2.TypeVectorContactStatus.SerializeToString,
),
'ContactsGetContacts': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetContacts,
request_deserializer=tl__pb2.ReqContactsGetContacts.FromString,
response_serializer=tl__pb2.TypeContactsContacts.SerializeToString,
),
'ContactsImportContacts': grpc.unary_unary_rpc_method_handler(
servicer.ContactsImportContacts,
request_deserializer=tl__pb2.ReqContactsImportContacts.FromString,
response_serializer=tl__pb2.TypeContactsImportedContacts.SerializeToString,
),
'ContactsSearch': grpc.unary_unary_rpc_method_handler(
servicer.ContactsSearch,
request_deserializer=tl__pb2.ReqContactsSearch.FromString,
response_serializer=tl__pb2.TypeContactsFound.SerializeToString,
),
'ContactsDeleteContact': grpc.unary_unary_rpc_method_handler(
servicer.ContactsDeleteContact,
request_deserializer=tl__pb2.ReqContactsDeleteContact.FromString,
response_serializer=tl__pb2.TypeContactsLink.SerializeToString,
),
'ContactsDeleteContacts': grpc.unary_unary_rpc_method_handler(
servicer.ContactsDeleteContacts,
request_deserializer=tl__pb2.ReqContactsDeleteContacts.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsBlock': grpc.unary_unary_rpc_method_handler(
servicer.ContactsBlock,
request_deserializer=tl__pb2.ReqContactsBlock.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsUnblock': grpc.unary_unary_rpc_method_handler(
servicer.ContactsUnblock,
request_deserializer=tl__pb2.ReqContactsUnblock.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsGetBlocked': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetBlocked,
request_deserializer=tl__pb2.ReqContactsGetBlocked.FromString,
response_serializer=tl__pb2.TypeContactsBlocked.SerializeToString,
),
'MessagesGetMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMessages,
request_deserializer=tl__pb2.ReqMessagesGetMessages.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesGetDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetDialogs,
request_deserializer=tl__pb2.ReqMessagesGetDialogs.FromString,
response_serializer=tl__pb2.TypeMessagesDialogs.SerializeToString,
),
'MessagesGetHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetHistory,
request_deserializer=tl__pb2.ReqMessagesGetHistory.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesSearch': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSearch,
request_deserializer=tl__pb2.ReqMessagesSearch.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesReadHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadHistory,
request_deserializer=tl__pb2.ReqMessagesReadHistory.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'MessagesDeleteHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDeleteHistory,
request_deserializer=tl__pb2.ReqMessagesDeleteHistory.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedHistory.SerializeToString,
),
'MessagesDeleteMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDeleteMessages,
request_deserializer=tl__pb2.ReqMessagesDeleteMessages.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'MessagesReceivedMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReceivedMessages,
request_deserializer=tl__pb2.ReqMessagesReceivedMessages.FromString,
response_serializer=tl__pb2.TypeVectorReceivedNotifyMessage.SerializeToString,
),
'MessagesSetTyping': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetTyping,
request_deserializer=tl__pb2.ReqMessagesSetTyping.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSendMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendMessage,
request_deserializer=tl__pb2.ReqMessagesSendMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesSendMedia': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendMedia,
request_deserializer=tl__pb2.ReqMessagesSendMedia.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesForwardMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesForwardMessages,
request_deserializer=tl__pb2.ReqMessagesForwardMessages.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesGetChats': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetChats,
request_deserializer=tl__pb2.ReqMessagesGetChats.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'MessagesGetFullChat': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetFullChat,
request_deserializer=tl__pb2.ReqMessagesGetFullChat.FromString,
response_serializer=tl__pb2.TypeMessagesChatFull.SerializeToString,
),
'MessagesEditChatTitle': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditChatTitle,
request_deserializer=tl__pb2.ReqMessagesEditChatTitle.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesEditChatPhoto': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditChatPhoto,
request_deserializer=tl__pb2.ReqMessagesEditChatPhoto.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesAddChatUser': grpc.unary_unary_rpc_method_handler(
servicer.MessagesAddChatUser,
request_deserializer=tl__pb2.ReqMessagesAddChatUser.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesDeleteChatUser': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDeleteChatUser,
request_deserializer=tl__pb2.ReqMessagesDeleteChatUser.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesCreateChat': grpc.unary_unary_rpc_method_handler(
servicer.MessagesCreateChat,
request_deserializer=tl__pb2.ReqMessagesCreateChat.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'UpdatesGetState': grpc.unary_unary_rpc_method_handler(
servicer.UpdatesGetState,
request_deserializer=tl__pb2.ReqUpdatesGetState.FromString,
response_serializer=tl__pb2.TypeUpdatesState.SerializeToString,
),
'UpdatesGetDifference': grpc.unary_unary_rpc_method_handler(
servicer.UpdatesGetDifference,
request_deserializer=tl__pb2.ReqUpdatesGetDifference.FromString,
response_serializer=tl__pb2.TypeUpdatesDifference.SerializeToString,
),
'PhotosUpdateProfilePhoto': grpc.unary_unary_rpc_method_handler(
servicer.PhotosUpdateProfilePhoto,
request_deserializer=tl__pb2.ReqPhotosUpdateProfilePhoto.FromString,
response_serializer=tl__pb2.TypeUserProfilePhoto.SerializeToString,
),
'PhotosUploadProfilePhoto': grpc.unary_unary_rpc_method_handler(
servicer.PhotosUploadProfilePhoto,
request_deserializer=tl__pb2.ReqPhotosUploadProfilePhoto.FromString,
response_serializer=tl__pb2.TypePhotosPhoto.SerializeToString,
),
'UploadSaveFilePart': grpc.unary_unary_rpc_method_handler(
servicer.UploadSaveFilePart,
request_deserializer=tl__pb2.ReqUploadSaveFilePart.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'UploadGetFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetFile,
request_deserializer=tl__pb2.ReqUploadGetFile.FromString,
response_serializer=tl__pb2.TypeUploadFile.SerializeToString,
),
'HelpGetConfig': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetConfig,
request_deserializer=tl__pb2.ReqHelpGetConfig.FromString,
response_serializer=tl__pb2.TypeConfig.SerializeToString,
),
'HelpGetNearestDc': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetNearestDc,
request_deserializer=tl__pb2.ReqHelpGetNearestDc.FromString,
response_serializer=tl__pb2.TypeNearestDc.SerializeToString,
),
'HelpGetAppUpdate': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetAppUpdate,
request_deserializer=tl__pb2.ReqHelpGetAppUpdate.FromString,
response_serializer=tl__pb2.TypeHelpAppUpdate.SerializeToString,
),
'HelpSaveAppLog': grpc.unary_unary_rpc_method_handler(
servicer.HelpSaveAppLog,
request_deserializer=tl__pb2.ReqHelpSaveAppLog.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'HelpGetInviteText': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetInviteText,
request_deserializer=tl__pb2.ReqHelpGetInviteText.FromString,
response_serializer=tl__pb2.TypeHelpInviteText.SerializeToString,
),
'PhotosDeletePhotos': grpc.unary_unary_rpc_method_handler(
servicer.PhotosDeletePhotos,
request_deserializer=tl__pb2.ReqPhotosDeletePhotos.FromString,
response_serializer=tl__pb2.TypeVectorLong.SerializeToString,
),
'PhotosGetUserPhotos': grpc.unary_unary_rpc_method_handler(
servicer.PhotosGetUserPhotos,
request_deserializer=tl__pb2.ReqPhotosGetUserPhotos.FromString,
response_serializer=tl__pb2.TypePhotosPhotos.SerializeToString,
),
'MessagesForwardMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesForwardMessage,
request_deserializer=tl__pb2.ReqMessagesForwardMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesGetDhConfig': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetDhConfig,
request_deserializer=tl__pb2.ReqMessagesGetDhConfig.FromString,
response_serializer=tl__pb2.TypeMessagesDhConfig.SerializeToString,
),
'MessagesRequestEncryption': grpc.unary_unary_rpc_method_handler(
servicer.MessagesRequestEncryption,
request_deserializer=tl__pb2.ReqMessagesRequestEncryption.FromString,
response_serializer=tl__pb2.TypeEncryptedChat.SerializeToString,
),
'MessagesAcceptEncryption': grpc.unary_unary_rpc_method_handler(
servicer.MessagesAcceptEncryption,
request_deserializer=tl__pb2.ReqMessagesAcceptEncryption.FromString,
response_serializer=tl__pb2.TypeEncryptedChat.SerializeToString,
),
'MessagesDiscardEncryption': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDiscardEncryption,
request_deserializer=tl__pb2.ReqMessagesDiscardEncryption.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSetEncryptedTyping': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetEncryptedTyping,
request_deserializer=tl__pb2.ReqMessagesSetEncryptedTyping.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReadEncryptedHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadEncryptedHistory,
request_deserializer=tl__pb2.ReqMessagesReadEncryptedHistory.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSendEncrypted': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendEncrypted,
request_deserializer=tl__pb2.ReqMessagesSendEncrypted.FromString,
response_serializer=tl__pb2.TypeMessagesSentEncryptedMessage.SerializeToString,
),
'MessagesSendEncryptedFile': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendEncryptedFile,
request_deserializer=tl__pb2.ReqMessagesSendEncryptedFile.FromString,
response_serializer=tl__pb2.TypeMessagesSentEncryptedMessage.SerializeToString,
),
'MessagesSendEncryptedService': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendEncryptedService,
request_deserializer=tl__pb2.ReqMessagesSendEncryptedService.FromString,
response_serializer=tl__pb2.TypeMessagesSentEncryptedMessage.SerializeToString,
),
'MessagesReceivedQueue': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReceivedQueue,
request_deserializer=tl__pb2.ReqMessagesReceivedQueue.FromString,
response_serializer=tl__pb2.TypeVectorLong.SerializeToString,
),
'UploadSaveBigFilePart': grpc.unary_unary_rpc_method_handler(
servicer.UploadSaveBigFilePart,
request_deserializer=tl__pb2.ReqUploadSaveBigFilePart.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'InitConnection': grpc.unary_unary_rpc_method_handler(
servicer.InitConnection,
request_deserializer=tl__pb2.ReqInitConnection.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'HelpGetSupport': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetSupport,
request_deserializer=tl__pb2.ReqHelpGetSupport.FromString,
response_serializer=tl__pb2.TypeHelpSupport.SerializeToString,
),
'AuthBindTempAuthKey': grpc.unary_unary_rpc_method_handler(
servicer.AuthBindTempAuthKey,
request_deserializer=tl__pb2.ReqAuthBindTempAuthKey.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsExportCard': grpc.unary_unary_rpc_method_handler(
servicer.ContactsExportCard,
request_deserializer=tl__pb2.ReqContactsExportCard.FromString,
response_serializer=tl__pb2.TypeVectorInt.SerializeToString,
),
'ContactsImportCard': grpc.unary_unary_rpc_method_handler(
servicer.ContactsImportCard,
request_deserializer=tl__pb2.ReqContactsImportCard.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'MessagesReadMessageContents': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadMessageContents,
request_deserializer=tl__pb2.ReqMessagesReadMessageContents.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'AccountCheckUsername': grpc.unary_unary_rpc_method_handler(
servicer.AccountCheckUsername,
request_deserializer=tl__pb2.ReqAccountCheckUsername.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUpdateUsername': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateUsername,
request_deserializer=tl__pb2.ReqAccountUpdateUsername.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'AccountGetPrivacy': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetPrivacy,
request_deserializer=tl__pb2.ReqAccountGetPrivacy.FromString,
response_serializer=tl__pb2.TypeAccountPrivacyRules.SerializeToString,
),
'AccountSetPrivacy': grpc.unary_unary_rpc_method_handler(
servicer.AccountSetPrivacy,
request_deserializer=tl__pb2.ReqAccountSetPrivacy.FromString,
response_serializer=tl__pb2.TypeAccountPrivacyRules.SerializeToString,
),
'AccountDeleteAccount': grpc.unary_unary_rpc_method_handler(
servicer.AccountDeleteAccount,
request_deserializer=tl__pb2.ReqAccountDeleteAccount.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetAccountTTL': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetAccountTTL,
request_deserializer=tl__pb2.ReqAccountGetAccountTTL.FromString,
response_serializer=tl__pb2.TypeAccountDaysTTL.SerializeToString,
),
'AccountSetAccountTTL': grpc.unary_unary_rpc_method_handler(
servicer.AccountSetAccountTTL,
request_deserializer=tl__pb2.ReqAccountSetAccountTTL.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'InvokeWithLayer': grpc.unary_unary_rpc_method_handler(
servicer.InvokeWithLayer,
request_deserializer=tl__pb2.ReqInvokeWithLayer.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'ContactsResolveUsername': grpc.unary_unary_rpc_method_handler(
servicer.ContactsResolveUsername,
request_deserializer=tl__pb2.ReqContactsResolveUsername.FromString,
response_serializer=tl__pb2.TypeContactsResolvedPeer.SerializeToString,
),
'AccountSendChangePhoneCode': grpc.unary_unary_rpc_method_handler(
servicer.AccountSendChangePhoneCode,
request_deserializer=tl__pb2.ReqAccountSendChangePhoneCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AccountChangePhone': grpc.unary_unary_rpc_method_handler(
servicer.AccountChangePhone,
request_deserializer=tl__pb2.ReqAccountChangePhone.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'MessagesGetAllStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAllStickers,
request_deserializer=tl__pb2.ReqMessagesGetAllStickers.FromString,
response_serializer=tl__pb2.TypeMessagesAllStickers.SerializeToString,
),
'AccountUpdateDeviceLocked': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateDeviceLocked,
request_deserializer=tl__pb2.ReqAccountUpdateDeviceLocked.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetPassword': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetPassword,
request_deserializer=tl__pb2.ReqAccountGetPassword.FromString,
response_serializer=tl__pb2.TypeAccountPassword.SerializeToString,
),
'AuthCheckPassword': grpc.unary_unary_rpc_method_handler(
servicer.AuthCheckPassword,
request_deserializer=tl__pb2.ReqAuthCheckPassword.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'MessagesGetWebPagePreview': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetWebPagePreview,
request_deserializer=tl__pb2.ReqMessagesGetWebPagePreview.FromString,
response_serializer=tl__pb2.TypeMessageMedia.SerializeToString,
),
'AccountGetAuthorizations': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetAuthorizations,
request_deserializer=tl__pb2.ReqAccountGetAuthorizations.FromString,
response_serializer=tl__pb2.TypeAccountAuthorizations.SerializeToString,
),
'AccountResetAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AccountResetAuthorization,
request_deserializer=tl__pb2.ReqAccountResetAuthorization.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetPasswordSettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetPasswordSettings,
request_deserializer=tl__pb2.ReqAccountGetPasswordSettings.FromString,
response_serializer=tl__pb2.TypeAccountPasswordSettings.SerializeToString,
),
'AccountUpdatePasswordSettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdatePasswordSettings,
request_deserializer=tl__pb2.ReqAccountUpdatePasswordSettings.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthRequestPasswordRecovery': grpc.unary_unary_rpc_method_handler(
servicer.AuthRequestPasswordRecovery,
request_deserializer=tl__pb2.ReqAuthRequestPasswordRecovery.FromString,
response_serializer=tl__pb2.TypeAuthPasswordRecovery.SerializeToString,
),
'AuthRecoverPassword': grpc.unary_unary_rpc_method_handler(
servicer.AuthRecoverPassword,
request_deserializer=tl__pb2.ReqAuthRecoverPassword.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'InvokeWithoutUpdates': grpc.unary_unary_rpc_method_handler(
servicer.InvokeWithoutUpdates,
request_deserializer=tl__pb2.ReqInvokeWithoutUpdates.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'MessagesExportChatInvite': grpc.unary_unary_rpc_method_handler(
servicer.MessagesExportChatInvite,
request_deserializer=tl__pb2.ReqMessagesExportChatInvite.FromString,
response_serializer=tl__pb2.TypeExportedChatInvite.SerializeToString,
),
'MessagesCheckChatInvite': grpc.unary_unary_rpc_method_handler(
servicer.MessagesCheckChatInvite,
request_deserializer=tl__pb2.ReqMessagesCheckChatInvite.FromString,
response_serializer=tl__pb2.TypeChatInvite.SerializeToString,
),
'MessagesImportChatInvite': grpc.unary_unary_rpc_method_handler(
servicer.MessagesImportChatInvite,
request_deserializer=tl__pb2.ReqMessagesImportChatInvite.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesGetStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetStickerSet,
request_deserializer=tl__pb2.ReqMessagesGetStickerSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'MessagesInstallStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.MessagesInstallStickerSet,
request_deserializer=tl__pb2.ReqMessagesInstallStickerSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSetInstallResult.SerializeToString,
),
'MessagesUninstallStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.MessagesUninstallStickerSet,
request_deserializer=tl__pb2.ReqMessagesUninstallStickerSet.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthImportBotAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AuthImportBotAuthorization,
request_deserializer=tl__pb2.ReqAuthImportBotAuthorization.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'MessagesStartBot': grpc.unary_unary_rpc_method_handler(
servicer.MessagesStartBot,
request_deserializer=tl__pb2.ReqMessagesStartBot.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'HelpGetAppChangelog': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetAppChangelog,
request_deserializer=tl__pb2.ReqHelpGetAppChangelog.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesReportSpam': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReportSpam,
request_deserializer=tl__pb2.ReqMessagesReportSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetMessagesViews': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMessagesViews,
request_deserializer=tl__pb2.ReqMessagesGetMessagesViews.FromString,
response_serializer=tl__pb2.TypeVectorInt.SerializeToString,
),
'UpdatesGetChannelDifference': grpc.unary_unary_rpc_method_handler(
servicer.UpdatesGetChannelDifference,
request_deserializer=tl__pb2.ReqUpdatesGetChannelDifference.FromString,
response_serializer=tl__pb2.TypeUpdatesChannelDifference.SerializeToString,
),
'ChannelsReadHistory': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsReadHistory,
request_deserializer=tl__pb2.ReqChannelsReadHistory.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsDeleteMessages': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsDeleteMessages,
request_deserializer=tl__pb2.ReqChannelsDeleteMessages.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'ChannelsDeleteUserHistory': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsDeleteUserHistory,
request_deserializer=tl__pb2.ReqChannelsDeleteUserHistory.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedHistory.SerializeToString,
),
'ChannelsReportSpam': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsReportSpam,
request_deserializer=tl__pb2.ReqChannelsReportSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsGetMessages': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetMessages,
request_deserializer=tl__pb2.ReqChannelsGetMessages.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'ChannelsGetParticipants': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetParticipants,
request_deserializer=tl__pb2.ReqChannelsGetParticipants.FromString,
response_serializer=tl__pb2.TypeChannelsChannelParticipants.SerializeToString,
),
'ChannelsGetParticipant': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetParticipant,
request_deserializer=tl__pb2.ReqChannelsGetParticipant.FromString,
response_serializer=tl__pb2.TypeChannelsChannelParticipant.SerializeToString,
),
'ChannelsGetChannels': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetChannels,
request_deserializer=tl__pb2.ReqChannelsGetChannels.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'ChannelsGetFullChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetFullChannel,
request_deserializer=tl__pb2.ReqChannelsGetFullChannel.FromString,
response_serializer=tl__pb2.TypeMessagesChatFull.SerializeToString,
),
'ChannelsCreateChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsCreateChannel,
request_deserializer=tl__pb2.ReqChannelsCreateChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsEditAbout': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditAbout,
request_deserializer=tl__pb2.ReqChannelsEditAbout.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsEditAdmin': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditAdmin,
request_deserializer=tl__pb2.ReqChannelsEditAdmin.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsEditTitle': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditTitle,
request_deserializer=tl__pb2.ReqChannelsEditTitle.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsEditPhoto': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditPhoto,
request_deserializer=tl__pb2.ReqChannelsEditPhoto.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsCheckUsername': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsCheckUsername,
request_deserializer=tl__pb2.ReqChannelsCheckUsername.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsUpdateUsername': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsUpdateUsername,
request_deserializer=tl__pb2.ReqChannelsUpdateUsername.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsJoinChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsJoinChannel,
request_deserializer=tl__pb2.ReqChannelsJoinChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsLeaveChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsLeaveChannel,
request_deserializer=tl__pb2.ReqChannelsLeaveChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsInviteToChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsInviteToChannel,
request_deserializer=tl__pb2.ReqChannelsInviteToChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsExportInvite': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsExportInvite,
request_deserializer=tl__pb2.ReqChannelsExportInvite.FromString,
response_serializer=tl__pb2.TypeExportedChatInvite.SerializeToString,
),
'ChannelsDeleteChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsDeleteChannel,
request_deserializer=tl__pb2.ReqChannelsDeleteChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesToggleChatAdmins': grpc.unary_unary_rpc_method_handler(
servicer.MessagesToggleChatAdmins,
request_deserializer=tl__pb2.ReqMessagesToggleChatAdmins.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesEditChatAdmin': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditChatAdmin,
request_deserializer=tl__pb2.ReqMessagesEditChatAdmin.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesMigrateChat': grpc.unary_unary_rpc_method_handler(
servicer.MessagesMigrateChat,
request_deserializer=tl__pb2.ReqMessagesMigrateChat.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesSearchGlobal': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSearchGlobal,
request_deserializer=tl__pb2.ReqMessagesSearchGlobal.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'AccountReportPeer': grpc.unary_unary_rpc_method_handler(
servicer.AccountReportPeer,
request_deserializer=tl__pb2.ReqAccountReportPeer.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReorderStickerSets': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReorderStickerSets,
request_deserializer=tl__pb2.ReqMessagesReorderStickerSets.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'HelpGetTermsOfService': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetTermsOfService,
request_deserializer=tl__pb2.ReqHelpGetTermsOfService.FromString,
response_serializer=tl__pb2.TypeHelpTermsOfService.SerializeToString,
),
'MessagesGetDocumentByHash': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetDocumentByHash,
request_deserializer=tl__pb2.ReqMessagesGetDocumentByHash.FromString,
response_serializer=tl__pb2.TypeDocument.SerializeToString,
),
'MessagesSearchGifs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSearchGifs,
request_deserializer=tl__pb2.ReqMessagesSearchGifs.FromString,
response_serializer=tl__pb2.TypeMessagesFoundGifs.SerializeToString,
),
'MessagesGetSavedGifs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetSavedGifs,
request_deserializer=tl__pb2.ReqMessagesGetSavedGifs.FromString,
response_serializer=tl__pb2.TypeMessagesSavedGifs.SerializeToString,
),
'MessagesSaveGif': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSaveGif,
request_deserializer=tl__pb2.ReqMessagesSaveGif.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetInlineBotResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetInlineBotResults,
request_deserializer=tl__pb2.ReqMessagesGetInlineBotResults.FromString,
response_serializer=tl__pb2.TypeMessagesBotResults.SerializeToString,
),
'MessagesSetInlineBotResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetInlineBotResults,
request_deserializer=tl__pb2.ReqMessagesSetInlineBotResults.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSendInlineBotResult': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendInlineBotResult,
request_deserializer=tl__pb2.ReqMessagesSendInlineBotResult.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsToggleInvites': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsToggleInvites,
request_deserializer=tl__pb2.ReqChannelsToggleInvites.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsExportMessageLink': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsExportMessageLink,
request_deserializer=tl__pb2.ReqChannelsExportMessageLink.FromString,
response_serializer=tl__pb2.TypeExportedMessageLink.SerializeToString,
),
'ChannelsToggleSignatures': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsToggleSignatures,
request_deserializer=tl__pb2.ReqChannelsToggleSignatures.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesHideReportSpam': grpc.unary_unary_rpc_method_handler(
servicer.MessagesHideReportSpam,
request_deserializer=tl__pb2.ReqMessagesHideReportSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetPeerSettings': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetPeerSettings,
request_deserializer=tl__pb2.ReqMessagesGetPeerSettings.FromString,
response_serializer=tl__pb2.TypePeerSettings.SerializeToString,
),
'ChannelsUpdatePinnedMessage': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsUpdatePinnedMessage,
request_deserializer=tl__pb2.ReqChannelsUpdatePinnedMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'AuthResendCode': grpc.unary_unary_rpc_method_handler(
servicer.AuthResendCode,
request_deserializer=tl__pb2.ReqAuthResendCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AuthCancelCode': grpc.unary_unary_rpc_method_handler(
servicer.AuthCancelCode,
request_deserializer=tl__pb2.ReqAuthCancelCode.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetMessageEditData': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMessageEditData,
request_deserializer=tl__pb2.ReqMessagesGetMessageEditData.FromString,
response_serializer=tl__pb2.TypeMessagesMessageEditData.SerializeToString,
),
'MessagesEditMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditMessage,
request_deserializer=tl__pb2.ReqMessagesEditMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesEditInlineBotMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditInlineBotMessage,
request_deserializer=tl__pb2.ReqMessagesEditInlineBotMessage.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetBotCallbackAnswer': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetBotCallbackAnswer,
request_deserializer=tl__pb2.ReqMessagesGetBotCallbackAnswer.FromString,
response_serializer=tl__pb2.TypeMessagesBotCallbackAnswer.SerializeToString,
),
'MessagesSetBotCallbackAnswer': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetBotCallbackAnswer,
request_deserializer=tl__pb2.ReqMessagesSetBotCallbackAnswer.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsGetTopPeers': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetTopPeers,
request_deserializer=tl__pb2.ReqContactsGetTopPeers.FromString,
response_serializer=tl__pb2.TypeContactsTopPeers.SerializeToString,
),
'ContactsResetTopPeerRating': grpc.unary_unary_rpc_method_handler(
servicer.ContactsResetTopPeerRating,
request_deserializer=tl__pb2.ReqContactsResetTopPeerRating.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetPeerDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetPeerDialogs,
request_deserializer=tl__pb2.ReqMessagesGetPeerDialogs.FromString,
response_serializer=tl__pb2.TypeMessagesPeerDialogs.SerializeToString,
),
'MessagesSaveDraft': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSaveDraft,
request_deserializer=tl__pb2.ReqMessagesSaveDraft.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetAllDrafts': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAllDrafts,
request_deserializer=tl__pb2.ReqMessagesGetAllDrafts.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'AccountSendConfirmPhoneCode': grpc.unary_unary_rpc_method_handler(
servicer.AccountSendConfirmPhoneCode,
request_deserializer=tl__pb2.ReqAccountSendConfirmPhoneCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AccountConfirmPhone': grpc.unary_unary_rpc_method_handler(
servicer.AccountConfirmPhone,
request_deserializer=tl__pb2.ReqAccountConfirmPhone.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetFeaturedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetFeaturedStickers,
request_deserializer=tl__pb2.ReqMessagesGetFeaturedStickers.FromString,
response_serializer=tl__pb2.TypeMessagesFeaturedStickers.SerializeToString,
),
'MessagesReadFeaturedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadFeaturedStickers,
request_deserializer=tl__pb2.ReqMessagesReadFeaturedStickers.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetRecentStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetRecentStickers,
request_deserializer=tl__pb2.ReqMessagesGetRecentStickers.FromString,
response_serializer=tl__pb2.TypeMessagesRecentStickers.SerializeToString,
),
'MessagesSaveRecentSticker': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSaveRecentSticker,
request_deserializer=tl__pb2.ReqMessagesSaveRecentSticker.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesClearRecentStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesClearRecentStickers,
request_deserializer=tl__pb2.ReqMessagesClearRecentStickers.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetArchivedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetArchivedStickers,
request_deserializer=tl__pb2.ReqMessagesGetArchivedStickers.FromString,
response_serializer=tl__pb2.TypeMessagesArchivedStickers.SerializeToString,
),
'ChannelsGetAdminedPublicChannels': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetAdminedPublicChannels,
request_deserializer=tl__pb2.ReqChannelsGetAdminedPublicChannels.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'AuthDropTempAuthKeys': grpc.unary_unary_rpc_method_handler(
servicer.AuthDropTempAuthKeys,
request_deserializer=tl__pb2.ReqAuthDropTempAuthKeys.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSetGameScore': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetGameScore,
request_deserializer=tl__pb2.ReqMessagesSetGameScore.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesSetInlineGameScore': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetInlineGameScore,
request_deserializer=tl__pb2.ReqMessagesSetInlineGameScore.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetMaskStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMaskStickers,
request_deserializer=tl__pb2.ReqMessagesGetMaskStickers.FromString,
response_serializer=tl__pb2.TypeMessagesAllStickers.SerializeToString,
),
'MessagesGetAttachedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAttachedStickers,
request_deserializer=tl__pb2.ReqMessagesGetAttachedStickers.FromString,
response_serializer=tl__pb2.TypeVectorStickerSetCovered.SerializeToString,
),
'MessagesGetGameHighScores': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetGameHighScores,
request_deserializer=tl__pb2.ReqMessagesGetGameHighScores.FromString,
response_serializer=tl__pb2.TypeMessagesHighScores.SerializeToString,
),
'MessagesGetInlineGameHighScores': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetInlineGameHighScores,
request_deserializer=tl__pb2.ReqMessagesGetInlineGameHighScores.FromString,
response_serializer=tl__pb2.TypeMessagesHighScores.SerializeToString,
),
'MessagesGetCommonChats': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetCommonChats,
request_deserializer=tl__pb2.ReqMessagesGetCommonChats.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'MessagesGetAllChats': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAllChats,
request_deserializer=tl__pb2.ReqMessagesGetAllChats.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'HelpSetBotUpdatesStatus': grpc.unary_unary_rpc_method_handler(
servicer.HelpSetBotUpdatesStatus,
request_deserializer=tl__pb2.ReqHelpSetBotUpdatesStatus.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetWebPage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetWebPage,
request_deserializer=tl__pb2.ReqMessagesGetWebPage.FromString,
response_serializer=tl__pb2.TypeWebPage.SerializeToString,
),
'MessagesToggleDialogPin': grpc.unary_unary_rpc_method_handler(
servicer.MessagesToggleDialogPin,
request_deserializer=tl__pb2.ReqMessagesToggleDialogPin.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReorderPinnedDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReorderPinnedDialogs,
request_deserializer=tl__pb2.ReqMessagesReorderPinnedDialogs.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetPinnedDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetPinnedDialogs,
request_deserializer=tl__pb2.ReqMessagesGetPinnedDialogs.FromString,
response_serializer=tl__pb2.TypeMessagesPeerDialogs.SerializeToString,
),
'PhoneRequestCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneRequestCall,
request_deserializer=tl__pb2.ReqPhoneRequestCall.FromString,
response_serializer=tl__pb2.TypePhonePhoneCall.SerializeToString,
),
'PhoneAcceptCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneAcceptCall,
request_deserializer=tl__pb2.ReqPhoneAcceptCall.FromString,
response_serializer=tl__pb2.TypePhonePhoneCall.SerializeToString,
),
'PhoneDiscardCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneDiscardCall,
request_deserializer=tl__pb2.ReqPhoneDiscardCall.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'PhoneReceivedCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneReceivedCall,
request_deserializer=tl__pb2.ReqPhoneReceivedCall.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReportEncryptedSpam': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReportEncryptedSpam,
request_deserializer=tl__pb2.ReqMessagesReportEncryptedSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'PaymentsGetPaymentForm': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsGetPaymentForm,
request_deserializer=tl__pb2.ReqPaymentsGetPaymentForm.FromString,
response_serializer=tl__pb2.TypePaymentsPaymentForm.SerializeToString,
),
'PaymentsSendPaymentForm': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsSendPaymentForm,
request_deserializer=tl__pb2.ReqPaymentsSendPaymentForm.FromString,
response_serializer=tl__pb2.TypePaymentsPaymentResult.SerializeToString,
),
'AccountGetTmpPassword': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetTmpPassword,
request_deserializer=tl__pb2.ReqAccountGetTmpPassword.FromString,
response_serializer=tl__pb2.TypeAccountTmpPassword.SerializeToString,
),
'MessagesSetBotShippingResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetBotShippingResults,
request_deserializer=tl__pb2.ReqMessagesSetBotShippingResults.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSetBotPrecheckoutResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetBotPrecheckoutResults,
request_deserializer=tl__pb2.ReqMessagesSetBotPrecheckoutResults.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'UploadGetWebFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetWebFile,
request_deserializer=tl__pb2.ReqUploadGetWebFile.FromString,
response_serializer=tl__pb2.TypeUploadWebFile.SerializeToString,
),
'BotsSendCustomRequest': grpc.unary_unary_rpc_method_handler(
servicer.BotsSendCustomRequest,
request_deserializer=tl__pb2.ReqBotsSendCustomRequest.FromString,
response_serializer=tl__pb2.TypeDataJSON.SerializeToString,
),
'BotsAnswerWebhookJSONQuery': grpc.unary_unary_rpc_method_handler(
servicer.BotsAnswerWebhookJSONQuery,
request_deserializer=tl__pb2.ReqBotsAnswerWebhookJSONQuery.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'PaymentsGetPaymentReceipt': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsGetPaymentReceipt,
request_deserializer=tl__pb2.ReqPaymentsGetPaymentReceipt.FromString,
response_serializer=tl__pb2.TypePaymentsPaymentReceipt.SerializeToString,
),
'PaymentsValidateRequestedInfo': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsValidateRequestedInfo,
request_deserializer=tl__pb2.ReqPaymentsValidateRequestedInfo.FromString,
response_serializer=tl__pb2.TypePaymentsValidatedRequestedInfo.SerializeToString,
),
'PaymentsGetSavedInfo': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsGetSavedInfo,
request_deserializer=tl__pb2.ReqPaymentsGetSavedInfo.FromString,
response_serializer=tl__pb2.TypePaymentsSavedInfo.SerializeToString,
),
'PaymentsClearSavedInfo': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsClearSavedInfo,
request_deserializer=tl__pb2.ReqPaymentsClearSavedInfo.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'PhoneGetCallConfig': grpc.unary_unary_rpc_method_handler(
servicer.PhoneGetCallConfig,
request_deserializer=tl__pb2.ReqPhoneGetCallConfig.FromString,
response_serializer=tl__pb2.TypeDataJSON.SerializeToString,
),
'PhoneConfirmCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneConfirmCall,
request_deserializer=tl__pb2.ReqPhoneConfirmCall.FromString,
response_serializer=tl__pb2.TypePhonePhoneCall.SerializeToString,
),
'PhoneSetCallRating': grpc.unary_unary_rpc_method_handler(
servicer.PhoneSetCallRating,
request_deserializer=tl__pb2.ReqPhoneSetCallRating.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'PhoneSaveCallDebug': grpc.unary_unary_rpc_method_handler(
servicer.PhoneSaveCallDebug,
request_deserializer=tl__pb2.ReqPhoneSaveCallDebug.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'UploadGetCdnFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetCdnFile,
request_deserializer=tl__pb2.ReqUploadGetCdnFile.FromString,
response_serializer=tl__pb2.TypeUploadCdnFile.SerializeToString,
),
'UploadReuploadCdnFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadReuploadCdnFile,
request_deserializer=tl__pb2.ReqUploadReuploadCdnFile.FromString,
response_serializer=tl__pb2.TypeVectorCdnFileHash.SerializeToString,
),
'HelpGetCdnConfig': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetCdnConfig,
request_deserializer=tl__pb2.ReqHelpGetCdnConfig.FromString,
response_serializer=tl__pb2.TypeCdnConfig.SerializeToString,
),
'MessagesUploadMedia': grpc.unary_unary_rpc_method_handler(
servicer.MessagesUploadMedia,
request_deserializer=tl__pb2.ReqMessagesUploadMedia.FromString,
response_serializer=tl__pb2.TypeMessageMedia.SerializeToString,
),
'StickersCreateStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.StickersCreateStickerSet,
request_deserializer=tl__pb2.ReqStickersCreateStickerSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'LangpackGetLangPack': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetLangPack,
request_deserializer=tl__pb2.ReqLangpackGetLangPack.FromString,
response_serializer=tl__pb2.TypeLangPackDifference.SerializeToString,
),
'LangpackGetStrings': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetStrings,
request_deserializer=tl__pb2.ReqLangpackGetStrings.FromString,
response_serializer=tl__pb2.TypeVectorLangPackString.SerializeToString,
),
'LangpackGetDifference': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetDifference,
request_deserializer=tl__pb2.ReqLangpackGetDifference.FromString,
response_serializer=tl__pb2.TypeLangPackDifference.SerializeToString,
),
'LangpackGetLanguages': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetLanguages,
request_deserializer=tl__pb2.ReqLangpackGetLanguages.FromString,
response_serializer=tl__pb2.TypeVectorLangPackLanguage.SerializeToString,
),
'ChannelsEditBanned': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditBanned,
request_deserializer=tl__pb2.ReqChannelsEditBanned.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsGetAdminLog': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetAdminLog,
request_deserializer=tl__pb2.ReqChannelsGetAdminLog.FromString,
response_serializer=tl__pb2.TypeChannelsAdminLogResults.SerializeToString,
),
'StickersRemoveStickerFromSet': grpc.unary_unary_rpc_method_handler(
servicer.StickersRemoveStickerFromSet,
request_deserializer=tl__pb2.ReqStickersRemoveStickerFromSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'StickersChangeStickerPosition': grpc.unary_unary_rpc_method_handler(
servicer.StickersChangeStickerPosition,
request_deserializer=tl__pb2.ReqStickersChangeStickerPosition.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'StickersAddStickerToSet': grpc.unary_unary_rpc_method_handler(
servicer.StickersAddStickerToSet,
request_deserializer=tl__pb2.ReqStickersAddStickerToSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'MessagesSendScreenshotNotification': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendScreenshotNotification,
request_deserializer=tl__pb2.ReqMessagesSendScreenshotNotification.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'UploadGetCdnFileHashes': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetCdnFileHashes,
request_deserializer=tl__pb2.ReqUploadGetCdnFileHashes.FromString,
response_serializer=tl__pb2.TypeVectorCdnFileHash.SerializeToString,
),
'MessagesGetUnreadMentions': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetUnreadMentions,
request_deserializer=tl__pb2.ReqMessagesGetUnreadMentions.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesFaveSticker': grpc.unary_unary_rpc_method_handler(
servicer.MessagesFaveSticker,
request_deserializer=tl__pb2.ReqMessagesFaveSticker.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsSetStickers': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsSetStickers,
request_deserializer=tl__pb2.ReqChannelsSetStickers.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsResetSaved': grpc.unary_unary_rpc_method_handler(
servicer.ContactsResetSaved,
request_deserializer=tl__pb2.ReqContactsResetSaved.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetFavedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetFavedStickers,
request_deserializer=tl__pb2.ReqMessagesGetFavedStickers.FromString,
response_serializer=tl__pb2.TypeMessagesFavedStickers.SerializeToString,
),
'ChannelsReadMessageContents': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsReadMessageContents,
request_deserializer=tl__pb2.ReqChannelsReadMessageContents.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'mtproto.Mtproto', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
import tl_pb2 as tl__pb2
class MtprotoStub(object):
"""Procedures
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.InvokeAfterMsg = channel.unary_unary(
'/mtproto.Mtproto/InvokeAfterMsg',
request_serializer=tl__pb2.ReqInvokeAfterMsg.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.InvokeAfterMsgs = channel.unary_unary(
'/mtproto.Mtproto/InvokeAfterMsgs',
request_serializer=tl__pb2.ReqInvokeAfterMsgs.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.AuthCheckPhone = channel.unary_unary(
'/mtproto.Mtproto/AuthCheckPhone',
request_serializer=tl__pb2.ReqAuthCheckPhone.SerializeToString,
response_deserializer=tl__pb2.TypeAuthCheckedPhone.FromString,
)
self.AuthSendCode = channel.unary_unary(
'/mtproto.Mtproto/AuthSendCode',
request_serializer=tl__pb2.ReqAuthSendCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AuthSignUp = channel.unary_unary(
'/mtproto.Mtproto/AuthSignUp',
request_serializer=tl__pb2.ReqAuthSignUp.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.AuthSignIn = channel.unary_unary(
'/mtproto.Mtproto/AuthSignIn',
request_serializer=tl__pb2.ReqAuthSignIn.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.AuthLogOut = channel.unary_unary(
'/mtproto.Mtproto/AuthLogOut',
request_serializer=tl__pb2.ReqAuthLogOut.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthResetAuthorizations = channel.unary_unary(
'/mtproto.Mtproto/AuthResetAuthorizations',
request_serializer=tl__pb2.ReqAuthResetAuthorizations.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthSendInvites = channel.unary_unary(
'/mtproto.Mtproto/AuthSendInvites',
request_serializer=tl__pb2.ReqAuthSendInvites.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthExportAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AuthExportAuthorization',
request_serializer=tl__pb2.ReqAuthExportAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeAuthExportedAuthorization.FromString,
)
self.AuthImportAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AuthImportAuthorization',
request_serializer=tl__pb2.ReqAuthImportAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.AccountRegisterDevice = channel.unary_unary(
'/mtproto.Mtproto/AccountRegisterDevice',
request_serializer=tl__pb2.ReqAccountRegisterDevice.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUnregisterDevice = channel.unary_unary(
'/mtproto.Mtproto/AccountUnregisterDevice',
request_serializer=tl__pb2.ReqAccountUnregisterDevice.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUpdateNotifySettings = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateNotifySettings',
request_serializer=tl__pb2.ReqAccountUpdateNotifySettings.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetNotifySettings = channel.unary_unary(
'/mtproto.Mtproto/AccountGetNotifySettings',
request_serializer=tl__pb2.ReqAccountGetNotifySettings.SerializeToString,
response_deserializer=tl__pb2.TypePeerNotifySettings.FromString,
)
self.AccountResetNotifySettings = channel.unary_unary(
'/mtproto.Mtproto/AccountResetNotifySettings',
request_serializer=tl__pb2.ReqAccountResetNotifySettings.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUpdateProfile = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateProfile',
request_serializer=tl__pb2.ReqAccountUpdateProfile.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.AccountUpdateStatus = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateStatus',
request_serializer=tl__pb2.ReqAccountUpdateStatus.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetWallPapers = channel.unary_unary(
'/mtproto.Mtproto/AccountGetWallPapers',
request_serializer=tl__pb2.ReqAccountGetWallPapers.SerializeToString,
response_deserializer=tl__pb2.TypeVectorWallPaper.FromString,
)
self.UsersGetUsers = channel.unary_unary(
'/mtproto.Mtproto/UsersGetUsers',
request_serializer=tl__pb2.ReqUsersGetUsers.SerializeToString,
response_deserializer=tl__pb2.TypeVectorUser.FromString,
)
self.UsersGetFullUser = channel.unary_unary(
'/mtproto.Mtproto/UsersGetFullUser',
request_serializer=tl__pb2.ReqUsersGetFullUser.SerializeToString,
response_deserializer=tl__pb2.TypeUserFull.FromString,
)
self.ContactsGetStatuses = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetStatuses',
request_serializer=tl__pb2.ReqContactsGetStatuses.SerializeToString,
response_deserializer=tl__pb2.TypeVectorContactStatus.FromString,
)
self.ContactsGetContacts = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetContacts',
request_serializer=tl__pb2.ReqContactsGetContacts.SerializeToString,
response_deserializer=tl__pb2.TypeContactsContacts.FromString,
)
self.ContactsImportContacts = channel.unary_unary(
'/mtproto.Mtproto/ContactsImportContacts',
request_serializer=tl__pb2.ReqContactsImportContacts.SerializeToString,
response_deserializer=tl__pb2.TypeContactsImportedContacts.FromString,
)
self.ContactsSearch = channel.unary_unary(
'/mtproto.Mtproto/ContactsSearch',
request_serializer=tl__pb2.ReqContactsSearch.SerializeToString,
response_deserializer=tl__pb2.TypeContactsFound.FromString,
)
self.ContactsDeleteContact = channel.unary_unary(
'/mtproto.Mtproto/ContactsDeleteContact',
request_serializer=tl__pb2.ReqContactsDeleteContact.SerializeToString,
response_deserializer=tl__pb2.TypeContactsLink.FromString,
)
self.ContactsDeleteContacts = channel.unary_unary(
'/mtproto.Mtproto/ContactsDeleteContacts',
request_serializer=tl__pb2.ReqContactsDeleteContacts.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsBlock = channel.unary_unary(
'/mtproto.Mtproto/ContactsBlock',
request_serializer=tl__pb2.ReqContactsBlock.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsUnblock = channel.unary_unary(
'/mtproto.Mtproto/ContactsUnblock',
request_serializer=tl__pb2.ReqContactsUnblock.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsGetBlocked = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetBlocked',
request_serializer=tl__pb2.ReqContactsGetBlocked.SerializeToString,
response_deserializer=tl__pb2.TypeContactsBlocked.FromString,
)
self.MessagesGetMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMessages',
request_serializer=tl__pb2.ReqMessagesGetMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesGetDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetDialogs',
request_serializer=tl__pb2.ReqMessagesGetDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesDialogs.FromString,
)
self.MessagesGetHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetHistory',
request_serializer=tl__pb2.ReqMessagesGetHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesSearch = channel.unary_unary(
'/mtproto.Mtproto/MessagesSearch',
request_serializer=tl__pb2.ReqMessagesSearch.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesReadHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadHistory',
request_serializer=tl__pb2.ReqMessagesReadHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.MessagesDeleteHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesDeleteHistory',
request_serializer=tl__pb2.ReqMessagesDeleteHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedHistory.FromString,
)
self.MessagesDeleteMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesDeleteMessages',
request_serializer=tl__pb2.ReqMessagesDeleteMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.MessagesReceivedMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesReceivedMessages',
request_serializer=tl__pb2.ReqMessagesReceivedMessages.SerializeToString,
response_deserializer=tl__pb2.TypeVectorReceivedNotifyMessage.FromString,
)
self.MessagesSetTyping = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetTyping',
request_serializer=tl__pb2.ReqMessagesSetTyping.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSendMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendMessage',
request_serializer=tl__pb2.ReqMessagesSendMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesSendMedia = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendMedia',
request_serializer=tl__pb2.ReqMessagesSendMedia.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesForwardMessages = channel.unary_unary(
'/mtproto.Mtproto/MessagesForwardMessages',
request_serializer=tl__pb2.ReqMessagesForwardMessages.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesGetChats = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetChats',
request_serializer=tl__pb2.ReqMessagesGetChats.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.MessagesGetFullChat = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetFullChat',
request_serializer=tl__pb2.ReqMessagesGetFullChat.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChatFull.FromString,
)
self.MessagesEditChatTitle = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditChatTitle',
request_serializer=tl__pb2.ReqMessagesEditChatTitle.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesEditChatPhoto = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditChatPhoto',
request_serializer=tl__pb2.ReqMessagesEditChatPhoto.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesAddChatUser = channel.unary_unary(
'/mtproto.Mtproto/MessagesAddChatUser',
request_serializer=tl__pb2.ReqMessagesAddChatUser.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesDeleteChatUser = channel.unary_unary(
'/mtproto.Mtproto/MessagesDeleteChatUser',
request_serializer=tl__pb2.ReqMessagesDeleteChatUser.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesCreateChat = channel.unary_unary(
'/mtproto.Mtproto/MessagesCreateChat',
request_serializer=tl__pb2.ReqMessagesCreateChat.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.UpdatesGetState = channel.unary_unary(
'/mtproto.Mtproto/UpdatesGetState',
request_serializer=tl__pb2.ReqUpdatesGetState.SerializeToString,
response_deserializer=tl__pb2.TypeUpdatesState.FromString,
)
self.UpdatesGetDifference = channel.unary_unary(
'/mtproto.Mtproto/UpdatesGetDifference',
request_serializer=tl__pb2.ReqUpdatesGetDifference.SerializeToString,
response_deserializer=tl__pb2.TypeUpdatesDifference.FromString,
)
self.PhotosUpdateProfilePhoto = channel.unary_unary(
'/mtproto.Mtproto/PhotosUpdateProfilePhoto',
request_serializer=tl__pb2.ReqPhotosUpdateProfilePhoto.SerializeToString,
response_deserializer=tl__pb2.TypeUserProfilePhoto.FromString,
)
self.PhotosUploadProfilePhoto = channel.unary_unary(
'/mtproto.Mtproto/PhotosUploadProfilePhoto',
request_serializer=tl__pb2.ReqPhotosUploadProfilePhoto.SerializeToString,
response_deserializer=tl__pb2.TypePhotosPhoto.FromString,
)
self.UploadSaveFilePart = channel.unary_unary(
'/mtproto.Mtproto/UploadSaveFilePart',
request_serializer=tl__pb2.ReqUploadSaveFilePart.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.UploadGetFile = channel.unary_unary(
'/mtproto.Mtproto/UploadGetFile',
request_serializer=tl__pb2.ReqUploadGetFile.SerializeToString,
response_deserializer=tl__pb2.TypeUploadFile.FromString,
)
self.HelpGetConfig = channel.unary_unary(
'/mtproto.Mtproto/HelpGetConfig',
request_serializer=tl__pb2.ReqHelpGetConfig.SerializeToString,
response_deserializer=tl__pb2.TypeConfig.FromString,
)
self.HelpGetNearestDc = channel.unary_unary(
'/mtproto.Mtproto/HelpGetNearestDc',
request_serializer=tl__pb2.ReqHelpGetNearestDc.SerializeToString,
response_deserializer=tl__pb2.TypeNearestDc.FromString,
)
self.HelpGetAppUpdate = channel.unary_unary(
'/mtproto.Mtproto/HelpGetAppUpdate',
request_serializer=tl__pb2.ReqHelpGetAppUpdate.SerializeToString,
response_deserializer=tl__pb2.TypeHelpAppUpdate.FromString,
)
self.HelpSaveAppLog = channel.unary_unary(
'/mtproto.Mtproto/HelpSaveAppLog',
request_serializer=tl__pb2.ReqHelpSaveAppLog.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.HelpGetInviteText = channel.unary_unary(
'/mtproto.Mtproto/HelpGetInviteText',
request_serializer=tl__pb2.ReqHelpGetInviteText.SerializeToString,
response_deserializer=tl__pb2.TypeHelpInviteText.FromString,
)
self.PhotosDeletePhotos = channel.unary_unary(
'/mtproto.Mtproto/PhotosDeletePhotos',
request_serializer=tl__pb2.ReqPhotosDeletePhotos.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLong.FromString,
)
self.PhotosGetUserPhotos = channel.unary_unary(
'/mtproto.Mtproto/PhotosGetUserPhotos',
request_serializer=tl__pb2.ReqPhotosGetUserPhotos.SerializeToString,
response_deserializer=tl__pb2.TypePhotosPhotos.FromString,
)
self.MessagesForwardMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesForwardMessage',
request_serializer=tl__pb2.ReqMessagesForwardMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesGetDhConfig = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetDhConfig',
request_serializer=tl__pb2.ReqMessagesGetDhConfig.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesDhConfig.FromString,
)
self.MessagesRequestEncryption = channel.unary_unary(
'/mtproto.Mtproto/MessagesRequestEncryption',
request_serializer=tl__pb2.ReqMessagesRequestEncryption.SerializeToString,
response_deserializer=tl__pb2.TypeEncryptedChat.FromString,
)
self.MessagesAcceptEncryption = channel.unary_unary(
'/mtproto.Mtproto/MessagesAcceptEncryption',
request_serializer=tl__pb2.ReqMessagesAcceptEncryption.SerializeToString,
response_deserializer=tl__pb2.TypeEncryptedChat.FromString,
)
self.MessagesDiscardEncryption = channel.unary_unary(
'/mtproto.Mtproto/MessagesDiscardEncryption',
request_serializer=tl__pb2.ReqMessagesDiscardEncryption.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSetEncryptedTyping = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetEncryptedTyping',
request_serializer=tl__pb2.ReqMessagesSetEncryptedTyping.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReadEncryptedHistory = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadEncryptedHistory',
request_serializer=tl__pb2.ReqMessagesReadEncryptedHistory.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSendEncrypted = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendEncrypted',
request_serializer=tl__pb2.ReqMessagesSendEncrypted.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSentEncryptedMessage.FromString,
)
self.MessagesSendEncryptedFile = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendEncryptedFile',
request_serializer=tl__pb2.ReqMessagesSendEncryptedFile.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSentEncryptedMessage.FromString,
)
self.MessagesSendEncryptedService = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendEncryptedService',
request_serializer=tl__pb2.ReqMessagesSendEncryptedService.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSentEncryptedMessage.FromString,
)
self.MessagesReceivedQueue = channel.unary_unary(
'/mtproto.Mtproto/MessagesReceivedQueue',
request_serializer=tl__pb2.ReqMessagesReceivedQueue.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLong.FromString,
)
self.UploadSaveBigFilePart = channel.unary_unary(
'/mtproto.Mtproto/UploadSaveBigFilePart',
request_serializer=tl__pb2.ReqUploadSaveBigFilePart.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.InitConnection = channel.unary_unary(
'/mtproto.Mtproto/InitConnection',
request_serializer=tl__pb2.ReqInitConnection.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.HelpGetSupport = channel.unary_unary(
'/mtproto.Mtproto/HelpGetSupport',
request_serializer=tl__pb2.ReqHelpGetSupport.SerializeToString,
response_deserializer=tl__pb2.TypeHelpSupport.FromString,
)
self.AuthBindTempAuthKey = channel.unary_unary(
'/mtproto.Mtproto/AuthBindTempAuthKey',
request_serializer=tl__pb2.ReqAuthBindTempAuthKey.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsExportCard = channel.unary_unary(
'/mtproto.Mtproto/ContactsExportCard',
request_serializer=tl__pb2.ReqContactsExportCard.SerializeToString,
response_deserializer=tl__pb2.TypeVectorInt.FromString,
)
self.ContactsImportCard = channel.unary_unary(
'/mtproto.Mtproto/ContactsImportCard',
request_serializer=tl__pb2.ReqContactsImportCard.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.MessagesReadMessageContents = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadMessageContents',
request_serializer=tl__pb2.ReqMessagesReadMessageContents.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.AccountCheckUsername = channel.unary_unary(
'/mtproto.Mtproto/AccountCheckUsername',
request_serializer=tl__pb2.ReqAccountCheckUsername.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountUpdateUsername = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateUsername',
request_serializer=tl__pb2.ReqAccountUpdateUsername.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.AccountGetPrivacy = channel.unary_unary(
'/mtproto.Mtproto/AccountGetPrivacy',
request_serializer=tl__pb2.ReqAccountGetPrivacy.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPrivacyRules.FromString,
)
self.AccountSetPrivacy = channel.unary_unary(
'/mtproto.Mtproto/AccountSetPrivacy',
request_serializer=tl__pb2.ReqAccountSetPrivacy.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPrivacyRules.FromString,
)
self.AccountDeleteAccount = channel.unary_unary(
'/mtproto.Mtproto/AccountDeleteAccount',
request_serializer=tl__pb2.ReqAccountDeleteAccount.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetAccountTTL = channel.unary_unary(
'/mtproto.Mtproto/AccountGetAccountTTL',
request_serializer=tl__pb2.ReqAccountGetAccountTTL.SerializeToString,
response_deserializer=tl__pb2.TypeAccountDaysTTL.FromString,
)
self.AccountSetAccountTTL = channel.unary_unary(
'/mtproto.Mtproto/AccountSetAccountTTL',
request_serializer=tl__pb2.ReqAccountSetAccountTTL.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.InvokeWithLayer = channel.unary_unary(
'/mtproto.Mtproto/InvokeWithLayer',
request_serializer=tl__pb2.ReqInvokeWithLayer.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.ContactsResolveUsername = channel.unary_unary(
'/mtproto.Mtproto/ContactsResolveUsername',
request_serializer=tl__pb2.ReqContactsResolveUsername.SerializeToString,
response_deserializer=tl__pb2.TypeContactsResolvedPeer.FromString,
)
self.AccountSendChangePhoneCode = channel.unary_unary(
'/mtproto.Mtproto/AccountSendChangePhoneCode',
request_serializer=tl__pb2.ReqAccountSendChangePhoneCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AccountChangePhone = channel.unary_unary(
'/mtproto.Mtproto/AccountChangePhone',
request_serializer=tl__pb2.ReqAccountChangePhone.SerializeToString,
response_deserializer=tl__pb2.TypeUser.FromString,
)
self.MessagesGetAllStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAllStickers',
request_serializer=tl__pb2.ReqMessagesGetAllStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAllStickers.FromString,
)
self.AccountUpdateDeviceLocked = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdateDeviceLocked',
request_serializer=tl__pb2.ReqAccountUpdateDeviceLocked.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetPassword = channel.unary_unary(
'/mtproto.Mtproto/AccountGetPassword',
request_serializer=tl__pb2.ReqAccountGetPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPassword.FromString,
)
self.AuthCheckPassword = channel.unary_unary(
'/mtproto.Mtproto/AuthCheckPassword',
request_serializer=tl__pb2.ReqAuthCheckPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.MessagesGetWebPagePreview = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetWebPagePreview',
request_serializer=tl__pb2.ReqMessagesGetWebPagePreview.SerializeToString,
response_deserializer=tl__pb2.TypeMessageMedia.FromString,
)
self.AccountGetAuthorizations = channel.unary_unary(
'/mtproto.Mtproto/AccountGetAuthorizations',
request_serializer=tl__pb2.ReqAccountGetAuthorizations.SerializeToString,
response_deserializer=tl__pb2.TypeAccountAuthorizations.FromString,
)
self.AccountResetAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AccountResetAuthorization',
request_serializer=tl__pb2.ReqAccountResetAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AccountGetPasswordSettings = channel.unary_unary(
'/mtproto.Mtproto/AccountGetPasswordSettings',
request_serializer=tl__pb2.ReqAccountGetPasswordSettings.SerializeToString,
response_deserializer=tl__pb2.TypeAccountPasswordSettings.FromString,
)
self.AccountUpdatePasswordSettings = channel.unary_unary(
'/mtproto.Mtproto/AccountUpdatePasswordSettings',
request_serializer=tl__pb2.ReqAccountUpdatePasswordSettings.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthRequestPasswordRecovery = channel.unary_unary(
'/mtproto.Mtproto/AuthRequestPasswordRecovery',
request_serializer=tl__pb2.ReqAuthRequestPasswordRecovery.SerializeToString,
response_deserializer=tl__pb2.TypeAuthPasswordRecovery.FromString,
)
self.AuthRecoverPassword = channel.unary_unary(
'/mtproto.Mtproto/AuthRecoverPassword',
request_serializer=tl__pb2.ReqAuthRecoverPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.InvokeWithoutUpdates = channel.unary_unary(
'/mtproto.Mtproto/InvokeWithoutUpdates',
request_serializer=tl__pb2.ReqInvokeWithoutUpdates.SerializeToString,
response_deserializer=google_dot_protobuf_dot_any__pb2.Any.FromString,
)
self.MessagesExportChatInvite = channel.unary_unary(
'/mtproto.Mtproto/MessagesExportChatInvite',
request_serializer=tl__pb2.ReqMessagesExportChatInvite.SerializeToString,
response_deserializer=tl__pb2.TypeExportedChatInvite.FromString,
)
self.MessagesCheckChatInvite = channel.unary_unary(
'/mtproto.Mtproto/MessagesCheckChatInvite',
request_serializer=tl__pb2.ReqMessagesCheckChatInvite.SerializeToString,
response_deserializer=tl__pb2.TypeChatInvite.FromString,
)
self.MessagesImportChatInvite = channel.unary_unary(
'/mtproto.Mtproto/MessagesImportChatInvite',
request_serializer=tl__pb2.ReqMessagesImportChatInvite.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesGetStickerSet = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetStickerSet',
request_serializer=tl__pb2.ReqMessagesGetStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.MessagesInstallStickerSet = channel.unary_unary(
'/mtproto.Mtproto/MessagesInstallStickerSet',
request_serializer=tl__pb2.ReqMessagesInstallStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSetInstallResult.FromString,
)
self.MessagesUninstallStickerSet = channel.unary_unary(
'/mtproto.Mtproto/MessagesUninstallStickerSet',
request_serializer=tl__pb2.ReqMessagesUninstallStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.AuthImportBotAuthorization = channel.unary_unary(
'/mtproto.Mtproto/AuthImportBotAuthorization',
request_serializer=tl__pb2.ReqAuthImportBotAuthorization.SerializeToString,
response_deserializer=tl__pb2.TypeAuthAuthorization.FromString,
)
self.MessagesStartBot = channel.unary_unary(
'/mtproto.Mtproto/MessagesStartBot',
request_serializer=tl__pb2.ReqMessagesStartBot.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.HelpGetAppChangelog = channel.unary_unary(
'/mtproto.Mtproto/HelpGetAppChangelog',
request_serializer=tl__pb2.ReqHelpGetAppChangelog.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesReportSpam = channel.unary_unary(
'/mtproto.Mtproto/MessagesReportSpam',
request_serializer=tl__pb2.ReqMessagesReportSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetMessagesViews = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMessagesViews',
request_serializer=tl__pb2.ReqMessagesGetMessagesViews.SerializeToString,
response_deserializer=tl__pb2.TypeVectorInt.FromString,
)
self.UpdatesGetChannelDifference = channel.unary_unary(
'/mtproto.Mtproto/UpdatesGetChannelDifference',
request_serializer=tl__pb2.ReqUpdatesGetChannelDifference.SerializeToString,
response_deserializer=tl__pb2.TypeUpdatesChannelDifference.FromString,
)
self.ChannelsReadHistory = channel.unary_unary(
'/mtproto.Mtproto/ChannelsReadHistory',
request_serializer=tl__pb2.ReqChannelsReadHistory.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsDeleteMessages = channel.unary_unary(
'/mtproto.Mtproto/ChannelsDeleteMessages',
request_serializer=tl__pb2.ReqChannelsDeleteMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedMessages.FromString,
)
self.ChannelsDeleteUserHistory = channel.unary_unary(
'/mtproto.Mtproto/ChannelsDeleteUserHistory',
request_serializer=tl__pb2.ReqChannelsDeleteUserHistory.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAffectedHistory.FromString,
)
self.ChannelsReportSpam = channel.unary_unary(
'/mtproto.Mtproto/ChannelsReportSpam',
request_serializer=tl__pb2.ReqChannelsReportSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsGetMessages = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetMessages',
request_serializer=tl__pb2.ReqChannelsGetMessages.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.ChannelsGetParticipants = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetParticipants',
request_serializer=tl__pb2.ReqChannelsGetParticipants.SerializeToString,
response_deserializer=tl__pb2.TypeChannelsChannelParticipants.FromString,
)
self.ChannelsGetParticipant = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetParticipant',
request_serializer=tl__pb2.ReqChannelsGetParticipant.SerializeToString,
response_deserializer=tl__pb2.TypeChannelsChannelParticipant.FromString,
)
self.ChannelsGetChannels = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetChannels',
request_serializer=tl__pb2.ReqChannelsGetChannels.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.ChannelsGetFullChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetFullChannel',
request_serializer=tl__pb2.ReqChannelsGetFullChannel.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChatFull.FromString,
)
self.ChannelsCreateChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsCreateChannel',
request_serializer=tl__pb2.ReqChannelsCreateChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsEditAbout = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditAbout',
request_serializer=tl__pb2.ReqChannelsEditAbout.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsEditAdmin = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditAdmin',
request_serializer=tl__pb2.ReqChannelsEditAdmin.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsEditTitle = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditTitle',
request_serializer=tl__pb2.ReqChannelsEditTitle.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsEditPhoto = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditPhoto',
request_serializer=tl__pb2.ReqChannelsEditPhoto.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsCheckUsername = channel.unary_unary(
'/mtproto.Mtproto/ChannelsCheckUsername',
request_serializer=tl__pb2.ReqChannelsCheckUsername.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsUpdateUsername = channel.unary_unary(
'/mtproto.Mtproto/ChannelsUpdateUsername',
request_serializer=tl__pb2.ReqChannelsUpdateUsername.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsJoinChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsJoinChannel',
request_serializer=tl__pb2.ReqChannelsJoinChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsLeaveChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsLeaveChannel',
request_serializer=tl__pb2.ReqChannelsLeaveChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsInviteToChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsInviteToChannel',
request_serializer=tl__pb2.ReqChannelsInviteToChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsExportInvite = channel.unary_unary(
'/mtproto.Mtproto/ChannelsExportInvite',
request_serializer=tl__pb2.ReqChannelsExportInvite.SerializeToString,
response_deserializer=tl__pb2.TypeExportedChatInvite.FromString,
)
self.ChannelsDeleteChannel = channel.unary_unary(
'/mtproto.Mtproto/ChannelsDeleteChannel',
request_serializer=tl__pb2.ReqChannelsDeleteChannel.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesToggleChatAdmins = channel.unary_unary(
'/mtproto.Mtproto/MessagesToggleChatAdmins',
request_serializer=tl__pb2.ReqMessagesToggleChatAdmins.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesEditChatAdmin = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditChatAdmin',
request_serializer=tl__pb2.ReqMessagesEditChatAdmin.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesMigrateChat = channel.unary_unary(
'/mtproto.Mtproto/MessagesMigrateChat',
request_serializer=tl__pb2.ReqMessagesMigrateChat.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesSearchGlobal = channel.unary_unary(
'/mtproto.Mtproto/MessagesSearchGlobal',
request_serializer=tl__pb2.ReqMessagesSearchGlobal.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.AccountReportPeer = channel.unary_unary(
'/mtproto.Mtproto/AccountReportPeer',
request_serializer=tl__pb2.ReqAccountReportPeer.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReorderStickerSets = channel.unary_unary(
'/mtproto.Mtproto/MessagesReorderStickerSets',
request_serializer=tl__pb2.ReqMessagesReorderStickerSets.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.HelpGetTermsOfService = channel.unary_unary(
'/mtproto.Mtproto/HelpGetTermsOfService',
request_serializer=tl__pb2.ReqHelpGetTermsOfService.SerializeToString,
response_deserializer=tl__pb2.TypeHelpTermsOfService.FromString,
)
self.MessagesGetDocumentByHash = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetDocumentByHash',
request_serializer=tl__pb2.ReqMessagesGetDocumentByHash.SerializeToString,
response_deserializer=tl__pb2.TypeDocument.FromString,
)
self.MessagesSearchGifs = channel.unary_unary(
'/mtproto.Mtproto/MessagesSearchGifs',
request_serializer=tl__pb2.ReqMessagesSearchGifs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesFoundGifs.FromString,
)
self.MessagesGetSavedGifs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetSavedGifs',
request_serializer=tl__pb2.ReqMessagesGetSavedGifs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesSavedGifs.FromString,
)
self.MessagesSaveGif = channel.unary_unary(
'/mtproto.Mtproto/MessagesSaveGif',
request_serializer=tl__pb2.ReqMessagesSaveGif.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetInlineBotResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetInlineBotResults',
request_serializer=tl__pb2.ReqMessagesGetInlineBotResults.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesBotResults.FromString,
)
self.MessagesSetInlineBotResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetInlineBotResults',
request_serializer=tl__pb2.ReqMessagesSetInlineBotResults.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSendInlineBotResult = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendInlineBotResult',
request_serializer=tl__pb2.ReqMessagesSendInlineBotResult.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsToggleInvites = channel.unary_unary(
'/mtproto.Mtproto/ChannelsToggleInvites',
request_serializer=tl__pb2.ReqChannelsToggleInvites.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsExportMessageLink = channel.unary_unary(
'/mtproto.Mtproto/ChannelsExportMessageLink',
request_serializer=tl__pb2.ReqChannelsExportMessageLink.SerializeToString,
response_deserializer=tl__pb2.TypeExportedMessageLink.FromString,
)
self.ChannelsToggleSignatures = channel.unary_unary(
'/mtproto.Mtproto/ChannelsToggleSignatures',
request_serializer=tl__pb2.ReqChannelsToggleSignatures.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesHideReportSpam = channel.unary_unary(
'/mtproto.Mtproto/MessagesHideReportSpam',
request_serializer=tl__pb2.ReqMessagesHideReportSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetPeerSettings = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetPeerSettings',
request_serializer=tl__pb2.ReqMessagesGetPeerSettings.SerializeToString,
response_deserializer=tl__pb2.TypePeerSettings.FromString,
)
self.ChannelsUpdatePinnedMessage = channel.unary_unary(
'/mtproto.Mtproto/ChannelsUpdatePinnedMessage',
request_serializer=tl__pb2.ReqChannelsUpdatePinnedMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.AuthResendCode = channel.unary_unary(
'/mtproto.Mtproto/AuthResendCode',
request_serializer=tl__pb2.ReqAuthResendCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AuthCancelCode = channel.unary_unary(
'/mtproto.Mtproto/AuthCancelCode',
request_serializer=tl__pb2.ReqAuthCancelCode.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetMessageEditData = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMessageEditData',
request_serializer=tl__pb2.ReqMessagesGetMessageEditData.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessageEditData.FromString,
)
self.MessagesEditMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditMessage',
request_serializer=tl__pb2.ReqMessagesEditMessage.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesEditInlineBotMessage = channel.unary_unary(
'/mtproto.Mtproto/MessagesEditInlineBotMessage',
request_serializer=tl__pb2.ReqMessagesEditInlineBotMessage.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetBotCallbackAnswer = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetBotCallbackAnswer',
request_serializer=tl__pb2.ReqMessagesGetBotCallbackAnswer.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesBotCallbackAnswer.FromString,
)
self.MessagesSetBotCallbackAnswer = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetBotCallbackAnswer',
request_serializer=tl__pb2.ReqMessagesSetBotCallbackAnswer.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsGetTopPeers = channel.unary_unary(
'/mtproto.Mtproto/ContactsGetTopPeers',
request_serializer=tl__pb2.ReqContactsGetTopPeers.SerializeToString,
response_deserializer=tl__pb2.TypeContactsTopPeers.FromString,
)
self.ContactsResetTopPeerRating = channel.unary_unary(
'/mtproto.Mtproto/ContactsResetTopPeerRating',
request_serializer=tl__pb2.ReqContactsResetTopPeerRating.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetPeerDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetPeerDialogs',
request_serializer=tl__pb2.ReqMessagesGetPeerDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesPeerDialogs.FromString,
)
self.MessagesSaveDraft = channel.unary_unary(
'/mtproto.Mtproto/MessagesSaveDraft',
request_serializer=tl__pb2.ReqMessagesSaveDraft.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetAllDrafts = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAllDrafts',
request_serializer=tl__pb2.ReqMessagesGetAllDrafts.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.AccountSendConfirmPhoneCode = channel.unary_unary(
'/mtproto.Mtproto/AccountSendConfirmPhoneCode',
request_serializer=tl__pb2.ReqAccountSendConfirmPhoneCode.SerializeToString,
response_deserializer=tl__pb2.TypeAuthSentCode.FromString,
)
self.AccountConfirmPhone = channel.unary_unary(
'/mtproto.Mtproto/AccountConfirmPhone',
request_serializer=tl__pb2.ReqAccountConfirmPhone.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetFeaturedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetFeaturedStickers',
request_serializer=tl__pb2.ReqMessagesGetFeaturedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesFeaturedStickers.FromString,
)
self.MessagesReadFeaturedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesReadFeaturedStickers',
request_serializer=tl__pb2.ReqMessagesReadFeaturedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetRecentStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetRecentStickers',
request_serializer=tl__pb2.ReqMessagesGetRecentStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesRecentStickers.FromString,
)
self.MessagesSaveRecentSticker = channel.unary_unary(
'/mtproto.Mtproto/MessagesSaveRecentSticker',
request_serializer=tl__pb2.ReqMessagesSaveRecentSticker.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesClearRecentStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesClearRecentStickers',
request_serializer=tl__pb2.ReqMessagesClearRecentStickers.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetArchivedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetArchivedStickers',
request_serializer=tl__pb2.ReqMessagesGetArchivedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesArchivedStickers.FromString,
)
self.ChannelsGetAdminedPublicChannels = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetAdminedPublicChannels',
request_serializer=tl__pb2.ReqChannelsGetAdminedPublicChannels.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.AuthDropTempAuthKeys = channel.unary_unary(
'/mtproto.Mtproto/AuthDropTempAuthKeys',
request_serializer=tl__pb2.ReqAuthDropTempAuthKeys.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSetGameScore = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetGameScore',
request_serializer=tl__pb2.ReqMessagesSetGameScore.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.MessagesSetInlineGameScore = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetInlineGameScore',
request_serializer=tl__pb2.ReqMessagesSetInlineGameScore.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetMaskStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetMaskStickers',
request_serializer=tl__pb2.ReqMessagesGetMaskStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesAllStickers.FromString,
)
self.MessagesGetAttachedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAttachedStickers',
request_serializer=tl__pb2.ReqMessagesGetAttachedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeVectorStickerSetCovered.FromString,
)
self.MessagesGetGameHighScores = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetGameHighScores',
request_serializer=tl__pb2.ReqMessagesGetGameHighScores.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesHighScores.FromString,
)
self.MessagesGetInlineGameHighScores = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetInlineGameHighScores',
request_serializer=tl__pb2.ReqMessagesGetInlineGameHighScores.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesHighScores.FromString,
)
self.MessagesGetCommonChats = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetCommonChats',
request_serializer=tl__pb2.ReqMessagesGetCommonChats.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.MessagesGetAllChats = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetAllChats',
request_serializer=tl__pb2.ReqMessagesGetAllChats.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesChats.FromString,
)
self.HelpSetBotUpdatesStatus = channel.unary_unary(
'/mtproto.Mtproto/HelpSetBotUpdatesStatus',
request_serializer=tl__pb2.ReqHelpSetBotUpdatesStatus.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetWebPage = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetWebPage',
request_serializer=tl__pb2.ReqMessagesGetWebPage.SerializeToString,
response_deserializer=tl__pb2.TypeWebPage.FromString,
)
self.MessagesToggleDialogPin = channel.unary_unary(
'/mtproto.Mtproto/MessagesToggleDialogPin',
request_serializer=tl__pb2.ReqMessagesToggleDialogPin.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReorderPinnedDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesReorderPinnedDialogs',
request_serializer=tl__pb2.ReqMessagesReorderPinnedDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetPinnedDialogs = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetPinnedDialogs',
request_serializer=tl__pb2.ReqMessagesGetPinnedDialogs.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesPeerDialogs.FromString,
)
self.PhoneRequestCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneRequestCall',
request_serializer=tl__pb2.ReqPhoneRequestCall.SerializeToString,
response_deserializer=tl__pb2.TypePhonePhoneCall.FromString,
)
self.PhoneAcceptCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneAcceptCall',
request_serializer=tl__pb2.ReqPhoneAcceptCall.SerializeToString,
response_deserializer=tl__pb2.TypePhonePhoneCall.FromString,
)
self.PhoneDiscardCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneDiscardCall',
request_serializer=tl__pb2.ReqPhoneDiscardCall.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.PhoneReceivedCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneReceivedCall',
request_serializer=tl__pb2.ReqPhoneReceivedCall.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesReportEncryptedSpam = channel.unary_unary(
'/mtproto.Mtproto/MessagesReportEncryptedSpam',
request_serializer=tl__pb2.ReqMessagesReportEncryptedSpam.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.PaymentsGetPaymentForm = channel.unary_unary(
'/mtproto.Mtproto/PaymentsGetPaymentForm',
request_serializer=tl__pb2.ReqPaymentsGetPaymentForm.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsPaymentForm.FromString,
)
self.PaymentsSendPaymentForm = channel.unary_unary(
'/mtproto.Mtproto/PaymentsSendPaymentForm',
request_serializer=tl__pb2.ReqPaymentsSendPaymentForm.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsPaymentResult.FromString,
)
self.AccountGetTmpPassword = channel.unary_unary(
'/mtproto.Mtproto/AccountGetTmpPassword',
request_serializer=tl__pb2.ReqAccountGetTmpPassword.SerializeToString,
response_deserializer=tl__pb2.TypeAccountTmpPassword.FromString,
)
self.MessagesSetBotShippingResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetBotShippingResults',
request_serializer=tl__pb2.ReqMessagesSetBotShippingResults.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesSetBotPrecheckoutResults = channel.unary_unary(
'/mtproto.Mtproto/MessagesSetBotPrecheckoutResults',
request_serializer=tl__pb2.ReqMessagesSetBotPrecheckoutResults.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.UploadGetWebFile = channel.unary_unary(
'/mtproto.Mtproto/UploadGetWebFile',
request_serializer=tl__pb2.ReqUploadGetWebFile.SerializeToString,
response_deserializer=tl__pb2.TypeUploadWebFile.FromString,
)
self.BotsSendCustomRequest = channel.unary_unary(
'/mtproto.Mtproto/BotsSendCustomRequest',
request_serializer=tl__pb2.ReqBotsSendCustomRequest.SerializeToString,
response_deserializer=tl__pb2.TypeDataJSON.FromString,
)
self.BotsAnswerWebhookJSONQuery = channel.unary_unary(
'/mtproto.Mtproto/BotsAnswerWebhookJSONQuery',
request_serializer=tl__pb2.ReqBotsAnswerWebhookJSONQuery.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.PaymentsGetPaymentReceipt = channel.unary_unary(
'/mtproto.Mtproto/PaymentsGetPaymentReceipt',
request_serializer=tl__pb2.ReqPaymentsGetPaymentReceipt.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsPaymentReceipt.FromString,
)
self.PaymentsValidateRequestedInfo = channel.unary_unary(
'/mtproto.Mtproto/PaymentsValidateRequestedInfo',
request_serializer=tl__pb2.ReqPaymentsValidateRequestedInfo.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsValidatedRequestedInfo.FromString,
)
self.PaymentsGetSavedInfo = channel.unary_unary(
'/mtproto.Mtproto/PaymentsGetSavedInfo',
request_serializer=tl__pb2.ReqPaymentsGetSavedInfo.SerializeToString,
response_deserializer=tl__pb2.TypePaymentsSavedInfo.FromString,
)
self.PaymentsClearSavedInfo = channel.unary_unary(
'/mtproto.Mtproto/PaymentsClearSavedInfo',
request_serializer=tl__pb2.ReqPaymentsClearSavedInfo.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.PhoneGetCallConfig = channel.unary_unary(
'/mtproto.Mtproto/PhoneGetCallConfig',
request_serializer=tl__pb2.ReqPhoneGetCallConfig.SerializeToString,
response_deserializer=tl__pb2.TypeDataJSON.FromString,
)
self.PhoneConfirmCall = channel.unary_unary(
'/mtproto.Mtproto/PhoneConfirmCall',
request_serializer=tl__pb2.ReqPhoneConfirmCall.SerializeToString,
response_deserializer=tl__pb2.TypePhonePhoneCall.FromString,
)
self.PhoneSetCallRating = channel.unary_unary(
'/mtproto.Mtproto/PhoneSetCallRating',
request_serializer=tl__pb2.ReqPhoneSetCallRating.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.PhoneSaveCallDebug = channel.unary_unary(
'/mtproto.Mtproto/PhoneSaveCallDebug',
request_serializer=tl__pb2.ReqPhoneSaveCallDebug.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.UploadGetCdnFile = channel.unary_unary(
'/mtproto.Mtproto/UploadGetCdnFile',
request_serializer=tl__pb2.ReqUploadGetCdnFile.SerializeToString,
response_deserializer=tl__pb2.TypeUploadCdnFile.FromString,
)
self.UploadReuploadCdnFile = channel.unary_unary(
'/mtproto.Mtproto/UploadReuploadCdnFile',
request_serializer=tl__pb2.ReqUploadReuploadCdnFile.SerializeToString,
response_deserializer=tl__pb2.TypeVectorCdnFileHash.FromString,
)
self.HelpGetCdnConfig = channel.unary_unary(
'/mtproto.Mtproto/HelpGetCdnConfig',
request_serializer=tl__pb2.ReqHelpGetCdnConfig.SerializeToString,
response_deserializer=tl__pb2.TypeCdnConfig.FromString,
)
self.MessagesUploadMedia = channel.unary_unary(
'/mtproto.Mtproto/MessagesUploadMedia',
request_serializer=tl__pb2.ReqMessagesUploadMedia.SerializeToString,
response_deserializer=tl__pb2.TypeMessageMedia.FromString,
)
self.StickersCreateStickerSet = channel.unary_unary(
'/mtproto.Mtproto/StickersCreateStickerSet',
request_serializer=tl__pb2.ReqStickersCreateStickerSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.LangpackGetLangPack = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetLangPack',
request_serializer=tl__pb2.ReqLangpackGetLangPack.SerializeToString,
response_deserializer=tl__pb2.TypeLangPackDifference.FromString,
)
self.LangpackGetStrings = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetStrings',
request_serializer=tl__pb2.ReqLangpackGetStrings.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLangPackString.FromString,
)
self.LangpackGetDifference = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetDifference',
request_serializer=tl__pb2.ReqLangpackGetDifference.SerializeToString,
response_deserializer=tl__pb2.TypeLangPackDifference.FromString,
)
self.LangpackGetLanguages = channel.unary_unary(
'/mtproto.Mtproto/LangpackGetLanguages',
request_serializer=tl__pb2.ReqLangpackGetLanguages.SerializeToString,
response_deserializer=tl__pb2.TypeVectorLangPackLanguage.FromString,
)
self.ChannelsEditBanned = channel.unary_unary(
'/mtproto.Mtproto/ChannelsEditBanned',
request_serializer=tl__pb2.ReqChannelsEditBanned.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.ChannelsGetAdminLog = channel.unary_unary(
'/mtproto.Mtproto/ChannelsGetAdminLog',
request_serializer=tl__pb2.ReqChannelsGetAdminLog.SerializeToString,
response_deserializer=tl__pb2.TypeChannelsAdminLogResults.FromString,
)
self.StickersRemoveStickerFromSet = channel.unary_unary(
'/mtproto.Mtproto/StickersRemoveStickerFromSet',
request_serializer=tl__pb2.ReqStickersRemoveStickerFromSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.StickersChangeStickerPosition = channel.unary_unary(
'/mtproto.Mtproto/StickersChangeStickerPosition',
request_serializer=tl__pb2.ReqStickersChangeStickerPosition.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.StickersAddStickerToSet = channel.unary_unary(
'/mtproto.Mtproto/StickersAddStickerToSet',
request_serializer=tl__pb2.ReqStickersAddStickerToSet.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesStickerSet.FromString,
)
self.MessagesSendScreenshotNotification = channel.unary_unary(
'/mtproto.Mtproto/MessagesSendScreenshotNotification',
request_serializer=tl__pb2.ReqMessagesSendScreenshotNotification.SerializeToString,
response_deserializer=tl__pb2.TypeUpdates.FromString,
)
self.UploadGetCdnFileHashes = channel.unary_unary(
'/mtproto.Mtproto/UploadGetCdnFileHashes',
request_serializer=tl__pb2.ReqUploadGetCdnFileHashes.SerializeToString,
response_deserializer=tl__pb2.TypeVectorCdnFileHash.FromString,
)
self.MessagesGetUnreadMentions = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetUnreadMentions',
request_serializer=tl__pb2.ReqMessagesGetUnreadMentions.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesMessages.FromString,
)
self.MessagesFaveSticker = channel.unary_unary(
'/mtproto.Mtproto/MessagesFaveSticker',
request_serializer=tl__pb2.ReqMessagesFaveSticker.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ChannelsSetStickers = channel.unary_unary(
'/mtproto.Mtproto/ChannelsSetStickers',
request_serializer=tl__pb2.ReqChannelsSetStickers.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.ContactsResetSaved = channel.unary_unary(
'/mtproto.Mtproto/ContactsResetSaved',
request_serializer=tl__pb2.ReqContactsResetSaved.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
self.MessagesGetFavedStickers = channel.unary_unary(
'/mtproto.Mtproto/MessagesGetFavedStickers',
request_serializer=tl__pb2.ReqMessagesGetFavedStickers.SerializeToString,
response_deserializer=tl__pb2.TypeMessagesFavedStickers.FromString,
)
self.ChannelsReadMessageContents = channel.unary_unary(
'/mtproto.Mtproto/ChannelsReadMessageContents',
request_serializer=tl__pb2.ReqChannelsReadMessageContents.SerializeToString,
response_deserializer=tl__pb2.TypeBool.FromString,
)
class MtprotoServicer(object):
"""Procedures
"""
def InvokeAfterMsg(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InvokeAfterMsgs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthCheckPhone(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSendCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSignUp(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSignIn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthLogOut(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthResetAuthorizations(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthSendInvites(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC servicer stubs (grpcio-tools / protoc).
  # Every method below rejects the call with grpc.StatusCode.UNIMPLEMENTED and
  # raises NotImplementedError; a concrete servicer is expected to override
  # them. The stray `pass` lines are codegen artifacts with no effect.
  # Do not hand-edit — regenerate from the .proto definition instead.
  def AuthExportAuthorization(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AuthImportAuthorization(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountRegisterDevice(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountUnregisterDevice(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountUpdateNotifySettings(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountGetNotifySettings(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountResetNotifySettings(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountUpdateProfile(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountUpdateStatus(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountGetWallPapers(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UsersGetUsers(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UsersGetFullUser(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsGetStatuses(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsGetContacts(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsImportContacts(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsSearch(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsDeleteContact(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsDeleteContacts(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsBlock(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsUnblock(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsGetBlocked(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def MessagesGetMessages(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetDialogs(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetHistory(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSearch(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesReadHistory(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesDeleteHistory(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesDeleteMessages(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesReceivedMessages(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSetTyping(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSendMessage(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSendMedia(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesForwardMessages(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetChats(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetFullChat(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesEditChatTitle(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesEditChatPhoto(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesAddChatUser(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesDeleteChatUser(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesCreateChat(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UpdatesGetState(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UpdatesGetDifference(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def PhotosUpdateProfilePhoto(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def PhotosUploadProfilePhoto(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UploadSaveFilePart(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UploadGetFile(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def HelpGetConfig(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def HelpGetNearestDc(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def HelpGetAppUpdate(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def HelpSaveAppLog(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def HelpGetInviteText(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def PhotosDeletePhotos(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def PhotosGetUserPhotos(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def MessagesForwardMessage(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetDhConfig(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesRequestEncryption(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesAcceptEncryption(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesDiscardEncryption(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSetEncryptedTyping(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesReadEncryptedHistory(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSendEncrypted(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSendEncryptedFile(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSendEncryptedService(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesReceivedQueue(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def UploadSaveBigFilePart(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def InitConnection(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def HelpGetSupport(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AuthBindTempAuthKey(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsExportCard(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsImportCard(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesReadMessageContents(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountCheckUsername(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountUpdateUsername(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountGetPrivacy(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountSetPrivacy(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountDeleteAccount(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountGetAccountTTL(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountSetAccountTTL(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def InvokeWithLayer(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ContactsResolveUsername(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountSendChangePhoneCode(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountChangePhone(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetAllStickers(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def AccountUpdateDeviceLocked(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountGetPassword(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AuthCheckPassword(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetWebPagePreview(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountGetAuthorizations(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountResetAuthorization(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountGetPasswordSettings(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountUpdatePasswordSettings(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AuthRequestPasswordRecovery(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AuthRecoverPassword(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def InvokeWithoutUpdates(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesExportChatInvite(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesCheckChatInvite(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesImportChatInvite(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetStickerSet(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesInstallStickerSet(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesUninstallStickerSet(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AuthImportBotAuthorization(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesStartBot(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def HelpGetAppChangelog(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesReportSpam(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetMessagesViews(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def UpdatesGetChannelDifference(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsReadHistory(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsDeleteMessages(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsDeleteUserHistory(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsReportSpam(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsGetMessages(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsGetParticipants(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsGetParticipant(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsGetChannels(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def ChannelsGetFullChannel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsCreateChannel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsEditAbout(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsEditAdmin(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsEditTitle(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsEditPhoto(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsCheckUsername(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsUpdateUsername(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsJoinChannel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsLeaveChannel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsInviteToChannel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ChannelsExportInvite(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
  # NOTE(review): auto-generated gRPC UNIMPLEMENTED stubs (protoc output);
  # each sets grpc.StatusCode.UNIMPLEMENTED and raises NotImplementedError.
  # Override in a concrete servicer; do not hand-edit — regenerate instead.
  def ChannelsDeleteChannel(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesToggleChatAdmins(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesEditChatAdmin(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesMigrateChat(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSearchGlobal(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def AccountReportPeer(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesReorderStickerSets(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def HelpGetTermsOfService(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetDocumentByHash(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesSearchGifs(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def MessagesGetSavedGifs(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def MessagesSaveGif(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetInlineBotResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetInlineBotResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendInlineBotResult(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsToggleInvites(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsExportMessageLink(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsToggleSignatures(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesHideReportSpam(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetPeerSettings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsUpdatePinnedMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthResendCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthCancelCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetMessageEditData(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesEditMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesEditInlineBotMessage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetBotCallbackAnswer(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetBotCallbackAnswer(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsGetTopPeers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsResetTopPeerRating(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetPeerDialogs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSaveDraft(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetAllDrafts(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountSendConfirmPhoneCode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountConfirmPhone(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetFeaturedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReadFeaturedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetRecentStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSaveRecentSticker(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesClearRecentStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetArchivedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetAdminedPublicChannels(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AuthDropTempAuthKeys(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetGameScore(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetInlineGameScore(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetMaskStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetAttachedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetGameHighScores(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetInlineGameHighScores(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetCommonChats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetAllChats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpSetBotUpdatesStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetWebPage(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesToggleDialogPin(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReorderPinnedDialogs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetPinnedDialogs(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneRequestCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneAcceptCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneDiscardCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneReceivedCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesReportEncryptedSpam(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsGetPaymentForm(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsSendPaymentForm(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountGetTmpPassword(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetBotShippingResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSetBotPrecheckoutResults(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadGetWebFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BotsSendCustomRequest(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BotsAnswerWebhookJSONQuery(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsGetPaymentReceipt(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsValidateRequestedInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsGetSavedInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PaymentsClearSavedInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneGetCallConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneConfirmCall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneSetCallRating(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PhoneSaveCallDebug(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadGetCdnFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadReuploadCdnFile(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HelpGetCdnConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesUploadMedia(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersCreateStickerSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetLangPack(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetStrings(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetDifference(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LangpackGetLanguages(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsEditBanned(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsGetAdminLog(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersRemoveStickerFromSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersChangeStickerPosition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StickersAddStickerToSet(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesSendScreenshotNotification(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UploadGetCdnFileHashes(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetUnreadMentions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesFaveSticker(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsSetStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ContactsResetSaved(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MessagesGetFavedStickers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelsReadMessageContents(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MtprotoServicer_to_server(servicer, server):
rpc_method_handlers = {
'InvokeAfterMsg': grpc.unary_unary_rpc_method_handler(
servicer.InvokeAfterMsg,
request_deserializer=tl__pb2.ReqInvokeAfterMsg.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'InvokeAfterMsgs': grpc.unary_unary_rpc_method_handler(
servicer.InvokeAfterMsgs,
request_deserializer=tl__pb2.ReqInvokeAfterMsgs.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'AuthCheckPhone': grpc.unary_unary_rpc_method_handler(
servicer.AuthCheckPhone,
request_deserializer=tl__pb2.ReqAuthCheckPhone.FromString,
response_serializer=tl__pb2.TypeAuthCheckedPhone.SerializeToString,
),
'AuthSendCode': grpc.unary_unary_rpc_method_handler(
servicer.AuthSendCode,
request_deserializer=tl__pb2.ReqAuthSendCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AuthSignUp': grpc.unary_unary_rpc_method_handler(
servicer.AuthSignUp,
request_deserializer=tl__pb2.ReqAuthSignUp.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'AuthSignIn': grpc.unary_unary_rpc_method_handler(
servicer.AuthSignIn,
request_deserializer=tl__pb2.ReqAuthSignIn.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'AuthLogOut': grpc.unary_unary_rpc_method_handler(
servicer.AuthLogOut,
request_deserializer=tl__pb2.ReqAuthLogOut.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthResetAuthorizations': grpc.unary_unary_rpc_method_handler(
servicer.AuthResetAuthorizations,
request_deserializer=tl__pb2.ReqAuthResetAuthorizations.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthSendInvites': grpc.unary_unary_rpc_method_handler(
servicer.AuthSendInvites,
request_deserializer=tl__pb2.ReqAuthSendInvites.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthExportAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AuthExportAuthorization,
request_deserializer=tl__pb2.ReqAuthExportAuthorization.FromString,
response_serializer=tl__pb2.TypeAuthExportedAuthorization.SerializeToString,
),
'AuthImportAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AuthImportAuthorization,
request_deserializer=tl__pb2.ReqAuthImportAuthorization.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'AccountRegisterDevice': grpc.unary_unary_rpc_method_handler(
servicer.AccountRegisterDevice,
request_deserializer=tl__pb2.ReqAccountRegisterDevice.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUnregisterDevice': grpc.unary_unary_rpc_method_handler(
servicer.AccountUnregisterDevice,
request_deserializer=tl__pb2.ReqAccountUnregisterDevice.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUpdateNotifySettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateNotifySettings,
request_deserializer=tl__pb2.ReqAccountUpdateNotifySettings.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetNotifySettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetNotifySettings,
request_deserializer=tl__pb2.ReqAccountGetNotifySettings.FromString,
response_serializer=tl__pb2.TypePeerNotifySettings.SerializeToString,
),
'AccountResetNotifySettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountResetNotifySettings,
request_deserializer=tl__pb2.ReqAccountResetNotifySettings.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUpdateProfile': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateProfile,
request_deserializer=tl__pb2.ReqAccountUpdateProfile.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'AccountUpdateStatus': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateStatus,
request_deserializer=tl__pb2.ReqAccountUpdateStatus.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetWallPapers': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetWallPapers,
request_deserializer=tl__pb2.ReqAccountGetWallPapers.FromString,
response_serializer=tl__pb2.TypeVectorWallPaper.SerializeToString,
),
'UsersGetUsers': grpc.unary_unary_rpc_method_handler(
servicer.UsersGetUsers,
request_deserializer=tl__pb2.ReqUsersGetUsers.FromString,
response_serializer=tl__pb2.TypeVectorUser.SerializeToString,
),
'UsersGetFullUser': grpc.unary_unary_rpc_method_handler(
servicer.UsersGetFullUser,
request_deserializer=tl__pb2.ReqUsersGetFullUser.FromString,
response_serializer=tl__pb2.TypeUserFull.SerializeToString,
),
'ContactsGetStatuses': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetStatuses,
request_deserializer=tl__pb2.ReqContactsGetStatuses.FromString,
response_serializer=tl__pb2.TypeVectorContactStatus.SerializeToString,
),
'ContactsGetContacts': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetContacts,
request_deserializer=tl__pb2.ReqContactsGetContacts.FromString,
response_serializer=tl__pb2.TypeContactsContacts.SerializeToString,
),
'ContactsImportContacts': grpc.unary_unary_rpc_method_handler(
servicer.ContactsImportContacts,
request_deserializer=tl__pb2.ReqContactsImportContacts.FromString,
response_serializer=tl__pb2.TypeContactsImportedContacts.SerializeToString,
),
'ContactsSearch': grpc.unary_unary_rpc_method_handler(
servicer.ContactsSearch,
request_deserializer=tl__pb2.ReqContactsSearch.FromString,
response_serializer=tl__pb2.TypeContactsFound.SerializeToString,
),
'ContactsDeleteContact': grpc.unary_unary_rpc_method_handler(
servicer.ContactsDeleteContact,
request_deserializer=tl__pb2.ReqContactsDeleteContact.FromString,
response_serializer=tl__pb2.TypeContactsLink.SerializeToString,
),
'ContactsDeleteContacts': grpc.unary_unary_rpc_method_handler(
servicer.ContactsDeleteContacts,
request_deserializer=tl__pb2.ReqContactsDeleteContacts.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsBlock': grpc.unary_unary_rpc_method_handler(
servicer.ContactsBlock,
request_deserializer=tl__pb2.ReqContactsBlock.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsUnblock': grpc.unary_unary_rpc_method_handler(
servicer.ContactsUnblock,
request_deserializer=tl__pb2.ReqContactsUnblock.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsGetBlocked': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetBlocked,
request_deserializer=tl__pb2.ReqContactsGetBlocked.FromString,
response_serializer=tl__pb2.TypeContactsBlocked.SerializeToString,
),
'MessagesGetMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMessages,
request_deserializer=tl__pb2.ReqMessagesGetMessages.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesGetDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetDialogs,
request_deserializer=tl__pb2.ReqMessagesGetDialogs.FromString,
response_serializer=tl__pb2.TypeMessagesDialogs.SerializeToString,
),
'MessagesGetHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetHistory,
request_deserializer=tl__pb2.ReqMessagesGetHistory.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesSearch': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSearch,
request_deserializer=tl__pb2.ReqMessagesSearch.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesReadHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadHistory,
request_deserializer=tl__pb2.ReqMessagesReadHistory.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'MessagesDeleteHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDeleteHistory,
request_deserializer=tl__pb2.ReqMessagesDeleteHistory.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedHistory.SerializeToString,
),
'MessagesDeleteMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDeleteMessages,
request_deserializer=tl__pb2.ReqMessagesDeleteMessages.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'MessagesReceivedMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReceivedMessages,
request_deserializer=tl__pb2.ReqMessagesReceivedMessages.FromString,
response_serializer=tl__pb2.TypeVectorReceivedNotifyMessage.SerializeToString,
),
'MessagesSetTyping': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetTyping,
request_deserializer=tl__pb2.ReqMessagesSetTyping.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSendMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendMessage,
request_deserializer=tl__pb2.ReqMessagesSendMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesSendMedia': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendMedia,
request_deserializer=tl__pb2.ReqMessagesSendMedia.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesForwardMessages': grpc.unary_unary_rpc_method_handler(
servicer.MessagesForwardMessages,
request_deserializer=tl__pb2.ReqMessagesForwardMessages.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesGetChats': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetChats,
request_deserializer=tl__pb2.ReqMessagesGetChats.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'MessagesGetFullChat': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetFullChat,
request_deserializer=tl__pb2.ReqMessagesGetFullChat.FromString,
response_serializer=tl__pb2.TypeMessagesChatFull.SerializeToString,
),
'MessagesEditChatTitle': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditChatTitle,
request_deserializer=tl__pb2.ReqMessagesEditChatTitle.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesEditChatPhoto': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditChatPhoto,
request_deserializer=tl__pb2.ReqMessagesEditChatPhoto.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesAddChatUser': grpc.unary_unary_rpc_method_handler(
servicer.MessagesAddChatUser,
request_deserializer=tl__pb2.ReqMessagesAddChatUser.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesDeleteChatUser': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDeleteChatUser,
request_deserializer=tl__pb2.ReqMessagesDeleteChatUser.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesCreateChat': grpc.unary_unary_rpc_method_handler(
servicer.MessagesCreateChat,
request_deserializer=tl__pb2.ReqMessagesCreateChat.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'UpdatesGetState': grpc.unary_unary_rpc_method_handler(
servicer.UpdatesGetState,
request_deserializer=tl__pb2.ReqUpdatesGetState.FromString,
response_serializer=tl__pb2.TypeUpdatesState.SerializeToString,
),
'UpdatesGetDifference': grpc.unary_unary_rpc_method_handler(
servicer.UpdatesGetDifference,
request_deserializer=tl__pb2.ReqUpdatesGetDifference.FromString,
response_serializer=tl__pb2.TypeUpdatesDifference.SerializeToString,
),
'PhotosUpdateProfilePhoto': grpc.unary_unary_rpc_method_handler(
servicer.PhotosUpdateProfilePhoto,
request_deserializer=tl__pb2.ReqPhotosUpdateProfilePhoto.FromString,
response_serializer=tl__pb2.TypeUserProfilePhoto.SerializeToString,
),
'PhotosUploadProfilePhoto': grpc.unary_unary_rpc_method_handler(
servicer.PhotosUploadProfilePhoto,
request_deserializer=tl__pb2.ReqPhotosUploadProfilePhoto.FromString,
response_serializer=tl__pb2.TypePhotosPhoto.SerializeToString,
),
'UploadSaveFilePart': grpc.unary_unary_rpc_method_handler(
servicer.UploadSaveFilePart,
request_deserializer=tl__pb2.ReqUploadSaveFilePart.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'UploadGetFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetFile,
request_deserializer=tl__pb2.ReqUploadGetFile.FromString,
response_serializer=tl__pb2.TypeUploadFile.SerializeToString,
),
'HelpGetConfig': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetConfig,
request_deserializer=tl__pb2.ReqHelpGetConfig.FromString,
response_serializer=tl__pb2.TypeConfig.SerializeToString,
),
'HelpGetNearestDc': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetNearestDc,
request_deserializer=tl__pb2.ReqHelpGetNearestDc.FromString,
response_serializer=tl__pb2.TypeNearestDc.SerializeToString,
),
'HelpGetAppUpdate': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetAppUpdate,
request_deserializer=tl__pb2.ReqHelpGetAppUpdate.FromString,
response_serializer=tl__pb2.TypeHelpAppUpdate.SerializeToString,
),
'HelpSaveAppLog': grpc.unary_unary_rpc_method_handler(
servicer.HelpSaveAppLog,
request_deserializer=tl__pb2.ReqHelpSaveAppLog.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'HelpGetInviteText': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetInviteText,
request_deserializer=tl__pb2.ReqHelpGetInviteText.FromString,
response_serializer=tl__pb2.TypeHelpInviteText.SerializeToString,
),
'PhotosDeletePhotos': grpc.unary_unary_rpc_method_handler(
servicer.PhotosDeletePhotos,
request_deserializer=tl__pb2.ReqPhotosDeletePhotos.FromString,
response_serializer=tl__pb2.TypeVectorLong.SerializeToString,
),
'PhotosGetUserPhotos': grpc.unary_unary_rpc_method_handler(
servicer.PhotosGetUserPhotos,
request_deserializer=tl__pb2.ReqPhotosGetUserPhotos.FromString,
response_serializer=tl__pb2.TypePhotosPhotos.SerializeToString,
),
'MessagesForwardMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesForwardMessage,
request_deserializer=tl__pb2.ReqMessagesForwardMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesGetDhConfig': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetDhConfig,
request_deserializer=tl__pb2.ReqMessagesGetDhConfig.FromString,
response_serializer=tl__pb2.TypeMessagesDhConfig.SerializeToString,
),
'MessagesRequestEncryption': grpc.unary_unary_rpc_method_handler(
servicer.MessagesRequestEncryption,
request_deserializer=tl__pb2.ReqMessagesRequestEncryption.FromString,
response_serializer=tl__pb2.TypeEncryptedChat.SerializeToString,
),
'MessagesAcceptEncryption': grpc.unary_unary_rpc_method_handler(
servicer.MessagesAcceptEncryption,
request_deserializer=tl__pb2.ReqMessagesAcceptEncryption.FromString,
response_serializer=tl__pb2.TypeEncryptedChat.SerializeToString,
),
'MessagesDiscardEncryption': grpc.unary_unary_rpc_method_handler(
servicer.MessagesDiscardEncryption,
request_deserializer=tl__pb2.ReqMessagesDiscardEncryption.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSetEncryptedTyping': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetEncryptedTyping,
request_deserializer=tl__pb2.ReqMessagesSetEncryptedTyping.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReadEncryptedHistory': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadEncryptedHistory,
request_deserializer=tl__pb2.ReqMessagesReadEncryptedHistory.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSendEncrypted': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendEncrypted,
request_deserializer=tl__pb2.ReqMessagesSendEncrypted.FromString,
response_serializer=tl__pb2.TypeMessagesSentEncryptedMessage.SerializeToString,
),
'MessagesSendEncryptedFile': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendEncryptedFile,
request_deserializer=tl__pb2.ReqMessagesSendEncryptedFile.FromString,
response_serializer=tl__pb2.TypeMessagesSentEncryptedMessage.SerializeToString,
),
'MessagesSendEncryptedService': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendEncryptedService,
request_deserializer=tl__pb2.ReqMessagesSendEncryptedService.FromString,
response_serializer=tl__pb2.TypeMessagesSentEncryptedMessage.SerializeToString,
),
'MessagesReceivedQueue': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReceivedQueue,
request_deserializer=tl__pb2.ReqMessagesReceivedQueue.FromString,
response_serializer=tl__pb2.TypeVectorLong.SerializeToString,
),
'UploadSaveBigFilePart': grpc.unary_unary_rpc_method_handler(
servicer.UploadSaveBigFilePart,
request_deserializer=tl__pb2.ReqUploadSaveBigFilePart.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'InitConnection': grpc.unary_unary_rpc_method_handler(
servicer.InitConnection,
request_deserializer=tl__pb2.ReqInitConnection.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'HelpGetSupport': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetSupport,
request_deserializer=tl__pb2.ReqHelpGetSupport.FromString,
response_serializer=tl__pb2.TypeHelpSupport.SerializeToString,
),
'AuthBindTempAuthKey': grpc.unary_unary_rpc_method_handler(
servicer.AuthBindTempAuthKey,
request_deserializer=tl__pb2.ReqAuthBindTempAuthKey.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsExportCard': grpc.unary_unary_rpc_method_handler(
servicer.ContactsExportCard,
request_deserializer=tl__pb2.ReqContactsExportCard.FromString,
response_serializer=tl__pb2.TypeVectorInt.SerializeToString,
),
'ContactsImportCard': grpc.unary_unary_rpc_method_handler(
servicer.ContactsImportCard,
request_deserializer=tl__pb2.ReqContactsImportCard.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'MessagesReadMessageContents': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadMessageContents,
request_deserializer=tl__pb2.ReqMessagesReadMessageContents.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'AccountCheckUsername': grpc.unary_unary_rpc_method_handler(
servicer.AccountCheckUsername,
request_deserializer=tl__pb2.ReqAccountCheckUsername.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountUpdateUsername': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateUsername,
request_deserializer=tl__pb2.ReqAccountUpdateUsername.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'AccountGetPrivacy': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetPrivacy,
request_deserializer=tl__pb2.ReqAccountGetPrivacy.FromString,
response_serializer=tl__pb2.TypeAccountPrivacyRules.SerializeToString,
),
'AccountSetPrivacy': grpc.unary_unary_rpc_method_handler(
servicer.AccountSetPrivacy,
request_deserializer=tl__pb2.ReqAccountSetPrivacy.FromString,
response_serializer=tl__pb2.TypeAccountPrivacyRules.SerializeToString,
),
'AccountDeleteAccount': grpc.unary_unary_rpc_method_handler(
servicer.AccountDeleteAccount,
request_deserializer=tl__pb2.ReqAccountDeleteAccount.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetAccountTTL': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetAccountTTL,
request_deserializer=tl__pb2.ReqAccountGetAccountTTL.FromString,
response_serializer=tl__pb2.TypeAccountDaysTTL.SerializeToString,
),
'AccountSetAccountTTL': grpc.unary_unary_rpc_method_handler(
servicer.AccountSetAccountTTL,
request_deserializer=tl__pb2.ReqAccountSetAccountTTL.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'InvokeWithLayer': grpc.unary_unary_rpc_method_handler(
servicer.InvokeWithLayer,
request_deserializer=tl__pb2.ReqInvokeWithLayer.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'ContactsResolveUsername': grpc.unary_unary_rpc_method_handler(
servicer.ContactsResolveUsername,
request_deserializer=tl__pb2.ReqContactsResolveUsername.FromString,
response_serializer=tl__pb2.TypeContactsResolvedPeer.SerializeToString,
),
'AccountSendChangePhoneCode': grpc.unary_unary_rpc_method_handler(
servicer.AccountSendChangePhoneCode,
request_deserializer=tl__pb2.ReqAccountSendChangePhoneCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AccountChangePhone': grpc.unary_unary_rpc_method_handler(
servicer.AccountChangePhone,
request_deserializer=tl__pb2.ReqAccountChangePhone.FromString,
response_serializer=tl__pb2.TypeUser.SerializeToString,
),
'MessagesGetAllStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAllStickers,
request_deserializer=tl__pb2.ReqMessagesGetAllStickers.FromString,
response_serializer=tl__pb2.TypeMessagesAllStickers.SerializeToString,
),
'AccountUpdateDeviceLocked': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdateDeviceLocked,
request_deserializer=tl__pb2.ReqAccountUpdateDeviceLocked.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetPassword': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetPassword,
request_deserializer=tl__pb2.ReqAccountGetPassword.FromString,
response_serializer=tl__pb2.TypeAccountPassword.SerializeToString,
),
'AuthCheckPassword': grpc.unary_unary_rpc_method_handler(
servicer.AuthCheckPassword,
request_deserializer=tl__pb2.ReqAuthCheckPassword.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'MessagesGetWebPagePreview': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetWebPagePreview,
request_deserializer=tl__pb2.ReqMessagesGetWebPagePreview.FromString,
response_serializer=tl__pb2.TypeMessageMedia.SerializeToString,
),
'AccountGetAuthorizations': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetAuthorizations,
request_deserializer=tl__pb2.ReqAccountGetAuthorizations.FromString,
response_serializer=tl__pb2.TypeAccountAuthorizations.SerializeToString,
),
'AccountResetAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AccountResetAuthorization,
request_deserializer=tl__pb2.ReqAccountResetAuthorization.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AccountGetPasswordSettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetPasswordSettings,
request_deserializer=tl__pb2.ReqAccountGetPasswordSettings.FromString,
response_serializer=tl__pb2.TypeAccountPasswordSettings.SerializeToString,
),
'AccountUpdatePasswordSettings': grpc.unary_unary_rpc_method_handler(
servicer.AccountUpdatePasswordSettings,
request_deserializer=tl__pb2.ReqAccountUpdatePasswordSettings.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthRequestPasswordRecovery': grpc.unary_unary_rpc_method_handler(
servicer.AuthRequestPasswordRecovery,
request_deserializer=tl__pb2.ReqAuthRequestPasswordRecovery.FromString,
response_serializer=tl__pb2.TypeAuthPasswordRecovery.SerializeToString,
),
'AuthRecoverPassword': grpc.unary_unary_rpc_method_handler(
servicer.AuthRecoverPassword,
request_deserializer=tl__pb2.ReqAuthRecoverPassword.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'InvokeWithoutUpdates': grpc.unary_unary_rpc_method_handler(
servicer.InvokeWithoutUpdates,
request_deserializer=tl__pb2.ReqInvokeWithoutUpdates.FromString,
response_serializer=google_dot_protobuf_dot_any__pb2.Any.SerializeToString,
),
'MessagesExportChatInvite': grpc.unary_unary_rpc_method_handler(
servicer.MessagesExportChatInvite,
request_deserializer=tl__pb2.ReqMessagesExportChatInvite.FromString,
response_serializer=tl__pb2.TypeExportedChatInvite.SerializeToString,
),
'MessagesCheckChatInvite': grpc.unary_unary_rpc_method_handler(
servicer.MessagesCheckChatInvite,
request_deserializer=tl__pb2.ReqMessagesCheckChatInvite.FromString,
response_serializer=tl__pb2.TypeChatInvite.SerializeToString,
),
'MessagesImportChatInvite': grpc.unary_unary_rpc_method_handler(
servicer.MessagesImportChatInvite,
request_deserializer=tl__pb2.ReqMessagesImportChatInvite.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesGetStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetStickerSet,
request_deserializer=tl__pb2.ReqMessagesGetStickerSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'MessagesInstallStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.MessagesInstallStickerSet,
request_deserializer=tl__pb2.ReqMessagesInstallStickerSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSetInstallResult.SerializeToString,
),
'MessagesUninstallStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.MessagesUninstallStickerSet,
request_deserializer=tl__pb2.ReqMessagesUninstallStickerSet.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'AuthImportBotAuthorization': grpc.unary_unary_rpc_method_handler(
servicer.AuthImportBotAuthorization,
request_deserializer=tl__pb2.ReqAuthImportBotAuthorization.FromString,
response_serializer=tl__pb2.TypeAuthAuthorization.SerializeToString,
),
'MessagesStartBot': grpc.unary_unary_rpc_method_handler(
servicer.MessagesStartBot,
request_deserializer=tl__pb2.ReqMessagesStartBot.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'HelpGetAppChangelog': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetAppChangelog,
request_deserializer=tl__pb2.ReqHelpGetAppChangelog.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesReportSpam': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReportSpam,
request_deserializer=tl__pb2.ReqMessagesReportSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetMessagesViews': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMessagesViews,
request_deserializer=tl__pb2.ReqMessagesGetMessagesViews.FromString,
response_serializer=tl__pb2.TypeVectorInt.SerializeToString,
),
'UpdatesGetChannelDifference': grpc.unary_unary_rpc_method_handler(
servicer.UpdatesGetChannelDifference,
request_deserializer=tl__pb2.ReqUpdatesGetChannelDifference.FromString,
response_serializer=tl__pb2.TypeUpdatesChannelDifference.SerializeToString,
),
'ChannelsReadHistory': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsReadHistory,
request_deserializer=tl__pb2.ReqChannelsReadHistory.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsDeleteMessages': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsDeleteMessages,
request_deserializer=tl__pb2.ReqChannelsDeleteMessages.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedMessages.SerializeToString,
),
'ChannelsDeleteUserHistory': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsDeleteUserHistory,
request_deserializer=tl__pb2.ReqChannelsDeleteUserHistory.FromString,
response_serializer=tl__pb2.TypeMessagesAffectedHistory.SerializeToString,
),
'ChannelsReportSpam': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsReportSpam,
request_deserializer=tl__pb2.ReqChannelsReportSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsGetMessages': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetMessages,
request_deserializer=tl__pb2.ReqChannelsGetMessages.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'ChannelsGetParticipants': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetParticipants,
request_deserializer=tl__pb2.ReqChannelsGetParticipants.FromString,
response_serializer=tl__pb2.TypeChannelsChannelParticipants.SerializeToString,
),
'ChannelsGetParticipant': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetParticipant,
request_deserializer=tl__pb2.ReqChannelsGetParticipant.FromString,
response_serializer=tl__pb2.TypeChannelsChannelParticipant.SerializeToString,
),
'ChannelsGetChannels': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetChannels,
request_deserializer=tl__pb2.ReqChannelsGetChannels.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'ChannelsGetFullChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetFullChannel,
request_deserializer=tl__pb2.ReqChannelsGetFullChannel.FromString,
response_serializer=tl__pb2.TypeMessagesChatFull.SerializeToString,
),
'ChannelsCreateChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsCreateChannel,
request_deserializer=tl__pb2.ReqChannelsCreateChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsEditAbout': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditAbout,
request_deserializer=tl__pb2.ReqChannelsEditAbout.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsEditAdmin': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditAdmin,
request_deserializer=tl__pb2.ReqChannelsEditAdmin.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsEditTitle': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditTitle,
request_deserializer=tl__pb2.ReqChannelsEditTitle.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsEditPhoto': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditPhoto,
request_deserializer=tl__pb2.ReqChannelsEditPhoto.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsCheckUsername': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsCheckUsername,
request_deserializer=tl__pb2.ReqChannelsCheckUsername.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsUpdateUsername': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsUpdateUsername,
request_deserializer=tl__pb2.ReqChannelsUpdateUsername.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsJoinChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsJoinChannel,
request_deserializer=tl__pb2.ReqChannelsJoinChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsLeaveChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsLeaveChannel,
request_deserializer=tl__pb2.ReqChannelsLeaveChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsInviteToChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsInviteToChannel,
request_deserializer=tl__pb2.ReqChannelsInviteToChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsExportInvite': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsExportInvite,
request_deserializer=tl__pb2.ReqChannelsExportInvite.FromString,
response_serializer=tl__pb2.TypeExportedChatInvite.SerializeToString,
),
'ChannelsDeleteChannel': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsDeleteChannel,
request_deserializer=tl__pb2.ReqChannelsDeleteChannel.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesToggleChatAdmins': grpc.unary_unary_rpc_method_handler(
servicer.MessagesToggleChatAdmins,
request_deserializer=tl__pb2.ReqMessagesToggleChatAdmins.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesEditChatAdmin': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditChatAdmin,
request_deserializer=tl__pb2.ReqMessagesEditChatAdmin.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesMigrateChat': grpc.unary_unary_rpc_method_handler(
servicer.MessagesMigrateChat,
request_deserializer=tl__pb2.ReqMessagesMigrateChat.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesSearchGlobal': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSearchGlobal,
request_deserializer=tl__pb2.ReqMessagesSearchGlobal.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'AccountReportPeer': grpc.unary_unary_rpc_method_handler(
servicer.AccountReportPeer,
request_deserializer=tl__pb2.ReqAccountReportPeer.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReorderStickerSets': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReorderStickerSets,
request_deserializer=tl__pb2.ReqMessagesReorderStickerSets.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'HelpGetTermsOfService': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetTermsOfService,
request_deserializer=tl__pb2.ReqHelpGetTermsOfService.FromString,
response_serializer=tl__pb2.TypeHelpTermsOfService.SerializeToString,
),
'MessagesGetDocumentByHash': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetDocumentByHash,
request_deserializer=tl__pb2.ReqMessagesGetDocumentByHash.FromString,
response_serializer=tl__pb2.TypeDocument.SerializeToString,
),
'MessagesSearchGifs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSearchGifs,
request_deserializer=tl__pb2.ReqMessagesSearchGifs.FromString,
response_serializer=tl__pb2.TypeMessagesFoundGifs.SerializeToString,
),
'MessagesGetSavedGifs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetSavedGifs,
request_deserializer=tl__pb2.ReqMessagesGetSavedGifs.FromString,
response_serializer=tl__pb2.TypeMessagesSavedGifs.SerializeToString,
),
'MessagesSaveGif': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSaveGif,
request_deserializer=tl__pb2.ReqMessagesSaveGif.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetInlineBotResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetInlineBotResults,
request_deserializer=tl__pb2.ReqMessagesGetInlineBotResults.FromString,
response_serializer=tl__pb2.TypeMessagesBotResults.SerializeToString,
),
'MessagesSetInlineBotResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetInlineBotResults,
request_deserializer=tl__pb2.ReqMessagesSetInlineBotResults.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSendInlineBotResult': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendInlineBotResult,
request_deserializer=tl__pb2.ReqMessagesSendInlineBotResult.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsToggleInvites': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsToggleInvites,
request_deserializer=tl__pb2.ReqChannelsToggleInvites.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsExportMessageLink': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsExportMessageLink,
request_deserializer=tl__pb2.ReqChannelsExportMessageLink.FromString,
response_serializer=tl__pb2.TypeExportedMessageLink.SerializeToString,
),
'ChannelsToggleSignatures': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsToggleSignatures,
request_deserializer=tl__pb2.ReqChannelsToggleSignatures.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesHideReportSpam': grpc.unary_unary_rpc_method_handler(
servicer.MessagesHideReportSpam,
request_deserializer=tl__pb2.ReqMessagesHideReportSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetPeerSettings': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetPeerSettings,
request_deserializer=tl__pb2.ReqMessagesGetPeerSettings.FromString,
response_serializer=tl__pb2.TypePeerSettings.SerializeToString,
),
'ChannelsUpdatePinnedMessage': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsUpdatePinnedMessage,
request_deserializer=tl__pb2.ReqChannelsUpdatePinnedMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'AuthResendCode': grpc.unary_unary_rpc_method_handler(
servicer.AuthResendCode,
request_deserializer=tl__pb2.ReqAuthResendCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AuthCancelCode': grpc.unary_unary_rpc_method_handler(
servicer.AuthCancelCode,
request_deserializer=tl__pb2.ReqAuthCancelCode.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetMessageEditData': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMessageEditData,
request_deserializer=tl__pb2.ReqMessagesGetMessageEditData.FromString,
response_serializer=tl__pb2.TypeMessagesMessageEditData.SerializeToString,
),
'MessagesEditMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditMessage,
request_deserializer=tl__pb2.ReqMessagesEditMessage.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesEditInlineBotMessage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesEditInlineBotMessage,
request_deserializer=tl__pb2.ReqMessagesEditInlineBotMessage.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetBotCallbackAnswer': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetBotCallbackAnswer,
request_deserializer=tl__pb2.ReqMessagesGetBotCallbackAnswer.FromString,
response_serializer=tl__pb2.TypeMessagesBotCallbackAnswer.SerializeToString,
),
'MessagesSetBotCallbackAnswer': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetBotCallbackAnswer,
request_deserializer=tl__pb2.ReqMessagesSetBotCallbackAnswer.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsGetTopPeers': grpc.unary_unary_rpc_method_handler(
servicer.ContactsGetTopPeers,
request_deserializer=tl__pb2.ReqContactsGetTopPeers.FromString,
response_serializer=tl__pb2.TypeContactsTopPeers.SerializeToString,
),
'ContactsResetTopPeerRating': grpc.unary_unary_rpc_method_handler(
servicer.ContactsResetTopPeerRating,
request_deserializer=tl__pb2.ReqContactsResetTopPeerRating.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetPeerDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetPeerDialogs,
request_deserializer=tl__pb2.ReqMessagesGetPeerDialogs.FromString,
response_serializer=tl__pb2.TypeMessagesPeerDialogs.SerializeToString,
),
'MessagesSaveDraft': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSaveDraft,
request_deserializer=tl__pb2.ReqMessagesSaveDraft.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetAllDrafts': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAllDrafts,
request_deserializer=tl__pb2.ReqMessagesGetAllDrafts.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'AccountSendConfirmPhoneCode': grpc.unary_unary_rpc_method_handler(
servicer.AccountSendConfirmPhoneCode,
request_deserializer=tl__pb2.ReqAccountSendConfirmPhoneCode.FromString,
response_serializer=tl__pb2.TypeAuthSentCode.SerializeToString,
),
'AccountConfirmPhone': grpc.unary_unary_rpc_method_handler(
servicer.AccountConfirmPhone,
request_deserializer=tl__pb2.ReqAccountConfirmPhone.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetFeaturedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetFeaturedStickers,
request_deserializer=tl__pb2.ReqMessagesGetFeaturedStickers.FromString,
response_serializer=tl__pb2.TypeMessagesFeaturedStickers.SerializeToString,
),
'MessagesReadFeaturedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReadFeaturedStickers,
request_deserializer=tl__pb2.ReqMessagesReadFeaturedStickers.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetRecentStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetRecentStickers,
request_deserializer=tl__pb2.ReqMessagesGetRecentStickers.FromString,
response_serializer=tl__pb2.TypeMessagesRecentStickers.SerializeToString,
),
'MessagesSaveRecentSticker': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSaveRecentSticker,
request_deserializer=tl__pb2.ReqMessagesSaveRecentSticker.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesClearRecentStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesClearRecentStickers,
request_deserializer=tl__pb2.ReqMessagesClearRecentStickers.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetArchivedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetArchivedStickers,
request_deserializer=tl__pb2.ReqMessagesGetArchivedStickers.FromString,
response_serializer=tl__pb2.TypeMessagesArchivedStickers.SerializeToString,
),
'ChannelsGetAdminedPublicChannels': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetAdminedPublicChannels,
request_deserializer=tl__pb2.ReqChannelsGetAdminedPublicChannels.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'AuthDropTempAuthKeys': grpc.unary_unary_rpc_method_handler(
servicer.AuthDropTempAuthKeys,
request_deserializer=tl__pb2.ReqAuthDropTempAuthKeys.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSetGameScore': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetGameScore,
request_deserializer=tl__pb2.ReqMessagesSetGameScore.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'MessagesSetInlineGameScore': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetInlineGameScore,
request_deserializer=tl__pb2.ReqMessagesSetInlineGameScore.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetMaskStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetMaskStickers,
request_deserializer=tl__pb2.ReqMessagesGetMaskStickers.FromString,
response_serializer=tl__pb2.TypeMessagesAllStickers.SerializeToString,
),
'MessagesGetAttachedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAttachedStickers,
request_deserializer=tl__pb2.ReqMessagesGetAttachedStickers.FromString,
response_serializer=tl__pb2.TypeVectorStickerSetCovered.SerializeToString,
),
'MessagesGetGameHighScores': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetGameHighScores,
request_deserializer=tl__pb2.ReqMessagesGetGameHighScores.FromString,
response_serializer=tl__pb2.TypeMessagesHighScores.SerializeToString,
),
'MessagesGetInlineGameHighScores': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetInlineGameHighScores,
request_deserializer=tl__pb2.ReqMessagesGetInlineGameHighScores.FromString,
response_serializer=tl__pb2.TypeMessagesHighScores.SerializeToString,
),
'MessagesGetCommonChats': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetCommonChats,
request_deserializer=tl__pb2.ReqMessagesGetCommonChats.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'MessagesGetAllChats': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetAllChats,
request_deserializer=tl__pb2.ReqMessagesGetAllChats.FromString,
response_serializer=tl__pb2.TypeMessagesChats.SerializeToString,
),
'HelpSetBotUpdatesStatus': grpc.unary_unary_rpc_method_handler(
servicer.HelpSetBotUpdatesStatus,
request_deserializer=tl__pb2.ReqHelpSetBotUpdatesStatus.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetWebPage': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetWebPage,
request_deserializer=tl__pb2.ReqMessagesGetWebPage.FromString,
response_serializer=tl__pb2.TypeWebPage.SerializeToString,
),
'MessagesToggleDialogPin': grpc.unary_unary_rpc_method_handler(
servicer.MessagesToggleDialogPin,
request_deserializer=tl__pb2.ReqMessagesToggleDialogPin.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReorderPinnedDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReorderPinnedDialogs,
request_deserializer=tl__pb2.ReqMessagesReorderPinnedDialogs.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetPinnedDialogs': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetPinnedDialogs,
request_deserializer=tl__pb2.ReqMessagesGetPinnedDialogs.FromString,
response_serializer=tl__pb2.TypeMessagesPeerDialogs.SerializeToString,
),
'PhoneRequestCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneRequestCall,
request_deserializer=tl__pb2.ReqPhoneRequestCall.FromString,
response_serializer=tl__pb2.TypePhonePhoneCall.SerializeToString,
),
'PhoneAcceptCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneAcceptCall,
request_deserializer=tl__pb2.ReqPhoneAcceptCall.FromString,
response_serializer=tl__pb2.TypePhonePhoneCall.SerializeToString,
),
'PhoneDiscardCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneDiscardCall,
request_deserializer=tl__pb2.ReqPhoneDiscardCall.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'PhoneReceivedCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneReceivedCall,
request_deserializer=tl__pb2.ReqPhoneReceivedCall.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesReportEncryptedSpam': grpc.unary_unary_rpc_method_handler(
servicer.MessagesReportEncryptedSpam,
request_deserializer=tl__pb2.ReqMessagesReportEncryptedSpam.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'PaymentsGetPaymentForm': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsGetPaymentForm,
request_deserializer=tl__pb2.ReqPaymentsGetPaymentForm.FromString,
response_serializer=tl__pb2.TypePaymentsPaymentForm.SerializeToString,
),
'PaymentsSendPaymentForm': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsSendPaymentForm,
request_deserializer=tl__pb2.ReqPaymentsSendPaymentForm.FromString,
response_serializer=tl__pb2.TypePaymentsPaymentResult.SerializeToString,
),
'AccountGetTmpPassword': grpc.unary_unary_rpc_method_handler(
servicer.AccountGetTmpPassword,
request_deserializer=tl__pb2.ReqAccountGetTmpPassword.FromString,
response_serializer=tl__pb2.TypeAccountTmpPassword.SerializeToString,
),
'MessagesSetBotShippingResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetBotShippingResults,
request_deserializer=tl__pb2.ReqMessagesSetBotShippingResults.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesSetBotPrecheckoutResults': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSetBotPrecheckoutResults,
request_deserializer=tl__pb2.ReqMessagesSetBotPrecheckoutResults.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'UploadGetWebFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetWebFile,
request_deserializer=tl__pb2.ReqUploadGetWebFile.FromString,
response_serializer=tl__pb2.TypeUploadWebFile.SerializeToString,
),
'BotsSendCustomRequest': grpc.unary_unary_rpc_method_handler(
servicer.BotsSendCustomRequest,
request_deserializer=tl__pb2.ReqBotsSendCustomRequest.FromString,
response_serializer=tl__pb2.TypeDataJSON.SerializeToString,
),
'BotsAnswerWebhookJSONQuery': grpc.unary_unary_rpc_method_handler(
servicer.BotsAnswerWebhookJSONQuery,
request_deserializer=tl__pb2.ReqBotsAnswerWebhookJSONQuery.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'PaymentsGetPaymentReceipt': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsGetPaymentReceipt,
request_deserializer=tl__pb2.ReqPaymentsGetPaymentReceipt.FromString,
response_serializer=tl__pb2.TypePaymentsPaymentReceipt.SerializeToString,
),
'PaymentsValidateRequestedInfo': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsValidateRequestedInfo,
request_deserializer=tl__pb2.ReqPaymentsValidateRequestedInfo.FromString,
response_serializer=tl__pb2.TypePaymentsValidatedRequestedInfo.SerializeToString,
),
'PaymentsGetSavedInfo': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsGetSavedInfo,
request_deserializer=tl__pb2.ReqPaymentsGetSavedInfo.FromString,
response_serializer=tl__pb2.TypePaymentsSavedInfo.SerializeToString,
),
'PaymentsClearSavedInfo': grpc.unary_unary_rpc_method_handler(
servicer.PaymentsClearSavedInfo,
request_deserializer=tl__pb2.ReqPaymentsClearSavedInfo.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'PhoneGetCallConfig': grpc.unary_unary_rpc_method_handler(
servicer.PhoneGetCallConfig,
request_deserializer=tl__pb2.ReqPhoneGetCallConfig.FromString,
response_serializer=tl__pb2.TypeDataJSON.SerializeToString,
),
'PhoneConfirmCall': grpc.unary_unary_rpc_method_handler(
servicer.PhoneConfirmCall,
request_deserializer=tl__pb2.ReqPhoneConfirmCall.FromString,
response_serializer=tl__pb2.TypePhonePhoneCall.SerializeToString,
),
'PhoneSetCallRating': grpc.unary_unary_rpc_method_handler(
servicer.PhoneSetCallRating,
request_deserializer=tl__pb2.ReqPhoneSetCallRating.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'PhoneSaveCallDebug': grpc.unary_unary_rpc_method_handler(
servicer.PhoneSaveCallDebug,
request_deserializer=tl__pb2.ReqPhoneSaveCallDebug.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'UploadGetCdnFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetCdnFile,
request_deserializer=tl__pb2.ReqUploadGetCdnFile.FromString,
response_serializer=tl__pb2.TypeUploadCdnFile.SerializeToString,
),
'UploadReuploadCdnFile': grpc.unary_unary_rpc_method_handler(
servicer.UploadReuploadCdnFile,
request_deserializer=tl__pb2.ReqUploadReuploadCdnFile.FromString,
response_serializer=tl__pb2.TypeVectorCdnFileHash.SerializeToString,
),
'HelpGetCdnConfig': grpc.unary_unary_rpc_method_handler(
servicer.HelpGetCdnConfig,
request_deserializer=tl__pb2.ReqHelpGetCdnConfig.FromString,
response_serializer=tl__pb2.TypeCdnConfig.SerializeToString,
),
'MessagesUploadMedia': grpc.unary_unary_rpc_method_handler(
servicer.MessagesUploadMedia,
request_deserializer=tl__pb2.ReqMessagesUploadMedia.FromString,
response_serializer=tl__pb2.TypeMessageMedia.SerializeToString,
),
'StickersCreateStickerSet': grpc.unary_unary_rpc_method_handler(
servicer.StickersCreateStickerSet,
request_deserializer=tl__pb2.ReqStickersCreateStickerSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'LangpackGetLangPack': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetLangPack,
request_deserializer=tl__pb2.ReqLangpackGetLangPack.FromString,
response_serializer=tl__pb2.TypeLangPackDifference.SerializeToString,
),
'LangpackGetStrings': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetStrings,
request_deserializer=tl__pb2.ReqLangpackGetStrings.FromString,
response_serializer=tl__pb2.TypeVectorLangPackString.SerializeToString,
),
'LangpackGetDifference': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetDifference,
request_deserializer=tl__pb2.ReqLangpackGetDifference.FromString,
response_serializer=tl__pb2.TypeLangPackDifference.SerializeToString,
),
'LangpackGetLanguages': grpc.unary_unary_rpc_method_handler(
servicer.LangpackGetLanguages,
request_deserializer=tl__pb2.ReqLangpackGetLanguages.FromString,
response_serializer=tl__pb2.TypeVectorLangPackLanguage.SerializeToString,
),
'ChannelsEditBanned': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsEditBanned,
request_deserializer=tl__pb2.ReqChannelsEditBanned.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'ChannelsGetAdminLog': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsGetAdminLog,
request_deserializer=tl__pb2.ReqChannelsGetAdminLog.FromString,
response_serializer=tl__pb2.TypeChannelsAdminLogResults.SerializeToString,
),
'StickersRemoveStickerFromSet': grpc.unary_unary_rpc_method_handler(
servicer.StickersRemoveStickerFromSet,
request_deserializer=tl__pb2.ReqStickersRemoveStickerFromSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'StickersChangeStickerPosition': grpc.unary_unary_rpc_method_handler(
servicer.StickersChangeStickerPosition,
request_deserializer=tl__pb2.ReqStickersChangeStickerPosition.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'StickersAddStickerToSet': grpc.unary_unary_rpc_method_handler(
servicer.StickersAddStickerToSet,
request_deserializer=tl__pb2.ReqStickersAddStickerToSet.FromString,
response_serializer=tl__pb2.TypeMessagesStickerSet.SerializeToString,
),
'MessagesSendScreenshotNotification': grpc.unary_unary_rpc_method_handler(
servicer.MessagesSendScreenshotNotification,
request_deserializer=tl__pb2.ReqMessagesSendScreenshotNotification.FromString,
response_serializer=tl__pb2.TypeUpdates.SerializeToString,
),
'UploadGetCdnFileHashes': grpc.unary_unary_rpc_method_handler(
servicer.UploadGetCdnFileHashes,
request_deserializer=tl__pb2.ReqUploadGetCdnFileHashes.FromString,
response_serializer=tl__pb2.TypeVectorCdnFileHash.SerializeToString,
),
'MessagesGetUnreadMentions': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetUnreadMentions,
request_deserializer=tl__pb2.ReqMessagesGetUnreadMentions.FromString,
response_serializer=tl__pb2.TypeMessagesMessages.SerializeToString,
),
'MessagesFaveSticker': grpc.unary_unary_rpc_method_handler(
servicer.MessagesFaveSticker,
request_deserializer=tl__pb2.ReqMessagesFaveSticker.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ChannelsSetStickers': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsSetStickers,
request_deserializer=tl__pb2.ReqChannelsSetStickers.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'ContactsResetSaved': grpc.unary_unary_rpc_method_handler(
servicer.ContactsResetSaved,
request_deserializer=tl__pb2.ReqContactsResetSaved.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
'MessagesGetFavedStickers': grpc.unary_unary_rpc_method_handler(
servicer.MessagesGetFavedStickers,
request_deserializer=tl__pb2.ReqMessagesGetFavedStickers.FromString,
response_serializer=tl__pb2.TypeMessagesFavedStickers.SerializeToString,
),
'ChannelsReadMessageContents': grpc.unary_unary_rpc_method_handler(
servicer.ChannelsReadMessageContents,
request_deserializer=tl__pb2.ReqChannelsReadMessageContents.FromString,
response_serializer=tl__pb2.TypeBool.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'mtproto.Mtproto', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| en | 0.702747 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! Procedures Constructor. Args: channel: A grpc.Channel. Procedures # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in 
.proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing 
associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation 
comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # 
missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated 
documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in 
.proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file # missing associated documentation comment in .proto file | 1.827225 | 2 |
testbed/cp_flex_fcfs.py | CN-UPB/fcapp | 0 | 6618146 | from __future__ import division
import networkx as nx
import sys, math, random, time, copy
from crowd_network import *
import pdb
# pdb.set_trace()
# Note: Code uses older terminology: CRC=RCA, CLC=LCA, (sometimes) Flow=DFG
from mininet.node import OVSSwitch
from mininet.node import RemoteController
from mininet.link import TCLink
from mininet.node import CPULimitedHost
from mininet.topo import Topo
from MaxiNet.Frontend import maxinet
from MaxiNet.tools import Tools
from mininet.net import Mininet
import functools
import subprocess, shlex
import requests
import json
from ryu.lib import dpid as dpid_lib
import traceback
import datetime
import re
# Paths to external tools used by the emulation: the iperf traffic
# generator, the link-delay configuration script, and the command used to
# start the UDP echo server on emulated hosts.
g_iperf_path = "/home/maxinet/iperf2-code/src/iperf"
g_conf_script_path = "/home/maxinet/flexfcapf/code/configure_delay.sh"
g_echoserver_path = "python /home/maxinet/flexfcapf/code/echoserver.py"
class CPFlex:
    def __init__(self, filename=None, flowOption="LeastDemanding", scenario=None, modify_controllers=False, contrProb=None, cn=None, inputversion="flex", evalscen="generic", emulator=None):
        # Build a CPFlex placement instance either from a topology file
        # (filename) or from an existing CrowdNetwork object (cn).
        # emulator must name the backend ("Mininet" or "MaxiNet"); a missing
        # emulator, an invalid network, or a network without potential
        # controller hosts (cn.C empty) aborts the process.
        if emulator is None:
            print("Error: Emulator missing!")
            exit(1)
        if filename is not None:
            # Weights are read from the file only when no explicit scenario
            # overrides them.
            if scenario is None:
                read_weights_from_file = True
            else:
                read_weights_from_file = False
            self.cn = CrowdNetwork()
            valid_network = self.cn.generate_from_file(filename, read_weights_from_file, scenario, modify_controllers, contrProb, inputversion, evalscen)
            if valid_network:
                self.state = "NOT SOLVED"
            else:
                self.state = "INVALID NETWORK"
                print "Error: Invalid network!"
                exit(1)
        else:
            if cn is None:
                print "Error: Nothing to create CPFlex network from!"
                exit(1)
            else:
                self.cn = cn
                self.state = "NOT SOLVED"
        if len(self.cn.C) == 0:
            print "Error: Cannot work without potential hosts!"
            exit(1)
        self.getFlowOption = flowOption  # flow-pick strategy: "LeastDemanding" / "MostDemanding"
        self.iterations = 0              # number of cpgreedy runs performed so far
        self.Controlled = []             # nodes currently under CLC control
        self.CRCs = []                   # active regional controllers
        self.CLCs = []                   # active local controllers
        self.Satisfied = []              # flows (DFGs) currently satisfied
        self.uncontrolledCLCs = []       # CLCs that lost their CRC assignment
        self.VCRatio = len(self.cn.V)/(len(self.cn.C))  # nodes-per-candidate ratio used by the greedy heuristic
        self.banlist = []                # CLC candidates banned within one cpgreedy run
        self.flexOperation = False       # True enables incremental (flexible) re-optimization
        self.current_time = None # will be set by updateTime if run in simulation mode
        self.L_lowload = 0.9             # load fraction below which lowload consolidation is considered
        self.T_lowload = 60.0            # seconds a lowload alarm must persist before execution
        self.LL_alarm = False
        self.LL_alarm_time = None # will be set at first LL_alarm
        self.LL_execution = False
        self.lastruntime = 0             # wall-clock runtime of the last cpgreedy run (minus iperf-stop time)
        self.lastiperfstoptime = 0
        # Emulation bookkeeping (Mininet/MaxiNet objects, routing and log state).
        self.Switches = {}
        self.Hosts = {}
        self.Links = None
        self.RoutingPaths = []
        self.TestbedNetwork = None
        self.ParentProcess = []
        self.TotalSatisfied = []
        self.TotalUnsatisfied = []
        self.TotalFlowStopped = []
        self.emulator = emulator
        self.CurrentRoutingPaths = []
        self.hObjs = []
        self.JsonEntries = []
        self.CLCBucketIdList = []
        self.iperf_log_tag = ""
def scratchCopy(self):
cntmp = self.cn.copy()
cntmp.cleanup()
return CPFlex(filename=None, cn=cntmp)
    def cpgreedy(self):
        # Main greedy placement pass: (re)establish CRC/CLC control over all
        # nodes and satisfy as many flows as possible. Updates the per-run
        # statistics counters and self.state ("Solved" / "NOT SOLVED").
        tstart = time.time()
        if self.state == "INVALID NETWORK":
            print "Error: Invalid network!"
            exit(1)
        self.banlist = []
        self.iterations += 1
        # for simulation stats
        self.newCLCcontrols = 0
        self.newFlowSats = 0
        self.cleanedUpCLCcontrols = 0
        self.lastruntime = 0
        self.lastiperfstoptime = 0
        if self.iterations > 1 and self.flexOperation == True:
            self.updateVCRatio()
        # Re-home CLCs that lost their CRC; a CLC that cannot get a CRC is
        # torn down entirely.
        while len(self.uncontrolledCLCs) > 0:
            v = self.uncontrolledCLCs[0]
            tmp = self.findCRC(v)
            if tmp == False:
                self.remCLC(v)
                self.uncontrolledCLCs.remove(v)
        if self.LL_execution == True:
            self.lowload()
        elif len(self.Controlled) < len(self.cn.V):
            self.browseCurrentCLCs()
        if len(self.Controlled) < len(self.cn.V):
            self.state = "NOT SOLVED"
            self.globalOption = "neighbors"
            # Phase 1: add CLCs until every node is controlled.
            while len(self.Controlled) < len(self.cn.V):
                if len(self.CLCs) == len(self.cn.C):
                    # All candidates in use: force remaining nodes onto the
                    # nearest CLC, evicting flows if necessary.
                    if self.iterations > 1 and self.flexOperation == True:
                        self.browseCurrentCLCs()
                    self.forceControl()
                    break
                self.findCLC(self.globalOption)
        if len(self.Controlled) == len(self.cn.V):
            self.state = "Solved"
        # Phase 2: add further CLCs purely to satisfy remaining flows.
        if len(self.CLCs) < len(self.cn.C):
            self.globalOption = "flows"
            while len(self.Satisfied) < len(self.cn.F):
                tmpsat = len(self.Satisfied)
                self.findCLC(self.globalOption)
                if tmpsat == len(self.Satisfied):
                    # No progress: ban and roll back the newest CLC.
                    self.newCLCcontrols -= len(self.cn.G.node[self.CLCs[-1]]['CLCcontrol'])
                    self.banlist.append(self.CLCs[-1])
                    self.remCLC(self.CLCs[-1])
                if len(self.CLCs) + len(self.banlist) == len(self.cn.C):
                    break
        self.cleanupCLCcontrols(self.cn.V)
        tend = time.time()
        self.lastruntime = tend - tstart - self.lastiperfstoptime
def findCLC(self, option):
# tstart = time.time()
candidates = self.getCLCcandidates(option)
# tend = time.time()
# print "Candidate-Runtime: " + str(tend-tstart)
for v in candidates:
tmp = self.findCRC(v)
if tmp == True:
self.addNewCLC(v)
break
def forceControl(self): # force control of uncontrolled nodes to nearest CLC
uncontrolled = list(set(self.cn.V) - set(self.Controlled))
removed_flows = []
for v in uncontrolled:
CLCtmp = list(self.CLCs)
CLCtmp.sort(key=lambda c: len(nx.shortest_path(self.cn.G, source=c, target=v)))
c = CLCtmp[0]
path = nx.shortest_path(self.cn.G, source=c, target=v)
ftmp = list(self.cn.G.node[c]['Satisfies'])
ftmp.sort(key=lambda f: self.cn.G.node[c]['ProcFlow'][f], reverse=True)
for f in ftmp:
removed_flows.append((c,f))
self.remFlowSat(f)
tmp = self.checkCLC(path)
if tmp == True:
self.addCLCcontrol(path)
break
elif tmp == 2: # processing capacity is fine, now clear path
for i in range(0,len(path)-1):
ftmp2 = [f for f in self.cn.F if self.flowUsesLink(f,path[i],path[i+1]) == True]
ftmp2.sort(key=lambda f: self.cn.fdata[f]['b_flow'], reverse=True)
for x in ftmp2:
removed_flows.append((self.cn.fdata[f]['CLC'],f))
self.remFlowSat(f)
if self.cn.G.edge[path[i]][path[i+1]]['b_rem'] >= self.cn.b_CLC:
break
self.addCLCcontrol(path)
break
for c,f in removed_flows: # add back flows if possible
tmp = self.checkFlowSat(c,f)
if tmp == True:
self.addFlowSat(c,f)
    def getCLCcandidates(self, option=None):
        # Return the list of potential CLC hosts (not yet active, not banned)
        # ordered by the given selection strategy. Falls back to the
        # "isolated_*" strategies (and updates self.globalOption) when the
        # primary strategy yields no progress.
        candidates = list(set(self.cn.C) - (set(self.CLCs) | set(self.banlist)))
        # avoid CRCs to be used as CLCs as long as possible
        if len(set(candidates) - set(self.CRCs)) > 0:
            candidates = [c for c in candidates if not c in self.CRCs]
        remaining_nodes = set(self.cn.V) - set(self.Controlled)
        remaining_flows = set(self.cn.F) - set(self.Satisfied)
        if option == "neighbors":
            # Rank by number of uncontrolled nodes in the candidate's
            # 1-hop neighborhood (including itself).
            ctmp = [(k, len((set([k]) | set(self.cn.G.neighbors(k))) - set(self.Controlled))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            bestvalue = ctmp[0][1]
            if bestvalue > 0:
                candidates = [x[0] for x in ctmp]
            else:
                self.globalOption = "isolated_nodes"
                candidates = self.getCLCcandidates("isolated_nodes")
        elif option == "isolated_nodes":
            # Rank by proximity to any remaining uncontrolled node.
            paths = []
            for i in remaining_nodes:
                for j in candidates:
                    paths.append(nx.shortest_path(self.cn.G, source=j, target=i))
            paths.sort(key=len)
            candidates = []
            for p in paths:
                if not p[0] in candidates:
                    candidates.append(p[0])
        elif option == "flows":
            # Rank by number of unsatisfied flows observable at the candidate.
            ctmp = [(k, len(set(self.cn.Wf[k]) - set(self.Satisfied))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            bestvalue = ctmp[0][1]
            if bestvalue > 0:
                candidates = [x[0] for x in ctmp]
            else:
                self.globalOption = "isolated_flows"
                candidates = self.getCLCcandidates("isolated_flows")
        elif option == "flows_nn": # CAUTION: very slow for many flows in the network! Currently not used.
            ctmp = [(k, len(set(self.cn.Wf[k]) - set(self.Satisfied)) + sum(len(set(self.cn.Wf[j]) - set(self.Satisfied)) for j in self.cn.G.neighbors(k))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            bestvalue = ctmp[0][1]
            if bestvalue > 0:
                candidates = [x[0] for x in ctmp]
            else:
                self.globalOption = "isolated_flows"
                candidates = self.getCLCcandidates("isolated_flows")
        elif option == "isolated_flows":
            # Rank by proximity to the observation points of unsatisfied flows.
            paths = []
            for f in remaining_flows:
                for i in self.cn.Wb[f]:
                    for j in candidates:
                        paths.append(nx.shortest_path(self.cn.G, source=j, target=i))
            paths.sort(key=len)
            candidates = []
            for p in paths:
                if not p[0] in candidates:
                    candidates.append(p[0])
        elif option == "isolated_flows2": # just a test, currently not used.
            nodes_with_flows = [(k, len(set(self.cn.Wf[k]) - set(self.Satisfied))) for k in self.cn.V]
            nodes_with_flows.sort(key=lambda x: x[1], reverse=True)
            node_with_most_flows = nodes_with_flows[0][0]
            paths = []
            for j in candidates:
                paths.append(nx.shortest_path(self.cn.G, source=j, target=node_with_most_flows))
            paths.sort(key=len)
            candidates = [p[0] for p in paths]
        elif option == "neighbors_and_flows": # currently not used.
            ctmp = [(k, len(set([k]) | set(self.cn.G.neighbors(k)) - set(self.Controlled)) + len(set(self.cn.Wf[k]) - set(self.Satisfied))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            candidates = [x[0] for x in ctmp]
        return candidates
    def addNewCLC(self, v):
        # Activate v as a new CLC: let it control itself, then greedily grab
        # further nodes (by path length) and satisfy flows, alternating
        # between node controls and flow satisfactions guided by VCRatio.
        paths = []
        pf = set([])   # flows that v could currently satisfy
        nc = 0         # controls added by v (stats)
        nnc = 0        # newly controlled (previously uncontrolled) nodes
        fs = 0         # flows satisfied by v (stats)
        tmp = self.checkCLC([v])
        if tmp == True:
            nc += 1
            if v not in self.Controlled:
                nnc += 1
            self.addCLCcontrol([v])
            pf = self.updatePotentialFlows(pf, v, [v])
        else:
            # v cannot even control itself: drop it from the candidate set.
            self.cn.C.remove(v)
        for i in self.cn.V:
            if i <> v and (i not in self.Controlled or len(set(self.cn.Wf[i]) - set(self.Satisfied)) > 0):
                paths.append((nx.shortest_path(self.cn.G, source=v, target=i), i in self.Controlled, len(set(self.cn.Wf[i]) - set(self.Satisfied))))
        # While uncontrolled nodes remain, prefer uncontrolled/near nodes;
        # afterwards prefer nodes with many unsatisfied flows.
        if len(self.Controlled) < len(self.cn.V):
            paths.sort(key=lambda x: x[1])
            paths.sort(key=lambda x: len(x[0]))
            notyetsolved = True
        else:
            paths.sort(key=lambda x: len(x[0]))
            paths.sort(key=lambda x: x[2], reverse=True)
            notyetsolved = False
        while (len(paths) > 0 or len(pf) > 0) and (len(self.Controlled) < len(self.cn.V) or len(self.Satisfied) < len(self.cn.F)):
            if notyetsolved and len(self.Controlled) == len(self.cn.V):
                # All nodes controlled now: switch the ordering criterion.
                paths.sort(key=lambda x: len(x[0]))
                paths.sort(key=lambda x: x[2], reverse=True)
                notyetsolved = False
            if (len(pf) > 0 and (nnc >= self.VCRatio or len(self.Controlled) == len(self.cn.V))) or len(paths) == 0:
                # Satisfy a flow next.
                f = self.getFlow(pf, self.getFlowOption)
                tmp = self.checkFlowSat(v, f)
                if tmp == True:
                    self.addFlowSat(v, f)
                    fs += 1
                pf.remove(f)
            else:
                # Grab the next node control.
                if len(paths) == 0:
                    break
                p = list(paths[0][0])
                del paths[0]
                tmp = self.checkCLC(p)
                if tmp == True:
                    nc += 1
                    if p[-1] not in self.Controlled:
                        nnc += 1
                    self.addCLCcontrol(p)
                    pf = self.updatePotentialFlows(pf, v, [p[-1]])
                elif tmp > 2: # issue is exhausted processing capacity
                    break
def updateVCRatio(self):
self.VCRatio = (len(self.cn.V) - len(self.Controlled)) / len(self.cn.C)
def updatePotentialFlows(self, pf, v, nn):
cpf = set([])
for w in nn:
cpf = cpf | set(self.cn.Wf[w])
for f in cpf:
if self.cn.fdata[f]['isSat'] == False and set(self.cn.Wb[f]) <= set(self.cn.G.node[v]['CLCcontrol']):
pf.add(f)
return pf
def getFlow(self, pf, option):
tmp = list(pf)
if len(tmp) == 0:
return None
else:
if option == "MostDemanding":
tmp.sort(key=lambda f: self.cn.fdata[f]['p_flow'], reverse=True)
elif option == "LeastDemanding":
tmp.sort(key=lambda f: self.cn.fdata[f]['p_flow'])
return tmp[0]
def findCRC(self, v):
# check already active CRCs first
paths = []
for i in self.CRCs:
paths.append(nx.shortest_path(self.cn.G, source=i, target=v))
paths.sort(key=len)
for p in paths:
if self.checkCRC(p) == True:
self.addCRCcontrol(p)
return True
# need to add a new CRC, at first try to avoid active CLCs and CLC candidate
return self.findNewCRC(v)
    def findNewCRC(self, v):
        # Activate a new CRC for node v. Preference order: unused candidates
        # (the very first CRC is placed centrally), then v itself, then
        # already active CLCs. Returns True on success, False otherwise.
        paths = []
        for i in list(set(self.cn.C) - (set(self.CRCs) | set(self.CLCs) | set([v]))):
            paths.append(nx.shortest_path(self.cn.G, source=i, target=v))
        if len(self.CRCs) == 0: # first CRC should be placed centrally
            paths.sort(key=lambda p: sum(len(nx.shortest_path(self.cn.G, source=p[0], target=c)) for c in self.cn.C))
        else:
            paths.sort(key=lambda p: len(p))
        for p in paths:
            if self.checkCRC(p) == True:
                self.addCRCcontrol(p)
                return True
        # last option: try CLC candidate, then already active CLCs
        if self.checkCRC([v]) == True:
            self.addCRCcontrol([v])
            return True
        paths = []
        for i in self.CLCs:
            paths.append(nx.shortest_path(self.cn.G, source=i, target=v))
        paths.sort(key=len)
        for p in paths:
            if self.checkCRC(p) == True:
                self.addCRCcontrol(p)
                return True
        return False
    # checks if a certain CRC control can be established
    def checkCRC(self,path):
        # path[0] is the (potential) CRC, path[-1] the node to control.
        v = path[0]
        # Latency budget l_CRC must cover the round-trip link latency along
        # the path plus the p_CRC/p_rem term (processing share at v).
        if sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)) + self.cn.p_CRC/self.cn.G.node[v]['p_rem'] > self.cn.l_CRC:
            return False
        # Every hop must still have b_CRC bandwidth available.
        for i in range(0,len(path)-1):
            if self.cn.G.edge[path[i]][path[i+1]]['b_rem'] < self.cn.b_CRC:
                return False
        return True
    # checks if a certain CLC control can be established
    def checkCLC(self,path):
        # Return codes: True = feasible; 4 = latency/processing budget
        # exceeded; 2 = insufficient link bandwidth (processing is fine).
        # Callers rely on these distinct codes (e.g. forceControl treats 2
        # as "clear the path" and addNewCLC aborts on > 2).
        v = path[0]
        if sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)) + self.cn.p_CLC/self.cn.G.node[v]['p_rem'] > self.cn.l_CLC:
            return 4
        for i in range(0,len(path)-1):
            # reserve capacity for 2 CRC assignments (might be beneficial later)
            if self.cn.G.edge[path[i]][path[i+1]]['b_rem'] - 2*self.cn.b_CRC < self.cn.b_CLC:
                return 2
        return True
    # checks if a flow f can be satisfied by a controller v
    def checkFlowSat(self, v, f):
        # A flow is satisfiable by CLC v iff it is not yet satisfied, all of
        # its observation points Wb[f] are controlled by v, every control
        # path meets the flow's latency budget, and every involved link has
        # enough spare bandwidth (counting multiple traversals).
        if self.cn.fdata[f]['isSat'] == True:
            return False
        if not set(self.cn.Wb[f]) <= set(self.cn.G.node[v]['CLCcontrol']):
            return False
        flowedgecount = {}   # undirected edge -> number of control paths crossing it
        for k in self.cn.Wb[f]:
            path = self.cn.G.node[v]['CLCpaths'][k]
            pathLat = sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1))
            for i in range(0,len(path)-1):
                # Normalize edge keys so both directions map to one entry.
                if path[i] < path[i+1]:
                    edgekey = (path[i],path[i+1])
                else:
                    edgekey = (path[i+1],path[i])
                if not edgekey in flowedgecount:
                    flowedgecount[edgekey] = 1
                else:
                    flowedgecount[edgekey] += 1
            # Per-path latency check including the processing share at v.
            if pathLat + self.cn.fdata[f]['p_flow']/self.cn.G.node[v]['p_rem'] > self.cn.fdata[f]['l_flow']:
                return False
        for e in flowedgecount:
            # reserve capacity for 2 CRC and 2 CLC assignments (complete control structure more important than flow satisfaction)
            if self.cn.G.edge[e[0]][e[1]]['b_rem'] - 2*self.cn.b_CRC - 2*self.cn.b_CLC < flowedgecount[(e[0],e[1])] * self.cn.fdata[f]['b_flow']:
                return False
        return True
    def addCRCcontrol(self, path):
        # Establish CRC control of node path[-1] by CRC path[0]: reserve
        # link bandwidth and processing share, and update both nodes'
        # bookkeeping. Feasibility must have been verified via checkCRC.
        v = path[0]
        w = path[-1]
        if w in self.cn.G.node[v]['CRCcontrol']:
            print "Critical error: Tried to add allready existing CRC control!"
            exit(1)
        for i in range(0,len(path)-1):
            self.cn.G.edge[path[i]][path[i+1]]['b_rem'] -= self.cn.b_CRC
        # Processing share grows with path latency (less latency slack left
        # for processing); remembered in ProcCRC so it can be released later.
        self.cn.G.node[v]['p_rem'] -= self.cn.p_CRC/(self.cn.l_CRC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['ProcCRC'][w] = self.cn.p_CRC/(self.cn.l_CRC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['CRCcontrol'].append(w)
        self.cn.G.node[v]['CRCpaths'][w] = path
        self.cn.G.node[v]['isCRC'] = True
        self.cn.G.node[w]['CRC'] = v
        self.cn.G.node[w]['pathtoCRC'] = path
        if v not in self.CRCs:
            self.CRCs.append(v)
        # A re-homed CLC is no longer uncontrolled; the solution may now be
        # complete again.
        if self.cn.G.node[w]['isCLC'] == True and w in self.uncontrolledCLCs:
            self.uncontrolledCLCs.remove(w)
            if len(self.uncontrolledCLCs) == 0 and len(self.Controlled) == len(self.cn.V):
                self.state = "Solved"
    def addCLCcontrol(self, path):
        # Establish CLC control of node path[-1] by CLC path[0]: reserve
        # link bandwidth and processing share and update the bookkeeping.
        # Feasibility must have been verified via checkCLC.
        self.newCLCcontrols += 1
        v = path[0]
        w = path[-1]
        if w in self.cn.G.node[v]['CLCcontrol']:
            print "Critical error: Tried to add allready existing CLC control!"
            exit(1)
        for i in range(0,len(path)-1):
            self.cn.G.edge[path[i]][path[i+1]]['b_rem'] -= self.cn.b_CLC
        # Processing share depends on the remaining latency slack; stored in
        # ProcCLC for later release.
        self.cn.G.node[v]['p_rem'] -= self.cn.p_CLC/(self.cn.l_CLC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['ProcCLC'][w] = self.cn.p_CLC/(self.cn.l_CLC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['CLCcontrol'].append(w)
        self.cn.G.node[v]['CLCpaths'][w] = path
        self.cn.G.node[v]['isCLC'] = True
        self.cn.G.node[w]['CLCs'].append(v)
        self.cn.G.node[w]['pathtoCLC'][v] = path
        if v not in self.CLCs:
            self.CLCs.append(v)
        if w not in self.Controlled:
            self.Controlled.append(w)
    def addFlowSat(self, v, f):
        # Mark flow f as satisfied by CLC v: reserve bandwidth on every
        # control path to the flow's observation points and the processing
        # share at v. Feasibility must have been verified via checkFlowSat.
        self.newFlowSats += 1
        self.cn.fdata[f]['isSat'] = True
        self.cn.fdata[f]['CLC'] = v
        flowpaths = [self.cn.G.node[v]['CLCpaths'][k] for k in self.cn.Wb[f]]
        for path in flowpaths:
            for i in range(0, len(path) - 1):
                self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] -= self.cn.fdata[f]['b_flow']
        # The slowest path determines the remaining latency slack and thus
        # the processing share; remembered in ProcFlow for later release.
        maxpathlatency = max([sum(2 * self.cn.G.edge[path[i]][path[i + 1]]['l_cap'] for i in range(0, len(path) - 1)) for path in flowpaths])
        self.cn.G.node[v]['p_rem'] -= self.cn.fdata[f]['p_flow'] / (self.cn.fdata[f]['l_flow'] - maxpathlatency)
        self.cn.G.node[v]['ProcFlow'][f] = self.cn.fdata[f]['p_flow'] / (self.cn.fdata[f]['l_flow'] - maxpathlatency)
        self.cn.G.node[v]['Satisfies'].append(f)
        self.Satisfied.append(f)
def remCRC(self, v):
self.cn.G.node[v]['isCRC'] = False
tmp = list(self.cn.G.node[v]['CRCcontrol'])
for w in tmp:
self.remCRCcontrol(v, w)
self.CRCs.remove(v)
    def remCRCcontrol(self, v, w):
        # Release CRC v's control of node w: free the reserved link
        # bandwidth and processing share and clear the bookkeeping.
        if not w in self.cn.G.node[v]['CRCcontrol']:
            print "Critical error: Tried to remove non-existing CRC control!"
            exit(1)
        path = self.cn.G.node[v]['CRCpaths'][w]
        for i in range(0, len(path) - 1):
            self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] += self.cn.b_CRC
        self.cn.G.node[v]['p_rem'] += self.cn.G.node[v]['ProcCRC'][w]
        del self.cn.G.node[v]['ProcCRC'][w]
        self.cn.G.node[v]['CRCcontrol'].remove(w)
        del self.cn.G.node[v]['CRCpaths'][w]
        self.cn.G.node[w]['CRC'] = None
        self.cn.G.node[w]['pathtoCRC'] = None
        # A CLC losing its CRC must be re-homed before the solution counts
        # as complete again.
        if self.cn.G.node[w]['isCLC'] == True:
            self.uncontrolledCLCs.append(w)
            self.state = "NOT SOLVED"
        # Deactivate the CRC once its last control relation is gone.
        if len(self.cn.G.node[v]['CRCcontrol']) == 0:
            self.remCRC(v)
    def remCLC(self, v):
        # Deactivate CLC v: unsatisfy its flows, release all node controls,
        # then detach it from its CRC. isCLC is cleared before the CRC
        # detach so remCRCcontrol does not queue v as an uncontrolled CLC.
        tmp = list(self.cn.G.node[v]['Satisfies'])
        for f in tmp:
            self.remFlowSat(f)
        tmp = list(self.cn.G.node[v]['CLCcontrol'])
        for w in tmp:
            self.remCLCcontrol(v, w)
        self.cn.G.node[v]['isCLC'] = False
        if self.cn.G.node[v]['CRC'] is not None:
            self.remCRCcontrol(self.cn.G.node[v]['CRC'], v)
        self.CLCs.remove(v)
    def remCLCcontrol(self, v, w):
        # Release CLC v's control of node w: free the reserved link
        # bandwidth and processing share and clear the bookkeeping.
        if not w in self.cn.G.node[v]['CLCcontrol']:
            print "Critical error: Tried to remove non-existing CLC control!"
            exit(1)
        path = self.cn.G.node[v]['CLCpaths'][w]
        for i in range(0, len(path) - 1):
            self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] += self.cn.b_CLC
        self.cn.G.node[v]['p_rem'] += self.cn.G.node[v]['ProcCLC'][w]
        del self.cn.G.node[v]['ProcCLC'][w]
        self.cn.G.node[v]['CLCcontrol'].remove(w)
        del self.cn.G.node[v]['CLCpaths'][w]
        self.cn.G.node[w]['CLCs'].remove(v)
        del self.cn.G.node[w]['pathtoCLC'][v]
        # A node without any remaining CLC is uncontrolled again.
        if len(self.cn.G.node[w]['CLCs']) == 0:
            self.Controlled.remove(w)
            self.state = "NOT SOLVED"
    def remFlowSat(self, f, stopiperf=True):
        # Unsatisfy flow f: release the bandwidth on all control paths and
        # the processing share at its CLC. When stopiperf is True, the
        # running traffic generator is stopped and the time spent doing so
        # is accounted in lastiperfstoptime (excluded from the runtime stat).
        v = self.cn.fdata[f]['CLC']
        for k in self.cn.Wb[f]:
            path = self.cn.G.node[v]['CLCpaths'][k]
            for i in range(0, len(path) - 1):
                self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] += self.cn.fdata[f]['b_flow']
        self.cn.G.node[v]['p_rem'] += self.cn.G.node[v]['ProcFlow'][f]
        del self.cn.G.node[v]['ProcFlow'][f]
        self.cn.G.node[v]['Satisfies'].remove(f)
        if stopiperf == True:
            tstopstart = time.time()
            self.stopTrafficGenerationForSingleFlow(fid=f)
            tstopend = time.time()
            self.lastiperfstoptime += tstopend - tstopstart
        self.cn.fdata[f]['isSat'] = False
        self.cn.fdata[f]['CLC'] = None
        self.cn.fdata[f]['isGen'] = False
        self.Satisfied.remove(f)
def addFlow(self, stime=0, dur=None, amount=1):
for i in range(1, amount + 1):
self.cn.addFlow(stime=stime, dur=dur)
if self.flexOperation and len(self.CLCs) > 0:
if amount == 1:
self.browseCurrentCLCsforSingleFlow(self.cn.F[-1])
else:
self.browseCurrentCLCs()
self.checkLowload()
    def remFlow(self, f, stopiperf=True):
        # Remove flow f from the network entirely (unsatisfying it first if
        # needed); in flexible operation, drop CLC controls that only
        # existed for this flow's observation points.
        # self.cn.fdata[-f] = copy.deepcopy(self.cn.fdata[f]) # uncomment ONLY for debbuging!
        if self.cn.fdata[f]['isSat'] == True:
            self.remFlowSat(f, stopiperf)
        tmplist = list(self.cn.Wb[f])
        self.cn.remFlow(f)
        if self.flexOperation:
            self.cleanupCLCcontrols(tmplist)
def clearFlows(self):
tmp = list(self.cn.F)
for f in tmp:
self.remFlow(f)
    def browseCurrentCLCs(self):
        # Incremental pass over the already active CLCs: extend their
        # control sets and satisfy flows before any new CLC is activated.
        # Mirrors the addNewCLC loop, but with per-CLC potential-flow sets
        # (pf) and new-control counters (nnc).
        self.updateVCRatio()
        paths = []
        pf = {}    # per-CLC set of flows it could currently satisfy
        nnc = {}   # per-CLC count of newly controlled nodes in this pass
        for v in self.CLCs:
            pf[v] = self.updatePotentialFlows(set([]), v, self.cn.G.node[v]['CLCcontrol'])
            nnc[v] = 0
            for i in list(set(self.cn.V) - set(self.cn.G.node[v]['CLCcontrol'])):
                if i not in self.Controlled or len(set(self.cn.Wf[i]) - set(self.Satisfied)) > 0:
                    paths.append((nx.shortest_path(self.cn.G, source=v, target=i), i in self.Controlled, len(set(self.cn.Wf[i]) - set(self.Satisfied))))
        # Ordering: prefer uncontrolled/near nodes while the placement is
        # incomplete, flow-rich nodes afterwards.
        if len(self.Controlled) < len(self.cn.V):
            paths.sort(key=lambda x: x[1])
            paths.sort(key=lambda x: len(x[0]))
            notyetsolved = True
        else:
            paths.sort(key=lambda x: len(x[0]))
            paths.sort(key=lambda x: x[2], reverse=True)
            notyetsolved = False
        while (len(paths) > 0 or sum(len(pf[v]) for v in pf) > 0) and (len(self.Controlled) < len(self.cn.V) or len(self.Satisfied) < len(self.cn.F)):
            if notyetsolved and len(self.Controlled) == len(self.cn.V):
                paths.sort(key=lambda x: len(x[0]))
                paths.sort(key=lambda x: x[2], reverse=True)
                notyetsolved = False
            if sum(len(pf[v]) for v in pf) > 0 and (len(paths) == 0 or len(self.Controlled) == len(self.cn.V)):
                currv = [v for v in pf if len(pf[v]) > 0][0]
            else:
                currv = paths[0][0][0]
            if self.CLCload(currv) > 0.999:
                # CLC effectively full: stop considering it in this pass.
                pf[currv] = set([])
                paths = [p for p in paths if p[0][0] <> currv]
            elif len(pf[currv]) > 0 and (len(paths) == 0 or nnc[currv] >= self.VCRatio or len(self.Controlled) == len(self.cn.V)):
                # Satisfy a flow at currv.
                f = self.getFlow(pf[currv], self.getFlowOption)
                flowsat = False
                tmp = self.checkFlowSat(currv, f)
                if tmp == True:
                    self.addFlowSat(currv, f)
                    flowsat = True
                if flowsat == True:
                    # Flow gone: drop it from every CLC's candidate set.
                    for w in pf:
                        if f in pf[w]:
                            pf[w].remove(f)
                else:
                    pf[currv].remove(f)
            else:
                # Grab the next node control (here currv stems from
                # paths[0], so the path's origin equals currv).
                if len(paths) == 0:
                    break
                p = list(paths[0][0])
                del paths[0]
                tmp = self.checkCLC(p)
                if tmp == True:
                    if p[-1] not in self.Controlled:
                        nnc[currv] += 1
                    self.addCLCcontrol(p)
                    pf[currv] = self.updatePotentialFlows(pf[currv], currv, [p[-1]])
                elif tmp > 2:
                    # Processing exhausted at currv: retire it for this pass.
                    paths = [p for p in paths if p[0][0] <> currv]
                    pf[currv] = set([])
def browseCurrentCLCsforSingleFlow(self,f):
CLCstmp = list([c for c in self.CLCs if set(self.cn.Wb[f]) <= set(self.cn.G.node[c]['CLCcontrol'])])
CLCstmp.sort(key=lambda c: sum(len(set(self.cn.G.node[c]['Satisfies']) & set(self.cn.Wf[v])) for v in self.cn.Wb[f]), reverse=True)
CLCstmp.sort(key=lambda c: sum(len(nx.shortest_path(self.cn.G, source=c, target=i)) for i in self.cn.Wb[f]))
for c in CLCstmp:
tmp = self.checkFlowSat(c,f)
if tmp == True:
self.addFlowSat(c,f)
return 1
CLCstmp = list([c for c in self.CLCs if not set(self.cn.Wb[f]) <= set(self.cn.G.node[c]['CLCcontrol'])])
#CLCstmp.sort(key=lambda c: sum(len(nx.shortest_path(self.cn.G, source=c, target=i)) for i in self.cn.Wb[f]))
#CLCstmp.sort(key=lambda c: len(set(self.cn.Wb[f]) - set(self.cn.G.node[c]['CLCcontrol'])))
CLCstmp.sort(key=lambda c: sum(len(nx.shortest_path(self.cn.G, source=c, target=i)) for i in self.cn.Wb[f] if not i in self.cn.G.node[c]['CLCcontrol']))
for c in CLCstmp:
paths = (nx.shortest_path(self.cn.G, source=c, target=i) for i in self.cn.Wb[f] if not i in self.cn.G.node[c]['CLCcontrol'])
for p in paths:
tmp = self.checkCLC(p)
if tmp == True:
self.addCLCcontrol(p)
else:
break
if tmp == True:
flowsat = False
tmp = self.checkFlowSat(c,f)
if tmp == True:
self.addFlowSat(c,f)
flowsat = True
if flowsat == False:
for p in paths:
self.remCLCcontrol(c,p[-1])
    def rearrangeCLCs(self):
        # Consolidate CRC assignments: try to move the controls of lightly
        # loaded CRCs (fewest controls first) onto more heavily loaded ones,
        # so that small CRCs can be emptied and deactivated. checklist marks
        # CRCs that received controls and therefore must not be emptied.
        if len(self.CRCs) <= 1:
            return 0
        ctmp = list(self.CRCs)
        ctmp.sort(key=lambda c: len(self.cn.G.node[c]['CRCcontrol']))
        checklist = [1 for c in ctmp]
        for i in range(0,len(ctmp)):
            if checklist[i] == 0:
                continue
            c = ctmp[i]
            vtmp = list(self.cn.G.node[c]['CRCcontrol'])
            for v in vtmp:
                # Walk the bigger CRCs from the back; stop at index i so a
                # CRC never hands controls to a smaller one.
                for j,d in reversed(list(enumerate(ctmp))):
                    if j <= i:
                        break
                    else:
                        p = nx.shortest_path(self.cn.G, source=d, target=v)
                        if self.checkCRC(p) == True:
                            self.remCRCcontrol(c,v)
                            self.addCRCcontrol(p)
                            checklist[j] = 0
                            break
    def cleanupCLCcontrols(self,nodelist):
        # Drop redundant CLC controls for the given nodes: a control can go
        # when the node keeps at least one other CLC and the dropped CLC
        # satisfies none of the node's flows. Shuffling randomizes which of
        # several redundant controls is removed.
        vtmp = list(nodelist)
        random.shuffle(vtmp)
        for v in vtmp:
            ctmp = list(self.cn.G.node[v]['CLCs'])
            random.shuffle(ctmp)
            for c in ctmp:
                if c <> v and len(self.cn.G.node[v]['CLCs']) > 1 and len(set(self.cn.G.node[c]['Satisfies']) & set(self.cn.Wf[v])) == 0:
                    self.remCLCcontrol(c,v)
                    self.cleanedUpCLCcontrols += 1
def CLCload(self,c): # relative load: used for statistics
return 1.0 - self.cn.G.node[c]['p_rem']/self.cn.G.node[c]['p_node']
def absCLCload(self,c): # absolute load: used for Lowload detection
return self.cn.G.node[c]['p_node'] - self.cn.G.node[c]['p_rem']
def getAverageCLCload(self):
return sum(self.CLCload(c) for c in self.CLCs)/len(self.CLCs)
def getTotalAbsCLCload(self):
return sum(self.absCLCload(c) for c in self.CLCs)
def getCLCwithLeastAbsLoad(self):
CLCstmp = list(self.CLCs)
CLCstmp.sort(key=lambda c: self.absCLCload(c))
return CLCstmp[0]
    def getCLCestimate(self):
        # Estimate how many CLCs would suffice for the current total load:
        # greedily stack the highest-capacity (highest-loaded) CLCs until
        # L_lowload (e.g. 90%) of their combined capacity covers the total
        # absolute load.
        CLCstmp = list(self.CLCs)
        CLCstmp.sort(key=lambda c: self.absCLCload(c), reverse=True)
        totalload = self.getTotalAbsCLCload()
        est = 0
        psum = 0
        while psum * self.L_lowload < totalload and est < len(CLCstmp):
            c = CLCstmp[est]
            psum += self.cn.G.node[c]['p_node']
            est += 1
        return est
    def updateTime(self,t):
        # Advance the simulation clock and re-evaluate the lowload state
        # machine (only used when running in simulation mode).
        self.current_time = t
        self.checkLowload()
    def checkLowload(self):
        # Lowload alarm logic: raise an alarm when fewer CLCs than currently
        # active would suffice; once the alarm has persisted for T_lowload
        # (simulated) seconds, arm LL_execution so the next cpgreedy run
        # consolidates CLCs. The alarm clears as soon as the estimate
        # matches the active count again.
        est = self.getCLCestimate()
        if est < len(self.CLCs):
            if self.LL_alarm == False:
                self.LL_alarm = True
                self.LL_alarm_time = self.current_time
            if self.LL_alarm == True and self.current_time - self.LL_alarm_time > self.T_lowload:
                self.LL_alarm = False
                self.LL_execution = True
        else:
            self.LL_alarm = False
    def lowload(self):
        # Execute the lowload consolidation: shed the least-loaded CLCs down
        # to the estimated required number, re-satisfy displaced flows with
        # the remaining CLCs, and compact the CRC assignments.
        est = self.getCLCestimate()
        while len(self.CLCs) > est:
            self.remCLC(self.getCLCwithLeastAbsLoad())
        if len(self.Satisfied) < len(self.cn.F):
            self.browseCurrentCLCs()
        if len(self.CRCs) > 1:
            self.rearrangeCLCs()
        self.LL_execution = False
def getAverageCLCpathlength(self):
return sum(len(self.cn.G.node[c]['CLCpaths'][v]) for c in self.CLCs for v in self.cn.G.node[c]['CLCcontrol']) / sum(len(self.cn.G.node[c]['CLCcontrol']) for c in self.CLCs)
def getAverageCRCpathlength(self):
return sum(len(self.cn.G.node[c]['CRCpaths'][v]) for c in self.CRCs for v in self.cn.G.node[c]['CRCcontrol']) / sum(len(self.cn.G.node[c]['CRCcontrol']) for c in self.CRCs)
def getAverageLinkUsage(self):
return sum(self.cn.G[u][v]['b_rem']/self.cn.G[u][v]['b_cap'] for u,v in self.cn.G.edges()) / self.cn.G.number_of_edges()
def CLCcontrolRatio(self):
return sum(len(self.cn.G.node[c]['CLCcontrol']) for c in self.CLCs) / len(self.cn.V)
def flowUsesLink(self,f,v,w):
if self.cn.fdata[f]['isSat'] == True:
c = self.cn.fdata[f]['CLC']
for k in self.cn.Wb[f]:
path = self.cn.G.node[k]['pathtoCLC'][c]
if any(([v,w] == path[i:i+1]) for i in xrange(len(path)-1)) or any(([w,v] == path[i:i+1]) for i in xrange(len(path)-1)):
return True
return False
def CLCoutput(self, c):
out = "Data for CLC " + str(c) + ":\n"
out += "Load: " + str(self.CLCload(c)) + "\n"
out += "p_rem: " + str(self.cn.G.node[c]['p_rem']) + ", Nodes controlled: " + str(len(self.cn.G.node[c]['CLCcontrol'])) + ", Flows satisfied: " + str(len(self.cn.G.node[c]['Satisfies'])) + "\n"
if len(self.cn.G.node[c]['Satisfies']) > 0:
out += "Biggest flow satisfied: " + str(max(self.cn.fdata[f]['p_flow'] for f in self.cn.G.node[c]['Satisfies'])) + "\n"
return out
# ------------------------------------------------------------------------------------------------------------
# Testbed Code
# ------------------------------------------------------------------------------------------------------------
    def setupEmulationNetwork(self):
        # Build the Mininet/MaxiNet topology mirroring self.cn: one OVS
        # switch per graph node, one host attached to each switch, and a
        # switch-to-switch link per graph edge. Afterwards a socat UDP echo
        # service is started on every potential controller host.
        topo = Topo(link=TCLink, host=CPULimitedHost)
        # linkopts = dict(bw=1000, delay='0ms', loss=0, use_htb=True)
        linkopts = dict() # delay='0ms', use_hfsc=True
        # clcpower = 0.7/len(self.cn.C) #TODO
        # cpupower = 0.1/(len(self.cn.V) - len(self.cn.C))
        # Add switches and associated hosts
        s = 1
        for n in self.cn.G.node:
            hName = 'h' + str(n)
            hIp = Tools.makeIP(n + 1)
            hMac = Tools.makeMAC(n + 1)
            # if n in self.cn.C:
            # cpupower = clcpower
            # hObj = topo.addHost(name=hName, ip=hIp, mac=hMac, cpu=cpupower)
            hObj = topo.addHost(name=hName, ip=hIp, mac=hMac)
            self.Hosts[hName] = {'obj': hObj, 'ip': hIp, 'mac': hMac}
            sName = 's' + str(n)
            sDpid = Tools.makeDPID(n + 1)
            sListenPort = (13000 + s - 1)
            switchopts = dict(listenPort=sListenPort)
            sObj = topo.addSwitch(name=sName, dpid=sDpid, **switchopts)
            self.Switches[sName] = {'obj': sObj, 'dpid': sDpid, 'listenport': sListenPort}
            s += 1
            topo.addLink(hObj, sObj, **linkopts)
        # Mirror every graph edge as a switch-to-switch link.
        for key, value in self.cn.G.edges():
            sName1 = 's' + str(key)
            sName2 = 's' + str(value)
            sObj1 = self.Switches[sName1]['obj']
            sObj2 = self.Switches[sName2]['obj']
            topo.addLink(sObj1, sObj2, **linkopts)
        if self.emulator == "Mininet":
            # Mininet
            switch = functools.partial(OVSSwitch, protocols='OpenFlow13')
            net = Mininet(topo=topo, switch=switch, controller=RemoteController, host=CPULimitedHost, link=TCLink)
            net.start()
            # Save the Mininet object for future reference
            self.TestbedNetwork = net
        elif self.emulator == "MaxiNet":
            # MaxiNet
            cluster = maxinet.Cluster()
            exp = maxinet.Experiment(cluster, topo, switch=OVSSwitch)
            exp.setup()
            # Save the Experiment object for future reference
            for switch in exp.switches:
                exp.get_worker(switch).run_cmd('ovs-vsctl -- set Bridge %s ' % switch.name + 'protocols=OpenFlow10,OpenFlow12,OpenFlow13')
            self.TestbedNetwork = exp
        else:
            print("Error: Emulator missing!")
            exit(1)
        # Start iperf server in potential CLC host
        for n in self.cn.G.node:
            if n in self.cn.C:
                hName = 'h' + str(n)
                hObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
                hObj.cmd("socat -T 5 UDP-LISTEN:5001,fork,reuseaddr EXEC:\'/bin/cat\' &")
                # hObj.cmd("ncat -e /bin/cat -k -u -m 1000 -l 5001 &")
                # hObj.cmd("iperf -u -f 'b' -s > /tmp/iperf_server_" + hName + ".log &")
        # for n in self.cn.G.node:
        # if n in self.cn.C:
        # hName = 'h' + str(n)
        # hObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
        # hObj.cmd("ps -eaf|grep \"socat -T 5 UDP-LISTEN:5001,fork,reuseaddr EXEC:/bin/cat\"|grep -v \"grep socat\"|awk \'{print $2}\'|tr \'\\n\' \',\' > /tmp/socatpid.txt")
        # f = open("/tmp/socatpid.txt", "r")
        # line = f.readline()
        # linetoken = re.split(",", line)
        # for onelinetocken in linetoken:
        # if len(onelinetocken) > 1 and onelinetocken not in self.ParentProcess:
        # self.ParentProcess.append(onelinetocken)
        # f.close()
        # print self.ParentProcess
def populateNetworkLinks(self):
urlpath = "http://127.0.0.1:8080/fcapfnetworkcontroller/topology/links"
resp = requests.get(urlpath)
self.Links = resp.json()
# print self.Links
def checkRoutingPath(self, src, dst, path):
key = (src, dst, path)
self.CurrentRoutingPaths.append(key)
foundPath = key in self.RoutingPaths
if foundPath == False:
self.RoutingPaths.append(key)
return foundPath
    def modifyRoutingTable(self):
        # Install forwarding rules for every node<->CLC control path via the
        # SDN controller, then retire rules for paths no longer in use.
        for n in self.cn.G.node:
            node = self.cn.G.node[n]
            pathToCLC = node['pathtoCLC']
            # print pathToCLC
            for clc, path in pathToCLC.items():
                # print path
                src = clc
                dst = n
                foundPath = self.checkRoutingPath(src, dst, path)
                if foundPath == False:
                    self.addRoutingPath(src, dst, path)
        self.requestAddRoutingEntries()
        # print "Earlier Routing Entries"
        # self.RoutingEntries.sort()
        # print self.RoutingEntries
        self.clearObsoleteRoutingPaths()
        # print "After clean Routing Entries"
        # self.RoutingEntries.sort()
        # print self.RoutingEntries
        # print "Current Routing Entries"
        # self.CurrentEntries.sort()
        # print self.CurrentEntries
        # Do not delete forwarding entries from switch, because there might be some flow still running,
        # we will set the timeout for the entries, so that the entries will be removed automatically
        # for being idle for sometime, see the function we commented the call for forward entry deletion.
        self.requestDeleteRoutingEntries()
    def addRoutingPath(self, src, dst, path):
        """Queue bidirectional forwarding entries for the path from *src* to *dst*.

        Walks the switch path hop by hop and queues, per hop, ARP and IP
        entries in both directions via addRoutingEntry().  NOTE(review):
        assumes every host attaches to port 1 of its switch -- confirm
        against the topology construction code.
        """
        links = self.Links
        nw_src = Tools.makeIP(src + 1)
        nw_dst = Tools.makeIP(dst + 1)
        if len(path) > 1:
            i = 0
            while i < len(path) - 1:
                n1 = path[i]
                n2 = path[i + 1]
                if n1 == src and n2 == dst:
                    # single-hop path: program both end switches in one step
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    inport = port2
                    switch = node2
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                elif n1 == src and n2 != dst:
                    # first hop of a longer path: source switch plus the next transit switch
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                elif n1 != src and n2 != dst:
                    # transit hop: forward from the previous link into the next one
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                elif n1 != src and n2 == dst:
                    # final hop: deliver to the host port of the destination switch
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                else:
                    pass  # do nothing
                i += 1
def addRoutingEntry(self, switch, inport, nwsrc, nwdst, action):
dpid = "0000" + self.Switches[switch]['dpid']
# Add route entry for ARP
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"action": int(action),
"dltype": "arp",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
# Add route entry for TCP
payload = {"inport": int(inport),
"action": int(action),
"dltype": "ip",
"ipsrc": nwsrc,
"ipdest": nwdst}
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"action": int(action),
"dltype": "ip",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
def requestAddRoutingEntries(self):
try:
url = "http://127.0.0.1:8080/fcapfnetworkcontroller/flowtable/addflows"
data = json.dumps(self.JsonEntries)
resp = requests.put(url, data=data)
if resp.status_code != 200:
print "Something is wrong adding Routing entries, check controller log!!"
except:
print "Controller is not available!!"
del self.JsonEntries
self.JsonEntries = []
def clearObsoleteRoutingPaths(self):
entries = copy.deepcopy(self.RoutingPaths)
for entry in entries:
(src, dst, path) = entry
if entry not in self.CurrentRoutingPaths:
self.deleteRoutingPath(src, dst, path)
self.RoutingPaths.remove(entry)
del entries
del self.CurrentRoutingPaths
self.CurrentRoutingPaths = []
    def deleteRoutingPath(self, src, dst, path):
        """Queue removal of the forwarding entries installed by addRoutingPath().

        Walks the same hop structure as addRoutingPath() and queues, per hop,
        removal of the ARP and IP entries in both directions via
        deleteRoutingEntry().  NOTE(review): assumes every host attaches to
        port 1 of its switch -- confirm against the topology construction code.
        """
        links = self.Links
        nw_src = Tools.makeIP(src + 1)
        nw_dst = Tools.makeIP(dst + 1)
        if len(path) > 1:
            i = 0
            while i < len(path) - 1:
                n1 = path[i]
                n2 = path[i + 1]
                if n1 == src and n2 == dst:
                    # single-hop path: clean both end switches in one step
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    inport = port2
                    switch = node2
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                elif n1 == src and n2 != dst:
                    # first hop of a longer path: source switch plus the next transit switch
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                elif n1 != src and n2 != dst:
                    # transit hop
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                elif n1 != src and n2 == dst:
                    # final hop towards the destination host
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                else:
                    pass  # do nothing
                i += 1
def deleteRoutingEntry(self, switch, inport, nwsrc, nwdst):
dpid = "0000" + self.Switches[switch]['dpid']
# Delete route entry for ARP
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"dltype": "arp",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
# Delete route entry for IP
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"dltype": "ip",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
def requestDeleteRoutingEntries(self):
try:
url = "http://127.0.0.1:8080/fcapfnetworkcontroller/flowtable/delflows"
data = json.dumps(self.JsonEntries)
resp = requests.put(url, data=data)
if resp.status_code != 200:
print "Something is wrong deleting Routing entries, check controller log!!"
except:
print "Controller is not available!!"
del self.JsonEntries
self.JsonEntries = []
def checkBucketInCLC(self, clc, bucketId):
key = (clc, bucketId)
found = key in self.CLCBucketIdList
if found == False:
self.CLCBucketIdList.append(key)
return found
    def generateTrafficFlow(self):
        """Start iperf traffic for every satisfied, not-yet-generated flow.

        Configures a delay bucket on the CLC host (once per (CLC, bucket_id)
        pair) and starts one UDP iperf client per base station of the flow.
        Returns the number of flows for which clients were started.
        """
        iperfcount = 0
        for fid in self.cn.fdata:
            flow = self.cn.fdata[fid]
            if flow['isSat'] == True and flow['isGen'] == False:
                self.getHostForFlow(fid)
                dur = flow['duration']
                # Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half
                bw = flow['b_flow'] / 2
                # control delay budget in ms, from the processing share granted by the CLC
                delay = self.cn.fdata[fid]['p_flow'] / self.cn.G.node[flow['CLC']]['ProcFlow'][fid] * 1000
                bucket_id = int(round(delay * 10))
                hName = "h" + str(flow['CLC'])
                clcObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
                clcIP = Tools.makeIP(flow['CLC'] + 1)
                for hObj in self.hObjs:
                    (n, obj) = hObj
                    # 'id' shadows the builtin; kept for byte-compatibility.
                    id = bucket_id + n * 1000
                    # NOTE(review): bucket_id is rebound here, so later hosts in
                    # this loop build on the already-shifted value -- confirm
                    # this accumulation is intended.
                    bucket_id = bucket_id + n * 65536
                    hexFid = "0x%0.8x" % bucket_id
                    if False == self.checkBucketInCLC(flow['CLC'], bucket_id):
                        clcObj.cmd(g_conf_script_path + " add " + hName + "-eth0 1 " + str(id) + " " + str(delay) + " " + hexFid)
                    obj.cmd(g_iperf_path + " -u -c " + clcIP + " -f b -b " + str(bw) + " -t " + str(dur) + " -k " + str(bucket_id) + " > /tmp/" + self.iperf_log_tag + "iperf_client_" + str(fid) + "_" + str(bw) + "_" + str(dur) + "_" + str(obj.name) + "_to_" + str(hName) + ".log &")
                self.cn.fdata[fid]['genstime'] = time.time()
                self.cn.fdata[fid]['isGen'] = True
                iperfcount += 1
                self.TotalSatisfied.append(fid)
                if fid in self.TotalUnsatisfied:
                    self.TotalUnsatisfied.remove(fid)
            elif flow['isSat'] == False:
                self.TotalUnsatisfied.append(fid)
                print "Flow not satisfied : " + str(flow)
            else:
                pass # do nothing
        return iperfcount
def generateTrafficForSingleFlow(self, fid):
flow = self.cn.fdata[fid]
if flow['isSat'] == True and flow['isGen'] == False:
self.getHostForFlow(fid)
dur = flow['duration']
# Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half
bw = flow['b_flow'] / 2
delay = self.cn.fdata[fid]['p_flow'] / self.cn.G.node[flow['CLC']]['ProcFlow'][fid] * 1000
bucket_id = int(round(delay * 10))
hName = "h" + str(flow['CLC'])
clcObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
clcIP = Tools.makeIP(flow['CLC'] + 1)
for hObj in self.hObjs:
(n,obj)=hObj
id = bucket_id + n * 1000
bucket_id = bucket_id + n * 65536
hexFid = "0x%0.8x" % bucket_id
if False == self.checkBucketInCLC(flow['CLC'], bucket_id):
clcObj.cmd(g_conf_script_path + " add " + hName + "-eth0 1 " + str(id) + " " + str(delay) + " " + hexFid)
obj.cmd(g_iperf_path + " -u -c " + clcIP + " -f b -b " + str(bw) + " -t " + str(dur) + " -k " + str(bucket_id) + " > /tmp/" + self.iperf_log_tag + "iperf_client_" + str(fid) + "_" + str(bw) + "_" + str(dur) + "_" + str(obj.name) + "_to_" + str(hName) + ".log &")
self.cn.fdata[fid]['genstime'] = time.time()
self.cn.fdata[fid]['isGen'] = True
self.TotalSatisfied.append(fid)
if fid in self.TotalUnsatisfied:
self.TotalUnsatisfied.remove(fid)
elif flow['isSat'] == False:
self.TotalUnsatisfied.append(fid)
print "Flow not satisfied : " + str(flow)
else:
pass # do nothing
    def stopTrafficGenerationForSingleFlow(self, fid):
        """Kill the iperf clients of flow *fid* and bank its remaining duration.

        Only acts if the flow is satisfied, currently generating, and would
        still be running; the leftover duration is written back so the flow
        can be resumed later.
        """
        flow = self.cn.fdata[fid]
        currTime = time.time()
        # giving 0.01 sec extra time assuming the processing time of to start the ipref
        if (flow['duration'] > currTime - flow['genstime'] + 0.01) and flow['isSat'] == True and flow['isGen'] == True:
            # print "Why I am here duration " + str(flow['duration']) + " currTime " + str(currTime) + " genstime " + str(flow['genstime']) + " difference " + str(currTime - flow['genstime'])
            self.getHostForFlow(fid)
            clcIP = Tools.makeIP(flow['CLC'] + 1)
            dur = flow['duration']
            # Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half
            bw = flow['b_flow'] / 2
            # the exact command line identifies the iperf processes to kill
            searchStr = "iperf -u -c " + clcIP + " -f b -b " + str(bw) + " -t " + str(dur)
            for hObj in self.hObjs:
                (n,obj)=hObj
                obj.cmd("ps -eaf|grep \"" + searchStr + "\"|awk \'{print \"kill -9 \" $2}\'|sh")
            # remaining play time, so the flow can be restarted later
            self.cn.fdata[fid]['duration'] = flow['duration'] - (currTime - flow['genstime'])
            self.cn.fdata[fid]['isGen'] = False
            self.TotalFlowStopped.append(fid)
            if fid in self.TotalSatisfied:
                self.TotalSatisfied.remove(fid)
            # print "stopped flow for " + str(fid) + " and search string is " + searchStr
def getLinkPort(self, links, node1, node2):
"""Return ports of Links between node1 and node2"""
port1 = port2 = None
dpid1 = "0000" + self.Switches[node1]['dpid']
dpid2 = "0000" + self.Switches[node2]['dpid']
for link in links:
if (dpid1, dpid2) == (link['src']['dpid'], link['dst']['dpid']):
# print link
port1 = int(link['src']['port_no'], 16)
port2 = int(link['dst']['port_no'], 16)
break
elif (dpid1, dpid2) == (link['dst']['dpid'], link['src']['dpid']):
# print link
port1 = int(link['dst']['port_no'], 16)
port2 = int(link['src']['port_no'], 16)
break
else:
pass # do nothing
return port1, port2
def getHostForFlow(self, flow):
del self.hObjs
self.hObjs = []
for n in self.cn.Wb[flow]:
hName = 'h' + str(n)
if self.emulator == "Mininet":
# Mininet
self.hObjs.append((n,self.TestbedNetwork.getNodeByName(hName)))
elif self.emulator == "MaxiNet":
# MaxiNet
self.hObjs.append((n,self.TestbedNetwork.get_node(hName)))
else:
print("Error: Emulator missing!")
exit(1)
# return hObjs
| from __future__ import division
import networkx as nx
import sys, math, random, time, copy
from crowd_network import *
import pdb
# pdb.set_trace()
# Note: Code uses older terminology: CRC=RCA, CLC=LCA, (sometimes) Flow=DFG
from mininet.node import OVSSwitch
from mininet.node import RemoteController
from mininet.link import TCLink
from mininet.node import CPULimitedHost
from mininet.topo import Topo
from MaxiNet.Frontend import maxinet
from MaxiNet.tools import Tools
from mininet.net import Mininet
import functools
import subprocess, shlex
import requests
import json
from ryu.lib import dpid as dpid_lib
import traceback
import datetime
import re
g_iperf_path = "/home/maxinet/iperf2-code/src/iperf"
g_conf_script_path = "/home/maxinet/flexfcapf/code/configure_delay.sh"
g_echoserver_path = "python /home/maxinet/flexfcapf/code/echoserver.py"
class CPFlex:
    def __init__(self, filename=None, flowOption="LeastDemanding", scenario=None, modify_controllers=False, contrProb=None, cn=None, inputversion="flex", evalscen="generic", emulator=None):
        """Build a CPFlex controller-placement instance.

        Either reads the crowd network from *filename* or wraps an existing
        CrowdNetwork passed via *cn*.  *emulator* selects "Mininet" or
        "MaxiNet"; a missing emulator, an invalid network, or a network
        without potential controller hosts aborts the process.
        """
        if emulator is None:
            print("Error: Emulator missing!")
            exit(1)
        if filename is not None:
            # an explicit scenario overrides the weights stored in the file
            if scenario is None:
                read_weights_from_file = True
            else:
                read_weights_from_file = False
            self.cn = CrowdNetwork()
            valid_network = self.cn.generate_from_file(filename, read_weights_from_file, scenario, modify_controllers, contrProb, inputversion, evalscen)
            if valid_network:
                self.state = "NOT SOLVED"
            else:
                self.state = "INVALID NETWORK"
                print "Error: Invalid network!"
                exit(1)
        else:
            if cn is None:
                print "Error: Nothing to create CPFlex network from!"
                exit(1)
            else:
                self.cn = cn
                self.state = "NOT SOLVED"
        if len(self.cn.C) == 0:
            print "Error: Cannot work without potential hosts!"
            exit(1)
        # --- placement state ---
        self.getFlowOption = flowOption  # flow selection strategy used by getFlow()
        self.iterations = 0  # number of cpgreedy() runs so far
        self.Controlled = []  # nodes currently under at least one CLC
        self.CRCs = []  # active regional controllers
        self.CLCs = []  # active local controllers
        self.Satisfied = []  # flows currently satisfied
        self.uncontrolledCLCs = []  # CLCs that lost their CRC assignment
        self.VCRatio = len(self.cn.V)/(len(self.cn.C))  # nodes per candidate; paces control vs. flow assignment
        self.banlist = []  # candidates excluded during the current run
        self.flexOperation = False
        self.current_time = None # will be set by updateTime if run in simulation mode
        # --- low-load detection ---
        self.L_lowload = 0.9
        self.T_lowload = 60.0
        self.LL_alarm = False
        self.LL_alarm_time = None # will be set at first LL_alarm
        self.LL_execution = False
        self.lastruntime = 0
        self.lastiperfstoptime = 0
        # --- testbed / emulator state ---
        self.Switches = {}
        self.Hosts = {}
        self.Links = None  # topology links fetched from the controller REST API
        self.RoutingPaths = []  # (src, dst, path) triples installed on the switches
        self.TestbedNetwork = None
        self.ParentProcess = []
        self.TotalSatisfied = []
        self.TotalUnsatisfied = []
        self.TotalFlowStopped = []
        self.emulator = emulator
        self.CurrentRoutingPaths = []  # paths required by the latest placement
        self.hObjs = []  # (node, host object) pairs of the flow being processed
        self.JsonEntries = []  # queued flow-table add/delete requests
        self.CLCBucketIdList = []  # (clc, bucket_id) pairs already configured
        self.iperf_log_tag = ""
def scratchCopy(self):
cntmp = self.cn.copy()
cntmp.cleanup()
return CPFlex(filename=None, cn=cntmp)
    def cpgreedy(self):
        """Run one pass of the greedy controller placement heuristic.

        Repairs CRC assignments of orphaned CLCs, then adds CLCs until every
        node is controlled (phase 1), then until every flow is satisfied,
        banning CLCs that do not help (phase 2), and finally removes
        superfluous CLC controls.  Updates self.state and per-run statistics.
        """
        tstart = time.time()
        if self.state == "INVALID NETWORK":
            print "Error: Invalid network!"
            exit(1)
        self.banlist = []
        self.iterations += 1
        # for simulation stats
        self.newCLCcontrols = 0
        self.newFlowSats = 0
        self.cleanedUpCLCcontrols = 0
        self.lastruntime = 0
        self.lastiperfstoptime = 0
        if self.iterations > 1 and self.flexOperation == True:
            self.updateVCRatio()
        # re-attach CLCs that lost their CRC; drop those that cannot be re-attached
        while len(self.uncontrolledCLCs) > 0:
            v = self.uncontrolledCLCs[0]
            tmp = self.findCRC(v)
            if tmp == False:
                self.remCLC(v)
                self.uncontrolledCLCs.remove(v)
        if self.LL_execution == True:
            self.lowload()
        elif len(self.Controlled) < len(self.cn.V):
            self.browseCurrentCLCs()
        if len(self.Controlled) < len(self.cn.V):
            self.state = "NOT SOLVED"
            self.globalOption = "neighbors"
            # phase 1: add CLCs until every node is controlled
            while len(self.Controlled) < len(self.cn.V):
                if len(self.CLCs) == len(self.cn.C):
                    # every candidate is already active: force the remaining
                    # nodes under their nearest CLC as a last resort
                    if self.iterations > 1 and self.flexOperation == True:
                        self.browseCurrentCLCs()
                    self.forceControl()
                    break
                self.findCLC(self.globalOption)
        if len(self.Controlled) == len(self.cn.V):
            self.state = "Solved"
        if len(self.CLCs) < len(self.cn.C):
            self.globalOption = "flows"
            # phase 2: add CLCs until every flow is satisfied; ban CLCs whose
            # addition did not satisfy anything new
            while len(self.Satisfied) < len(self.cn.F):
                tmpsat = len(self.Satisfied)
                self.findCLC(self.globalOption)
                if tmpsat == len(self.Satisfied):
                    self.newCLCcontrols -= len(self.cn.G.node[self.CLCs[-1]]['CLCcontrol'])
                    self.banlist.append(self.CLCs[-1])
                    self.remCLC(self.CLCs[-1])
                if len(self.CLCs) + len(self.banlist) == len(self.cn.C):
                    break
        self.cleanupCLCcontrols(self.cn.V)
        tend = time.time()
        # exclude time spent stopping iperf clients from the reported runtime
        self.lastruntime = tend - tstart - self.lastiperfstoptime
def findCLC(self, option):
# tstart = time.time()
candidates = self.getCLCcandidates(option)
# tend = time.time()
# print "Candidate-Runtime: " + str(tend-tstart)
for v in candidates:
tmp = self.findCRC(v)
if tmp == True:
self.addNewCLC(v)
break
def forceControl(self): # force control of uncontrolled nodes to nearest CLC
uncontrolled = list(set(self.cn.V) - set(self.Controlled))
removed_flows = []
for v in uncontrolled:
CLCtmp = list(self.CLCs)
CLCtmp.sort(key=lambda c: len(nx.shortest_path(self.cn.G, source=c, target=v)))
c = CLCtmp[0]
path = nx.shortest_path(self.cn.G, source=c, target=v)
ftmp = list(self.cn.G.node[c]['Satisfies'])
ftmp.sort(key=lambda f: self.cn.G.node[c]['ProcFlow'][f], reverse=True)
for f in ftmp:
removed_flows.append((c,f))
self.remFlowSat(f)
tmp = self.checkCLC(path)
if tmp == True:
self.addCLCcontrol(path)
break
elif tmp == 2: # processing capacity is fine, now clear path
for i in range(0,len(path)-1):
ftmp2 = [f for f in self.cn.F if self.flowUsesLink(f,path[i],path[i+1]) == True]
ftmp2.sort(key=lambda f: self.cn.fdata[f]['b_flow'], reverse=True)
for x in ftmp2:
removed_flows.append((self.cn.fdata[f]['CLC'],f))
self.remFlowSat(f)
if self.cn.G.edge[path[i]][path[i+1]]['b_rem'] >= self.cn.b_CLC:
break
self.addCLCcontrol(path)
break
for c,f in removed_flows: # add back flows if possible
tmp = self.checkFlowSat(c,f)
if tmp == True:
self.addFlowSat(c,f)
    def getCLCcandidates(self, option=None):
        """Return CLC candidate nodes ranked by the selection strategy *option*.

        Options seen here: "neighbors", "isolated_nodes", "flows", "flows_nn",
        "isolated_flows", "isolated_flows2", "neighbors_and_flows".  When the
        current strategy cannot make progress, self.globalOption is switched
        to a fallback strategy and its ranking is returned instead.
        """
        candidates = list(set(self.cn.C) - (set(self.CLCs) | set(self.banlist)))
        # avoid CRCs to be used as CLCs as long as possible
        if len(set(candidates) - set(self.CRCs)) > 0:
            candidates = [c for c in candidates if not c in self.CRCs]
        remaining_nodes = set(self.cn.V) - set(self.Controlled)
        remaining_flows = set(self.cn.F) - set(self.Satisfied)
        if option == "neighbors":
            # rank by number of uncontrolled nodes in the closed neighborhood
            ctmp = [(k, len((set([k]) | set(self.cn.G.neighbors(k))) - set(self.Controlled))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            bestvalue = ctmp[0][1]
            if bestvalue > 0:
                candidates = [x[0] for x in ctmp]
            else:
                self.globalOption = "isolated_nodes"
                candidates = self.getCLCcandidates("isolated_nodes")
        elif option == "isolated_nodes":
            # rank candidates by proximity to any still-uncontrolled node
            paths = []
            for i in remaining_nodes:
                for j in candidates:
                    paths.append(nx.shortest_path(self.cn.G, source=j, target=i))
            paths.sort(key=len)
            candidates = []
            for p in paths:
                if not p[0] in candidates:
                    candidates.append(p[0])
        elif option == "flows":
            # rank by number of unsatisfied flows at the candidate itself
            ctmp = [(k, len(set(self.cn.Wf[k]) - set(self.Satisfied))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            bestvalue = ctmp[0][1]
            if bestvalue > 0:
                candidates = [x[0] for x in ctmp]
            else:
                self.globalOption = "isolated_flows"
                candidates = self.getCLCcandidates("isolated_flows")
        elif option == "flows_nn": # CAUTION: very slow for many flows in the network! Currently not used.
            ctmp = [(k, len(set(self.cn.Wf[k]) - set(self.Satisfied)) + sum(len(set(self.cn.Wf[j]) - set(self.Satisfied)) for j in self.cn.G.neighbors(k))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            bestvalue = ctmp[0][1]
            if bestvalue > 0:
                candidates = [x[0] for x in ctmp]
            else:
                self.globalOption = "isolated_flows"
                candidates = self.getCLCcandidates("isolated_flows")
        elif option == "isolated_flows":
            # rank candidates by proximity to base stations of unsatisfied flows
            paths = []
            for f in remaining_flows:
                for i in self.cn.Wb[f]:
                    for j in candidates:
                        paths.append(nx.shortest_path(self.cn.G, source=j, target=i))
            paths.sort(key=len)
            candidates = []
            for p in paths:
                if not p[0] in candidates:
                    candidates.append(p[0])
        elif option == "isolated_flows2": # just a test, currently not used.
            nodes_with_flows = [(k, len(set(self.cn.Wf[k]) - set(self.Satisfied))) for k in self.cn.V]
            nodes_with_flows.sort(key=lambda x: x[1], reverse=True)
            node_with_most_flows = nodes_with_flows[0][0]
            paths = []
            for j in candidates:
                paths.append(nx.shortest_path(self.cn.G, source=j, target=node_with_most_flows))
            paths.sort(key=len)
            candidates = [p[0] for p in paths]
        elif option == "neighbors_and_flows": # currently not used.
            ctmp = [(k, len(set([k]) | set(self.cn.G.neighbors(k)) - set(self.Controlled)) + len(set(self.cn.Wf[k]) - set(self.Satisfied))) for k in candidates]
            ctmp.sort(key=lambda x: x[1], reverse=True)
            candidates = [x[0] for x in ctmp]
        return candidates
    def addNewCLC(self, v):
        """Activate *v* as a CLC and greedily assign node controls and flow satisfactions.

        Alternates between taking over nearby nodes (shortest paths first) and
        satisfying flows that become fully coverable, pacing the two via
        self.VCRatio.  If *v* itself cannot host a CLC it is removed from the
        candidate set.
        """
        paths = []
        pf = set([])        # flows that could be satisfied by v
        nc = 0              # number of controls established by v
        nnc = 0             # number of newly controlled (previously uncontrolled) nodes
        fs = 0              # number of flows satisfied by v
        tmp = self.checkCLC([v])
        if tmp == True:
            nc += 1
            if v not in self.Controlled:
                nnc += 1
            self.addCLCcontrol([v])
            pf = self.updatePotentialFlows(pf, v, [v])
        else:
            # v cannot even control itself: drop it as a candidate host
            self.cn.C.remove(v)
        for i in self.cn.V:
            if i <> v and (i not in self.Controlled or len(set(self.cn.Wf[i]) - set(self.Satisfied)) > 0):
                paths.append((nx.shortest_path(self.cn.G, source=v, target=i), i in self.Controlled, len(set(self.cn.Wf[i]) - set(self.Satisfied))))
        if len(self.Controlled) < len(self.cn.V):
            # prefer uncontrolled nodes, nearest first
            paths.sort(key=lambda x: x[1])
            paths.sort(key=lambda x: len(x[0]))
            notyetsolved = True
        else:
            # everything controlled: prefer nodes with many unsatisfied flows
            paths.sort(key=lambda x: len(x[0]))
            paths.sort(key=lambda x: x[2], reverse=True)
            notyetsolved = False
        while (len(paths) > 0 or len(pf) > 0) and (len(self.Controlled) < len(self.cn.V) or len(self.Satisfied) < len(self.cn.F)):
            # print "Controlled: " + str(len(self.Controlled)) + " / " + str(len(self.cn.V)) + " Satisfied: " + str(len(self.Satisfied)) + " / " + str(len(self.cn.F))
            # print "Current CLC: " + str(v) + " NNC: " + str(nnc) + " VCRatio: " + str(self.VCRatio)
            # time.sleep(0.1)
            if notyetsolved and len(self.Controlled) == len(self.cn.V):
                # all nodes just became controlled: re-rank by unsatisfied flows
                paths.sort(key=lambda x: len(x[0]))
                paths.sort(key=lambda x: x[2], reverse=True)
                notyetsolved = False
            if (len(pf) > 0 and (nnc >= self.VCRatio or len(self.Controlled) == len(self.cn.V))) or len(paths) == 0:
                # satisfy a flow once enough new nodes were taken over
                f = self.getFlow(pf, self.getFlowOption)
                tmp = self.checkFlowSat(v, f)
                if tmp == True:
                    self.addFlowSat(v, f)
                    fs += 1
                pf.remove(f)
            else:
                if len(paths) == 0:
                    break
                p = list(paths[0][0])
                del paths[0]
                tmp = self.checkCLC(p)
                if tmp == True:
                    nc += 1
                    if p[-1] not in self.Controlled:
                        nnc += 1
                    self.addCLCcontrol(p)
                    pf = self.updatePotentialFlows(pf, v, [p[-1]])
                elif tmp > 2: # issue is exhausted processing capacity
                    break
def updateVCRatio(self):
self.VCRatio = (len(self.cn.V) - len(self.Controlled)) / len(self.cn.C)
def updatePotentialFlows(self, pf, v, nn):
cpf = set([])
for w in nn:
cpf = cpf | set(self.cn.Wf[w])
for f in cpf:
if self.cn.fdata[f]['isSat'] == False and set(self.cn.Wb[f]) <= set(self.cn.G.node[v]['CLCcontrol']):
pf.add(f)
return pf
def getFlow(self, pf, option):
tmp = list(pf)
if len(tmp) == 0:
return None
else:
if option == "MostDemanding":
tmp.sort(key=lambda f: self.cn.fdata[f]['p_flow'], reverse=True)
elif option == "LeastDemanding":
tmp.sort(key=lambda f: self.cn.fdata[f]['p_flow'])
return tmp[0]
def findCRC(self, v):
# check already active CRCs first
paths = []
for i in self.CRCs:
paths.append(nx.shortest_path(self.cn.G, source=i, target=v))
paths.sort(key=len)
for p in paths:
if self.checkCRC(p) == True:
self.addCRCcontrol(p)
return True
# need to add a new CRC, at first try to avoid active CLCs and CLC candidate
return self.findNewCRC(v)
    def findNewCRC(self, v):
        """Place a new CRC to control node *v*; return True on success.

        Preference order: unused candidates (the first CRC is placed
        centrally), then *v* itself, then already-active CLCs.
        """
        paths = []
        for i in list(set(self.cn.C) - (set(self.CRCs) | set(self.CLCs) | set([v]))):
            paths.append(nx.shortest_path(self.cn.G, source=i, target=v))
        if len(self.CRCs) == 0: # first CRC should be placed centrally
            paths.sort(key=lambda p: sum(len(nx.shortest_path(self.cn.G, source=p[0], target=c)) for c in self.cn.C))
        else:
            paths.sort(key=lambda p: len(p))
        for p in paths:
            if self.checkCRC(p) == True:
                self.addCRCcontrol(p)
                return True
        # last option: try CLC candidate, then already active CLCs
        if self.checkCRC([v]) == True:
            self.addCRCcontrol([v])
            return True
        paths = []
        for i in self.CLCs:
            paths.append(nx.shortest_path(self.cn.G, source=i, target=v))
        paths.sort(key=len)
        for p in paths:
            if self.checkCRC(p) == True:
                self.addCRCcontrol(p)
                return True
        return False
    # checks if a certain CRC control can be established
    def checkCRC(self,path):
        """Return True iff the CRC at path[0] could control the node at path[-1].

        Checks the CRC control-latency budget (round-trip link latencies plus
        the processing-share term) against l_CRC, and per-link residual
        bandwidth against b_CRC.
        """
        v = path[0]
        if sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)) + self.cn.p_CRC/self.cn.G.node[v]['p_rem'] > self.cn.l_CRC:
            return False
        for i in range(0,len(path)-1):
            if self.cn.G.edge[path[i]][path[i+1]]['b_rem'] < self.cn.b_CRC:
                return False
        return True
    # checks if a certain CLC control can be established
    def checkCLC(self,path):
        """Check whether the CLC at path[0] could control the node at path[-1].

        Returns True if feasible, 4 if the latency/processing budget l_CLC is
        exceeded, and 2 if some link lacks bandwidth (after reserving room for
        two future CRC assignments).  Callers must compare with ``== True``.
        """
        v = path[0]
        if sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)) + self.cn.p_CLC/self.cn.G.node[v]['p_rem'] > self.cn.l_CLC:
            return 4
        for i in range(0,len(path)-1):
            # reserve capacity for 2 CRC assignments (might be beneficial later)
            if self.cn.G.edge[path[i]][path[i+1]]['b_rem'] - 2*self.cn.b_CRC < self.cn.b_CLC:
                return 2
        return True
    # checks if a flow f can be satisfied by a controller v
    def checkFlowSat(self, v, f):
        """Return True iff CLC *v* could satisfy flow *f*.

        Requires f to be unsatisfied, all of f's base stations to be under
        v's control, the latency budget l_flow to hold on every control path,
        and every edge to retain enough bandwidth (after reserving capacity
        for future CRC/CLC assignments) for the flow on all paths crossing it.
        """
        if self.cn.fdata[f]['isSat'] == True:
            return False
        if not set(self.cn.Wb[f]) <= set(self.cn.G.node[v]['CLCcontrol']):
            return False
        # count how many of f's base-station paths cross each edge: the flow
        # consumes bandwidth once per crossing path
        flowedgecount = {}
        for k in self.cn.Wb[f]:
            path = self.cn.G.node[v]['CLCpaths'][k]
            pathLat = sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1))
            for i in range(0,len(path)-1):
                if path[i] < path[i+1]:
                    edgekey = (path[i],path[i+1])
                else:
                    edgekey = (path[i+1],path[i])
                if not edgekey in flowedgecount:
                    flowedgecount[edgekey] = 1
                else:
                    flowedgecount[edgekey] += 1
            if pathLat + self.cn.fdata[f]['p_flow']/self.cn.G.node[v]['p_rem'] > self.cn.fdata[f]['l_flow']:
                return False
        for e in flowedgecount:
            # reserve capacity for 2 CRC and 2 CLC assignments (complete control structure more important than flow satisfaction)
            if self.cn.G.edge[e[0]][e[1]]['b_rem'] - 2*self.cn.b_CRC - 2*self.cn.b_CLC < flowedgecount[(e[0],e[1])] * self.cn.fdata[f]['b_flow']:
                return False
        return True
    def addCRCcontrol(self, path):
        """Establish CRC control of path[0] over path[-1], booking resources along *path*.

        Reserves b_CRC bandwidth on every link and books a processing share of
        p_CRC scaled by the remaining latency budget; updates the control
        bookkeeping on both endpoints and possibly self.state.
        """
        v = path[0]
        w = path[-1]
        if w in self.cn.G.node[v]['CRCcontrol']:
            print "Critical error: Tried to add allready existing CRC control!"
            exit(1)
        for i in range(0,len(path)-1):
            self.cn.G.edge[path[i]][path[i+1]]['b_rem'] -= self.cn.b_CRC
        # processing share = p_CRC / (latency budget left after path latency)
        self.cn.G.node[v]['p_rem'] -= self.cn.p_CRC/(self.cn.l_CRC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['ProcCRC'][w] = self.cn.p_CRC/(self.cn.l_CRC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['CRCcontrol'].append(w)
        self.cn.G.node[v]['CRCpaths'][w] = path
        self.cn.G.node[v]['isCRC'] = True
        self.cn.G.node[w]['CRC'] = v
        self.cn.G.node[w]['pathtoCRC'] = path
        if v not in self.CRCs:
            self.CRCs.append(v)
        # a CLC that regained its CRC is no longer orphaned
        if self.cn.G.node[w]['isCLC'] == True and w in self.uncontrolledCLCs:
            self.uncontrolledCLCs.remove(w)
        if len(self.uncontrolledCLCs) == 0 and len(self.Controlled) == len(self.cn.V):
            self.state = "Solved"
    def addCLCcontrol(self, path):
        """Establish CLC control of path[0] over path[-1], booking resources along *path*.

        Reserves b_CLC bandwidth on every link and books a processing share of
        p_CLC scaled by the remaining latency budget; updates the control
        bookkeeping on both endpoints.
        """
        self.newCLCcontrols += 1
        v = path[0]
        w = path[-1]
        if w in self.cn.G.node[v]['CLCcontrol']:
            print "Critical error: Tried to add allready existing CLC control!"
            exit(1)
        for i in range(0,len(path)-1):
            self.cn.G.edge[path[i]][path[i+1]]['b_rem'] -= self.cn.b_CLC
        # processing share = p_CLC / (latency budget left after path latency)
        self.cn.G.node[v]['p_rem'] -= self.cn.p_CLC/(self.cn.l_CLC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['ProcCLC'][w] = self.cn.p_CLC/(self.cn.l_CLC - sum(2*self.cn.G.edge[path[i]][path[i+1]]['l_cap'] for i in range(0,len(path)-1)))
        self.cn.G.node[v]['CLCcontrol'].append(w)
        self.cn.G.node[v]['CLCpaths'][w] = path
        self.cn.G.node[v]['isCLC'] = True
        self.cn.G.node[w]['CLCs'].append(v)
        self.cn.G.node[w]['pathtoCLC'][v] = path
        if v not in self.CLCs:
            self.CLCs.append(v)
        if w not in self.Controlled:
            self.Controlled.append(w)
    def addFlowSat(self, v, f):
        """Mark flow *f* as satisfied by CLC *v*, booking bandwidth and processing.

        Books b_flow on every edge of every base-station path and a processing
        share derived from the flow's latency budget minus the worst path
        latency.
        """
        self.newFlowSats += 1
        self.cn.fdata[f]['isSat'] = True
        self.cn.fdata[f]['CLC'] = v
        flowpaths = [self.cn.G.node[v]['CLCpaths'][k] for k in self.cn.Wb[f]]
        for path in flowpaths:
            for i in range(0, len(path) - 1):
                self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] -= self.cn.fdata[f]['b_flow']
        # the slowest base-station path determines the remaining latency budget
        maxpathlatency = max([sum(2 * self.cn.G.edge[path[i]][path[i + 1]]['l_cap'] for i in range(0, len(path) - 1)) for path in flowpaths])
        self.cn.G.node[v]['p_rem'] -= self.cn.fdata[f]['p_flow'] / (self.cn.fdata[f]['l_flow'] - maxpathlatency)
        self.cn.G.node[v]['ProcFlow'][f] = self.cn.fdata[f]['p_flow'] / (self.cn.fdata[f]['l_flow'] - maxpathlatency)
        self.cn.G.node[v]['Satisfies'].append(f)
        self.Satisfied.append(f)
def remCRC(self, v):
self.cn.G.node[v]['isCRC'] = False
tmp = list(self.cn.G.node[v]['CRCcontrol'])
for w in tmp:
self.remCRCcontrol(v, w)
self.CRCs.remove(v)
    def remCRCcontrol(self, v, w):
        """Release CRC control of *v* over *w*, returning the booked resources.

        A CLC losing its CRC is queued in self.uncontrolledCLCs; a CRC whose
        control list becomes empty is deactivated via remCRC().
        """
        if not w in self.cn.G.node[v]['CRCcontrol']:
            print "Critical error: Tried to remove non-existing CRC control!"
            exit(1)
        path = self.cn.G.node[v]['CRCpaths'][w]
        for i in range(0, len(path) - 1):
            self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] += self.cn.b_CRC
        self.cn.G.node[v]['p_rem'] += self.cn.G.node[v]['ProcCRC'][w]
        del self.cn.G.node[v]['ProcCRC'][w]
        self.cn.G.node[v]['CRCcontrol'].remove(w)
        del self.cn.G.node[v]['CRCpaths'][w]
        self.cn.G.node[w]['CRC'] = None
        self.cn.G.node[w]['pathtoCRC'] = None
        if self.cn.G.node[w]['isCLC'] == True:
            self.uncontrolledCLCs.append(w)
            self.state = "NOT SOLVED"
        if len(self.cn.G.node[v]['CRCcontrol']) == 0:
            self.remCRC(v)
def remCLC(self, v):
tmp = list(self.cn.G.node[v]['Satisfies'])
for f in tmp:
self.remFlowSat(f)
tmp = list(self.cn.G.node[v]['CLCcontrol'])
for w in tmp:
self.remCLCcontrol(v, w)
self.cn.G.node[v]['isCLC'] = False
if self.cn.G.node[v]['CRC'] is not None:
self.remCRCcontrol(self.cn.G.node[v]['CRC'], v)
self.CLCs.remove(v)
    def remCLCcontrol(self, v, w):
        """Release CLC control of *v* over *w*, returning the booked resources.

        A node left without any CLC is removed from self.Controlled and the
        placement is marked unsolved.
        """
        if not w in self.cn.G.node[v]['CLCcontrol']:
            print "Critical error: Tried to remove non-existing CLC control!"
            exit(1)
        path = self.cn.G.node[v]['CLCpaths'][w]
        for i in range(0, len(path) - 1):
            self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] += self.cn.b_CLC
        self.cn.G.node[v]['p_rem'] += self.cn.G.node[v]['ProcCLC'][w]
        del self.cn.G.node[v]['ProcCLC'][w]
        self.cn.G.node[v]['CLCcontrol'].remove(w)
        del self.cn.G.node[v]['CLCpaths'][w]
        self.cn.G.node[w]['CLCs'].remove(v)
        del self.cn.G.node[w]['pathtoCLC'][v]
        if len(self.cn.G.node[w]['CLCs']) == 0:
            self.Controlled.remove(w)
            self.state = "NOT SOLVED"
    def remFlowSat(self, f, stopiperf=True):
        """Withdraw satisfaction of flow f: release the flow bandwidth along the
        base nodes' CLC paths, refund CLC processing, and (optionally) stop the
        emulated iperf traffic for it."""
        v = self.cn.fdata[f]['CLC']
        for k in self.cn.Wb[f]:
            path = self.cn.G.node[v]['CLCpaths'][k]
            for i in range(0, len(path) - 1):
                self.cn.G.edge[path[i]][path[i + 1]]['b_rem'] += self.cn.fdata[f]['b_flow']
        self.cn.G.node[v]['p_rem'] += self.cn.G.node[v]['ProcFlow'][f]
        del self.cn.G.node[v]['ProcFlow'][f]
        self.cn.G.node[v]['Satisfies'].remove(f)
        if stopiperf == True:
            # Track how long stopping the traffic generator takes (statistics).
            tstopstart = time.time()
            self.stopTrafficGenerationForSingleFlow(fid=f)
            tstopend = time.time()
            self.lastiperfstoptime += tstopend - tstopstart
        self.cn.fdata[f]['isSat'] = False
        self.cn.fdata[f]['CLC'] = None
        self.cn.fdata[f]['isGen'] = False
        self.Satisfied.remove(f)
    def addFlow(self, stime=0, dur=None, amount=1):
        """Add `amount` new flows (start time `stime`, duration `dur`) and, in
        flexible-operation mode, immediately try to satisfy them with the
        currently active CLCs."""
        for i in range(1, amount + 1):
            self.cn.addFlow(stime=stime, dur=dur)
        if self.flexOperation and len(self.CLCs) > 0:
            if amount == 1:
                # Fast path: only the newest flow needs placement.
                self.browseCurrentCLCsforSingleFlow(self.cn.F[-1])
            else:
                self.browseCurrentCLCs()
        self.checkLowload()
    def remFlow(self, f, stopiperf=True):
        """Remove flow f from the network, releasing its resources first and,
        in flexible-operation mode, pruning now-redundant CLC controls."""
        # self.cn.fdata[-f] = copy.deepcopy(self.cn.fdata[f]) # uncomment ONLY for debbuging!
        if self.cn.fdata[f]['isSat'] == True:
            self.remFlowSat(f, stopiperf)
        # Snapshot the base nodes before the flow record disappears.
        tmplist = list(self.cn.Wb[f])
        self.cn.remFlow(f)
        if self.flexOperation:
            self.cleanupCLCcontrols(tmplist)
def clearFlows(self):
tmp = list(self.cn.F)
for f in tmp:
self.remFlow(f)
    def browseCurrentCLCs(self):
        """Greedy pass over the active CLCs: extend their control sets and
        satisfy flows until the network is fully controlled and all flows are
        satisfied, or no CLC has capacity left."""
        self.updateVCRatio()
        paths = []
        pf = {}   # per-CLC set of flows it could potentially satisfy
        nnc = {}  # per-CLC count of newly added node controls
        for v in self.CLCs:
            pf[v] = self.updatePotentialFlows(set([]), v, self.cn.G.node[v]['CLCcontrol'])
            nnc[v] = 0
            # Candidate control paths from v to every node it does not yet control
            # that is either uncontrolled or has unsatisfied flows.
            for i in list(set(self.cn.V) - set(self.cn.G.node[v]['CLCcontrol'])):
                if i not in self.Controlled or len(set(self.cn.Wf[i]) - set(self.Satisfied)) > 0:
                    paths.append((nx.shortest_path(self.cn.G, source=v, target=i), i in self.Controlled, len(set(self.cn.Wf[i]) - set(self.Satisfied))))
        if len(self.Controlled) < len(self.cn.V):
            # Not everything controlled yet: prefer uncontrolled targets, then short paths.
            paths.sort(key=lambda x: x[1])
            paths.sort(key=lambda x: len(x[0]))
            notyetsolved = True
        else:
            # Full control achieved: prefer nodes with many unsatisfied flows.
            paths.sort(key=lambda x: len(x[0]))
            paths.sort(key=lambda x: x[2], reverse=True)
            notyetsolved = False
        while (len(paths) > 0 or sum(len(pf[v]) for v in pf) > 0) and (len(self.Controlled) < len(self.cn.V) or len(self.Satisfied) < len(self.cn.F)):
            if notyetsolved and len(self.Controlled) == len(self.cn.V):
                # Control just became complete: re-rank towards flow satisfaction.
                paths.sort(key=lambda x: len(x[0]))
                paths.sort(key=lambda x: x[2], reverse=True)
                notyetsolved = False
            if sum(len(pf[v]) for v in pf) > 0 and (len(paths) == 0 or len(self.Controlled) == len(self.cn.V)):
                currv = [v for v in pf if len(pf[v]) > 0][0]
            else:
                currv = paths[0][0][0]
            if self.CLCload(currv) > 0.999:
                # CLC is effectively full: drop it from further consideration.
                pf[currv] = set([])
                paths = [p for p in paths if p[0][0] <> currv]
            elif len(pf[currv]) > 0 and (len(paths) == 0 or nnc[currv] >= self.VCRatio or len(self.Controlled) == len(self.cn.V)):
                # Try to satisfy one of currv's potential flows.
                f = self.getFlow(pf[currv], self.getFlowOption)
                flowsat = False
                tmp = self.checkFlowSat(currv, f)
                if tmp == True:
                    self.addFlowSat(currv, f)
                    flowsat = True
                if flowsat == True:
                    # f is satisfied: no other CLC needs to consider it any more.
                    for w in pf:
                        if f in pf[w]:
                            pf[w].remove(f)
                else:
                    pf[currv].remove(f)
            else:
                # Otherwise try to extend currv's control set along the best path.
                if len(paths) == 0:
                    break
                p = list(paths[0][0])
                del paths[0]
                tmp = self.checkCLC(p)
                if tmp == True:
                    if p[-1] not in self.Controlled:
                        nnc[currv] += 1
                    self.addCLCcontrol(p)
                    pf[currv] = self.updatePotentialFlows(pf[currv], currv, [p[-1]])
                elif tmp > 2:
                    # Hard failure for this CLC: abandon all of its pending work.
                    paths = [p for p in paths if p[0][0] <> currv]
                    pf[currv] = set([])
def browseCurrentCLCsforSingleFlow(self,f):
CLCstmp = list([c for c in self.CLCs if set(self.cn.Wb[f]) <= set(self.cn.G.node[c]['CLCcontrol'])])
CLCstmp.sort(key=lambda c: sum(len(set(self.cn.G.node[c]['Satisfies']) & set(self.cn.Wf[v])) for v in self.cn.Wb[f]), reverse=True)
CLCstmp.sort(key=lambda c: sum(len(nx.shortest_path(self.cn.G, source=c, target=i)) for i in self.cn.Wb[f]))
for c in CLCstmp:
tmp = self.checkFlowSat(c,f)
if tmp == True:
self.addFlowSat(c,f)
return 1
CLCstmp = list([c for c in self.CLCs if not set(self.cn.Wb[f]) <= set(self.cn.G.node[c]['CLCcontrol'])])
#CLCstmp.sort(key=lambda c: sum(len(nx.shortest_path(self.cn.G, source=c, target=i)) for i in self.cn.Wb[f]))
#CLCstmp.sort(key=lambda c: len(set(self.cn.Wb[f]) - set(self.cn.G.node[c]['CLCcontrol'])))
CLCstmp.sort(key=lambda c: sum(len(nx.shortest_path(self.cn.G, source=c, target=i)) for i in self.cn.Wb[f] if not i in self.cn.G.node[c]['CLCcontrol']))
for c in CLCstmp:
paths = (nx.shortest_path(self.cn.G, source=c, target=i) for i in self.cn.Wb[f] if not i in self.cn.G.node[c]['CLCcontrol'])
for p in paths:
tmp = self.checkCLC(p)
if tmp == True:
self.addCLCcontrol(p)
else:
break
if tmp == True:
flowsat = False
tmp = self.checkFlowSat(c,f)
if tmp == True:
self.addFlowSat(c,f)
flowsat = True
if flowsat == False:
for p in paths:
self.remCLCcontrol(c,p[-1])
    def rearrangeCLCs(self):
        """Consolidate CRC assignments: move nodes controlled by lightly loaded
        CRCs over to more heavily loaded CRCs so lightly used CRCs can empty
        out (and be demoted by remCRCcontrol)."""
        if len(self.CRCs) <= 1:
            return 0
        # Process CRCs from least to most controlled.
        ctmp = list(self.CRCs)
        ctmp.sort(key=lambda c: len(self.cn.G.node[c]['CRCcontrol']))
        # checklist[j] == 0 marks CRCs that already received a reassignment
        # and should not be drained themselves in this pass.
        checklist = [1 for c in ctmp]
        for i in range(0,len(ctmp)):
            if checklist[i] == 0:
                continue
            c = ctmp[i]
            vtmp = list(self.cn.G.node[c]['CRCcontrol'])
            for v in vtmp:
                # Hand v over to the most-loaded other CRC that can take it.
                for j,d in reversed(list(enumerate(ctmp))):
                    if j <= i:
                        break
                    else:
                        p = nx.shortest_path(self.cn.G, source=d, target=v)
                        if self.checkCRC(p) == True:
                            self.remCRCcontrol(c,v)
                            self.addCRCcontrol(p)
                            checklist[j] = 0
                            break
    def cleanupCLCcontrols(self,nodelist):
        """Drop redundant CLC controls over the given nodes: every node keeps at
        least one controller, plus any controller still satisfying its flows."""
        vtmp = list(nodelist)
        # Randomise order so redundancy removal is not biased towards any node/CLC.
        random.shuffle(vtmp)
        for v in vtmp:
            ctmp = list(self.cn.G.node[v]['CLCs'])
            random.shuffle(ctmp)
            for c in ctmp:
                # A control is redundant if c is not v itself, v keeps another
                # controller, and c satisfies none of v's flows.
                if c <> v and len(self.cn.G.node[v]['CLCs']) > 1 and len(set(self.cn.G.node[c]['Satisfies']) & set(self.cn.Wf[v])) == 0:
                    self.remCLCcontrol(c,v)
                    self.cleanedUpCLCcontrols += 1
    def CLCload(self,c): # relative load: used for statistics
        # Fraction of node c's processing capacity in use (1 - remaining/total).
        # NOTE(review): under Python 2, if 'p_rem' and 'p_node' are both ints
        # this is floor division — confirm they are floats.
        return 1.0 - self.cn.G.node[c]['p_rem']/self.cn.G.node[c]['p_node']
def absCLCload(self,c): # absolute load: used for Lowload detection
return self.cn.G.node[c]['p_node'] - self.cn.G.node[c]['p_rem']
def getAverageCLCload(self):
return sum(self.CLCload(c) for c in self.CLCs)/len(self.CLCs)
def getTotalAbsCLCload(self):
return sum(self.absCLCload(c) for c in self.CLCs)
def getCLCwithLeastAbsLoad(self):
CLCstmp = list(self.CLCs)
CLCstmp.sort(key=lambda c: self.absCLCload(c))
return CLCstmp[0]
def getCLCestimate(self):
CLCstmp = list(self.CLCs)
CLCstmp.sort(key=lambda c: self.absCLCload(c), reverse=True)
totalload = self.getTotalAbsCLCload()
est = 0
psum = 0
while psum * self.L_lowload < totalload and est < len(CLCstmp):
c = CLCstmp[est]
psum += self.cn.G.node[c]['p_node']
est += 1
return est
    def updateTime(self,t):
        """Advance the controller clock to t and re-evaluate the low-load condition."""
        self.current_time = t
        self.checkLowload()
def checkLowload(self):
est = self.getCLCestimate()
if est < len(self.CLCs):
if self.LL_alarm == False:
self.LL_alarm = True
self.LL_alarm_time = self.current_time
if self.LL_alarm == True and self.current_time - self.LL_alarm_time > self.T_lowload:
self.LL_alarm = False
self.LL_execution = True
else:
self.LL_alarm = False
    def lowload(self):
        """Execute low-load consolidation: shed surplus CLCs (least loaded
        first), re-satisfy flows that lost their controller, and compact CRC
        assignments."""
        est = self.getCLCestimate()
        while len(self.CLCs) > est:
            self.remCLC(self.getCLCwithLeastAbsLoad())
        if len(self.Satisfied) < len(self.cn.F):
            # Removing CLCs may have unsatisfied some flows; try to re-place them.
            self.browseCurrentCLCs()
        if len(self.CRCs) > 1:
            self.rearrangeCLCs()
        self.LL_execution = False
    def getAverageCLCpathlength(self):
        # Mean control-path length over all CLC-to-node assignments.
        # NOTE(review): both operands are ints, so under Python 2 this is floor
        # division — confirm whether a float average is intended.
        return sum(len(self.cn.G.node[c]['CLCpaths'][v]) for c in self.CLCs for v in self.cn.G.node[c]['CLCcontrol']) / sum(len(self.cn.G.node[c]['CLCcontrol']) for c in self.CLCs)
    def getAverageCRCpathlength(self):
        # Mean control-path length over all CRC-to-node assignments.
        # NOTE(review): int/int is floor division under Python 2 — confirm
        # whether a float average is intended.
        return sum(len(self.cn.G.node[c]['CRCpaths'][v]) for c in self.CRCs for v in self.cn.G.node[c]['CRCcontrol']) / sum(len(self.cn.G.node[c]['CRCcontrol']) for c in self.CRCs)
    def getAverageLinkUsage(self):
        # Mean fraction of *remaining* (free) bandwidth over all links.
        # NOTE(review): if 'b_rem'/'b_cap' are ints, each term floors to 0 or 1
        # under Python 2 — confirm these are floats.
        return sum(self.cn.G[u][v]['b_rem']/self.cn.G[u][v]['b_cap'] for u,v in self.cn.G.edges()) / self.cn.G.number_of_edges()
    def CLCcontrolRatio(self):
        # Average number of CLC controllers per network node.
        # NOTE(review): int/int floors under Python 2 — confirm a float ratio
        # is not expected.
        return sum(len(self.cn.G.node[c]['CLCcontrol']) for c in self.CLCs) / len(self.cn.V)
def flowUsesLink(self,f,v,w):
if self.cn.fdata[f]['isSat'] == True:
c = self.cn.fdata[f]['CLC']
for k in self.cn.Wb[f]:
path = self.cn.G.node[k]['pathtoCLC'][c]
if any(([v,w] == path[i:i+1]) for i in xrange(len(path)-1)) or any(([w,v] == path[i:i+1]) for i in xrange(len(path)-1)):
return True
return False
def CLCoutput(self, c):
out = "Data for CLC " + str(c) + ":\n"
out += "Load: " + str(self.CLCload(c)) + "\n"
out += "p_rem: " + str(self.cn.G.node[c]['p_rem']) + ", Nodes controlled: " + str(len(self.cn.G.node[c]['CLCcontrol'])) + ", Flows satisfied: " + str(len(self.cn.G.node[c]['Satisfies'])) + "\n"
if len(self.cn.G.node[c]['Satisfies']) > 0:
out += "Biggest flow satisfied: " + str(max(self.cn.fdata[f]['p_flow'] for f in self.cn.G.node[c]['Satisfies'])) + "\n"
return out
# ------------------------------------------------------------------------------------------------------------
# Testbed Code
# ------------------------------------------------------------------------------------------------------------
    def setupEmulationNetwork(self):
        """Build the emulated testbed: one host+switch pair per graph node,
        links mirroring the graph edges, started under Mininet or MaxiNet
        depending on self.emulator, with a socat UDP echo server on every
        potential CLC host."""
        topo = Topo(link=TCLink, host=CPULimitedHost)
        # linkopts = dict(bw=1000, delay='0ms', loss=0, use_htb=True)
        linkopts = dict()  # delay='0ms', use_hfsc=True
        # clcpower = 0.7/len(self.cn.C) #TODO
        # cpupower = 0.1/(len(self.cn.V) - len(self.cn.C))
        # Add switches and associated hosts
        s = 1
        for n in self.cn.G.node:
            hName = 'h' + str(n)
            hIp = Tools.makeIP(n + 1)
            hMac = Tools.makeMAC(n + 1)
            # if n in self.cn.C:
            # cpupower = clcpower
            # hObj = topo.addHost(name=hName, ip=hIp, mac=hMac, cpu=cpupower)
            hObj = topo.addHost(name=hName, ip=hIp, mac=hMac)
            self.Hosts[hName] = {'obj': hObj, 'ip': hIp, 'mac': hMac}
            sName = 's' + str(n)
            sDpid = Tools.makeDPID(n + 1)
            # Each switch gets a distinct local listen port starting at 13000.
            sListenPort = (13000 + s - 1)
            switchopts = dict(listenPort=sListenPort)
            sObj = topo.addSwitch(name=sName, dpid=sDpid, **switchopts)
            self.Switches[sName] = {'obj': sObj, 'dpid': sDpid, 'listenport': sListenPort}
            s += 1
            topo.addLink(hObj, sObj, **linkopts)
        # Mirror every graph edge as a switch-to-switch link.
        for key, value in self.cn.G.edges():
            sName1 = 's' + str(key)
            sName2 = 's' + str(value)
            sObj1 = self.Switches[sName1]['obj']
            sObj2 = self.Switches[sName2]['obj']
            topo.addLink(sObj1, sObj2, **linkopts)
        if self.emulator == "Mininet":
            # Mininet
            switch = functools.partial(OVSSwitch, protocols='OpenFlow13')
            net = Mininet(topo=topo, switch=switch, controller=RemoteController, host=CPULimitedHost, link=TCLink)
            net.start()
            # Save the Mininet object for future reference
            self.TestbedNetwork = net
        elif self.emulator == "MaxiNet":
            # MaxiNet
            cluster = maxinet.Cluster()
            exp = maxinet.Experiment(cluster, topo, switch=OVSSwitch)
            exp.setup()
            # Save the Experiment object for future reference
            for switch in exp.switches:
                exp.get_worker(switch).run_cmd('ovs-vsctl -- set Bridge %s ' % switch.name + 'protocols=OpenFlow10,OpenFlow12,OpenFlow13')
            self.TestbedNetwork = exp
        else:
            print("Error: Emulator missing!")
            exit(1)
        # Start iperf server in potential CLC host
        for n in self.cn.G.node:
            if n in self.cn.C:
                hName = 'h' + str(n)
                hObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
                hObj.cmd("socat -T 5 UDP-LISTEN:5001,fork,reuseaddr EXEC:\'/bin/cat\' &")
                # hObj.cmd("ncat -e /bin/cat -k -u -m 1000 -l 5001 &")
                # hObj.cmd("iperf -u -f 'b' -s > /tmp/iperf_server_" + hName + ".log &")
        # for n in self.cn.G.node:
        # if n in self.cn.C:
        # hName = 'h' + str(n)
        # hObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
        # hObj.cmd("ps -eaf|grep \"socat -T 5 UDP-LISTEN:5001,fork,reuseaddr EXEC:/bin/cat\"|grep -v \"grep socat\"|awk \'{print $2}\'|tr \'\\n\' \',\' > /tmp/socatpid.txt")
        # f = open("/tmp/socatpid.txt", "r")
        # line = f.readline()
        # linetoken = re.split(",", line)
        # for onelinetocken in linetoken:
        # if len(onelinetocken) > 1 and onelinetocken not in self.ParentProcess:
        # self.ParentProcess.append(onelinetocken)
        # f.close()
        # print self.ParentProcess
def populateNetworkLinks(self):
urlpath = "http://127.0.0.1:8080/fcapfnetworkcontroller/topology/links"
resp = requests.get(urlpath)
self.Links = resp.json()
# print self.Links
def checkRoutingPath(self, src, dst, path):
key = (src, dst, path)
self.CurrentRoutingPaths.append(key)
foundPath = key in self.RoutingPaths
if foundPath == False:
self.RoutingPaths.append(key)
return foundPath
    def modifyRoutingTable(self):
        """Synchronise switch forwarding state with the current node-to-CLC
        paths: queue and push entries for newly appearing paths, then queue and
        push deletions for paths no longer in use."""
        for n in self.cn.G.node:
            node = self.cn.G.node[n]
            pathToCLC = node['pathtoCLC']
            # print pathToCLC
            for clc, path in pathToCLC.items():
                # print path
                src = clc
                dst = n
                foundPath = self.checkRoutingPath(src, dst, path)
                if foundPath == False:
                    # New path: queue its forwarding entries.
                    self.addRoutingPath(src, dst, path)
        self.requestAddRoutingEntries()
        # print "Earlier Routing Entries"
        # self.RoutingEntries.sort()
        # print self.RoutingEntries
        self.clearObsoleteRoutingPaths()
        # print "After clean Routing Entries"
        # self.RoutingEntries.sort()
        # print self.RoutingEntries
        # print "Current Routing Entries"
        # self.CurrentEntries.sort()
        # print self.CurrentEntries
        # Do not delete forwarding entries from switch, because there might be some flow still running,
        # we will set the timeout for the entries, so that the entries will be removed automatically
        # for being idle for sometime, see the function we commented the call for forward entry deletion.
        self.requestDeleteRoutingEntries()
    def addRoutingPath(self, src, dst, path):
        """Queue bidirectional forwarding entries along `path` (a node list from
        src to dst), hop by hop. Four cases depending on whether the current
        hop touches the source and/or destination switch."""
        links = self.Links
        nw_src = Tools.makeIP(src + 1)
        nw_dst = Tools.makeIP(dst + 1)
        if len(path) > 1:
            i = 0
            while i < len(path) - 1:
                n1 = path[i]
                n2 = path[i + 1]
                if n1 == src and n2 == dst:
                    # Case 1: single-hop path (src switch directly to dst switch).
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    inport = port2
                    switch = node2
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                elif n1 == src and n2 != dst:
                    # Case 2: first hop of a multi-hop path.
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                elif n1 != src and n2 != dst:
                    # Case 3: middle hop — wire n2 towards the next node on the path.
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                elif n1 != src and n2 == dst:
                    # Case 4: final hop — deliver to the destination host.
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    # Add route entry for outward traffic
                    self.addRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst, action=action)
                    # Add route entry for inward traffic
                    self.addRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src, action=inport)
                else:
                    pass # do nothing
                i += 1
def addRoutingEntry(self, switch, inport, nwsrc, nwdst, action):
dpid = "0000" + self.Switches[switch]['dpid']
# Add route entry for ARP
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"action": int(action),
"dltype": "arp",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
# Add route entry for TCP
payload = {"inport": int(inport),
"action": int(action),
"dltype": "ip",
"ipsrc": nwsrc,
"ipdest": nwdst}
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"action": int(action),
"dltype": "ip",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
    def requestAddRoutingEntries(self):
        """Push the queued flow-table additions to the controller REST API,
        then clear the queue (even on failure)."""
        try:
            url = "http://127.0.0.1:8080/fcapfnetworkcontroller/flowtable/addflows"
            data = json.dumps(self.JsonEntries)
            resp = requests.put(url, data=data)
            if resp.status_code != 200:
                print "Something is wrong adding Routing entries, check controller log!!"
        except:
            # NOTE(review): bare except swallows every error (including typos);
            # consider catching requests.RequestException and logging.
            print "Controller is not available!!"
        del self.JsonEntries
        self.JsonEntries = []
def clearObsoleteRoutingPaths(self):
entries = copy.deepcopy(self.RoutingPaths)
for entry in entries:
(src, dst, path) = entry
if entry not in self.CurrentRoutingPaths:
self.deleteRoutingPath(src, dst, path)
self.RoutingPaths.remove(entry)
del entries
del self.CurrentRoutingPaths
self.CurrentRoutingPaths = []
    def deleteRoutingPath(self, src, dst, path):
        """Queue deletion of the bidirectional forwarding entries previously
        installed along `path` by addRoutingPath; mirrors its four hop cases."""
        links = self.Links
        nw_src = Tools.makeIP(src + 1)
        nw_dst = Tools.makeIP(dst + 1)
        if len(path) > 1:
            i = 0
            while i < len(path) - 1:
                n1 = path[i]
                n2 = path[i + 1]
                if n1 == src and n2 == dst:
                    # Case 1: single-hop path.
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    inport = port2
                    switch = node2
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                elif n1 == src and n2 != dst:
                    # Case 2: first hop of a multi-hop path.
                    # Assuming host is always connected to the port 1 of the switch
                    inport = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    switch = node1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                elif n1 != src and n2 != dst:
                    # Case 3: middle hop.
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    node1 = 's' + str(n2)
                    node2 = 's' + str(path[i + 2])
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    action = port1
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                elif n1 != src and n2 == dst:
                    # Case 4: final hop.
                    # Assuming host is always connected to the port 1 of the switch
                    action = 1
                    node1 = 's' + str(n1)
                    node2 = 's' + str(n2)
                    port1, port2 = self.getLinkPort(links, node1, node2)
                    inport = port2
                    switch = node2
                    # Delete route entry for outward traffic
                    self.deleteRoutingEntry(switch=switch, inport=inport, nwsrc=nw_src, nwdst=nw_dst)
                    # Delete route entry for inward traffic
                    self.deleteRoutingEntry(switch=switch, inport=action, nwsrc=nw_dst, nwdst=nw_src)
                else:
                    pass # do nothing
                i += 1
def deleteRoutingEntry(self, switch, inport, nwsrc, nwdst):
dpid = "0000" + self.Switches[switch]['dpid']
# Delete route entry for ARP
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"dltype": "arp",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
# Delete route entry for IP
entry = {
"dpid": dpid,
"payload": {
"inport": int(inport),
"dltype": "ip",
"ipsrc": nwsrc,
"ipdest": nwdst
}
}
self.JsonEntries.append(entry)
    def requestDeleteRoutingEntries(self):
        """Push the queued flow-table deletions to the controller REST API,
        then clear the queue (even on failure)."""
        try:
            url = "http://127.0.0.1:8080/fcapfnetworkcontroller/flowtable/delflows"
            data = json.dumps(self.JsonEntries)
            resp = requests.put(url, data=data)
            if resp.status_code != 200:
                print "Something is wrong deleting Routing entries, check controller log!!"
        except:
            # NOTE(review): bare except hides all errors — consider catching
            # requests.RequestException and logging.
            print "Controller is not available!!"
        del self.JsonEntries
        self.JsonEntries = []
def checkBucketInCLC(self, clc, bucketId):
key = (clc, bucketId)
found = key in self.CLCBucketIdList
if found == False:
self.CLCBucketIdList.append(key)
return found
    def generateTrafficFlow(self):
        """Start iperf traffic for every satisfied-but-not-yet-generated flow;
        returns the number of flows for which traffic was started."""
        iperfcount = 0
        for fid in self.cn.fdata:
            flow = self.cn.fdata[fid]
            if flow['isSat'] == True and flow['isGen'] == False:
                self.getHostForFlow(fid)
                dur = flow['duration']
                # Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half
                bw = flow['b_flow'] / 2
                # Per-flow processing delay in ms, derived from the CLC's allotted share.
                delay = self.cn.fdata[fid]['p_flow'] / self.cn.G.node[flow['CLC']]['ProcFlow'][fid] * 1000
                bucket_id = int(round(delay * 10))
                hName = "h" + str(flow['CLC'])
                clcObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
                clcIP = Tools.makeIP(flow['CLC'] + 1)
                for hObj in self.hObjs:
                    (n, obj) = hObj
                    # NOTE(review): 'id' shadows the builtin, and bucket_id is
                    # re-assigned cumulatively across hosts in this loop —
                    # confirm the compounding is intentional.
                    id = bucket_id + n * 1000
                    bucket_id = bucket_id + n * 65536
                    hexFid = "0x%0.8x" % bucket_id
                    if False == self.checkBucketInCLC(flow['CLC'], bucket_id):
                        # Configure the delay bucket on the CLC host only once per bucket.
                        clcObj.cmd(g_conf_script_path + " add " + hName + "-eth0 1 " + str(id) + " " + str(delay) + " " + hexFid)
                    obj.cmd(g_iperf_path + " -u -c " + clcIP + " -f b -b " + str(bw) + " -t " + str(dur) + " -k " + str(bucket_id) + " > /tmp/" + self.iperf_log_tag + "iperf_client_" + str(fid) + "_" + str(bw) + "_" + str(dur) + "_" + str(obj.name) + "_to_" + str(hName) + ".log &")
                self.cn.fdata[fid]['genstime'] = time.time()
                self.cn.fdata[fid]['isGen'] = True
                iperfcount += 1
                self.TotalSatisfied.append(fid)
                if fid in self.TotalUnsatisfied:
                    self.TotalUnsatisfied.remove(fid)
            elif flow['isSat'] == False:
                self.TotalUnsatisfied.append(fid)
                print "Flow not satisfied : " + str(flow)
            else:
                pass # do nothing
        return iperfcount
    def generateTrafficForSingleFlow(self, fid):
        """Start iperf traffic for one flow if it is satisfied and not yet
        generating; mirrors generateTrafficFlow for a single fid."""
        flow = self.cn.fdata[fid]
        if flow['isSat'] == True and flow['isGen'] == False:
            self.getHostForFlow(fid)
            dur = flow['duration']
            # Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half
            bw = flow['b_flow'] / 2
            delay = self.cn.fdata[fid]['p_flow'] / self.cn.G.node[flow['CLC']]['ProcFlow'][fid] * 1000
            bucket_id = int(round(delay * 10))
            hName = "h" + str(flow['CLC'])
            clcObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName)
            clcIP = Tools.makeIP(flow['CLC'] + 1)
            for hObj in self.hObjs:
                (n,obj)=hObj
                # NOTE(review): 'id' shadows the builtin; bucket_id compounds
                # across hosts — confirm intentional (same as generateTrafficFlow).
                id = bucket_id + n * 1000
                bucket_id = bucket_id + n * 65536
                hexFid = "0x%0.8x" % bucket_id
                if False == self.checkBucketInCLC(flow['CLC'], bucket_id):
                    clcObj.cmd(g_conf_script_path + " add " + hName + "-eth0 1 " + str(id) + " " + str(delay) + " " + hexFid)
                obj.cmd(g_iperf_path + " -u -c " + clcIP + " -f b -b " + str(bw) + " -t " + str(dur) + " -k " + str(bucket_id) + " > /tmp/" + self.iperf_log_tag + "iperf_client_" + str(fid) + "_" + str(bw) + "_" + str(dur) + "_" + str(obj.name) + "_to_" + str(hName) + ".log &")
            self.cn.fdata[fid]['genstime'] = time.time()
            self.cn.fdata[fid]['isGen'] = True
            self.TotalSatisfied.append(fid)
            if fid in self.TotalUnsatisfied:
                self.TotalUnsatisfied.remove(fid)
        elif flow['isSat'] == False:
            self.TotalUnsatisfied.append(fid)
            print "Flow not satisfied : " + str(flow)
        else:
            pass # do nothing
    def stopTrafficGenerationForSingleFlow(self, fid):
        """Kill the iperf clients of flow fid if it is still running, and keep
        the not-yet-elapsed remainder as the flow's new duration."""
        flow = self.cn.fdata[fid]
        currTime = time.time()
        # giving 0.01 sec extra time assuming the processing time of to start the ipref
        if (flow['duration'] > currTime - flow['genstime'] + 0.01) and flow['isSat'] == True and flow['isGen'] == True:
            # print "Why I am here duration " + str(flow['duration']) + " currTime " + str(currTime) + " genstime " + str(flow['genstime']) + " difference " + str(currTime - flow['genstime'])
            self.getHostForFlow(fid)
            clcIP = Tools.makeIP(flow['CLC'] + 1)
            dur = flow['duration']
            # Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half
            bw = flow['b_flow'] / 2
            # Match the exact client command line used by generateTrafficFlow.
            searchStr = "iperf -u -c " + clcIP + " -f b -b " + str(bw) + " -t " + str(dur)
            for hObj in self.hObjs:
                (n,obj)=hObj
                obj.cmd("ps -eaf|grep \"" + searchStr + "\"|awk \'{print \"kill -9 \" $2}\'|sh")
            # Remember the remaining duration so the flow can be resumed later.
            self.cn.fdata[fid]['duration'] = flow['duration'] - (currTime - flow['genstime'])
            self.cn.fdata[fid]['isGen'] = False
            self.TotalFlowStopped.append(fid)
            if fid in self.TotalSatisfied:
                self.TotalSatisfied.remove(fid)
            # print "stopped flow for " + str(fid) + " and search string is " + searchStr
def getLinkPort(self, links, node1, node2):
"""Return ports of Links between node1 and node2"""
port1 = port2 = None
dpid1 = "0000" + self.Switches[node1]['dpid']
dpid2 = "0000" + self.Switches[node2]['dpid']
for link in links:
if (dpid1, dpid2) == (link['src']['dpid'], link['dst']['dpid']):
# print link
port1 = int(link['src']['port_no'], 16)
port2 = int(link['dst']['port_no'], 16)
break
elif (dpid1, dpid2) == (link['dst']['dpid'], link['src']['dpid']):
# print link
port1 = int(link['dst']['port_no'], 16)
port2 = int(link['src']['port_no'], 16)
break
else:
pass # do nothing
return port1, port2
    def getHostForFlow(self, flow):
        """Refresh self.hObjs with (node, emulator-host-object) pairs for the
        base nodes of the given flow."""
        del self.hObjs
        self.hObjs = []
        for n in self.cn.Wb[flow]:
            hName = 'h' + str(n)
            if self.emulator == "Mininet":
                # Mininet
                self.hObjs.append((n,self.TestbedNetwork.getNodeByName(hName)))
            elif self.emulator == "MaxiNet":
                # MaxiNet
                self.hObjs.append((n,self.TestbedNetwork.get_node(hName)))
            else:
                print("Error: Emulator missing!")
                exit(1)
        # return hObjs
#CLCstmp.sort(key=lambda c: sum(len(nx.shortest_path(self.cn.G, source=c, target=i)) for i in self.cn.Wb[f])) #CLCstmp.sort(key=lambda c: len(set(self.cn.Wb[f]) - set(self.cn.G.node[c]['CLCcontrol']))) # relative load: used for statistics # absolute load: used for Lowload detection # ------------------------------------------------------------------------------------------------------------ # Testbed Code # ------------------------------------------------------------------------------------------------------------ # linkopts = dict(bw=1000, delay='0ms', loss=0, use_htb=True) # delay='0ms', use_hfsc=True # clcpower = 0.7/len(self.cn.C) #TODO # cpupower = 0.1/(len(self.cn.V) - len(self.cn.C)) # Add switches and associated hosts # if n in self.cn.C: # cpupower = clcpower # hObj = topo.addHost(name=hName, ip=hIp, mac=hMac, cpu=cpupower) # Mininet # Save the Mininet object for future reference # MaxiNet # Save the Experiment object for future reference # Start iperf server in potential CLC host # hObj.cmd("ncat -e /bin/cat -k -u -m 1000 -l 5001 &") # hObj.cmd("iperf -u -f 'b' -s > /tmp/iperf_server_" + hName + ".log &") # for n in self.cn.G.node: # if n in self.cn.C: # hName = 'h' + str(n) # hObj = self.TestbedNetwork.get(hName) if self.emulator == "Mininet" else self.TestbedNetwork.get_node(hName) # hObj.cmd("ps -eaf|grep \"socat -T 5 UDP-LISTEN:5001,fork,reuseaddr EXEC:/bin/cat\"|grep -v \"grep socat\"|awk \'{print $2}\'|tr \'\\n\' \',\' > /tmp/socatpid.txt") # f = open("/tmp/socatpid.txt", "r") # line = f.readline() # linetoken = re.split(",", line) # for onelinetocken in linetoken: # if len(onelinetocken) > 1 and onelinetocken not in self.ParentProcess: # self.ParentProcess.append(onelinetocken) # f.close() # print self.ParentProcess # print self.Links # print pathToCLC # print path # print "Earlier Routing Entries" # self.RoutingEntries.sort() # print self.RoutingEntries # print "After clean Routing Entries" # self.RoutingEntries.sort() # print self.RoutingEntries 
# print "Current Routing Entries" # self.CurrentEntries.sort() # print self.CurrentEntries # Do not delete forwarding entries from switch, because there might be some flow still running, # we will set the timeout for the entries, so that the entries will be removed automatically # for being idle for sometime, see the function we commented the call for forward entry deletion. # Assuming host is always connected to the port 1 of the switch # Add route entry for outward traffic # Add route entry for inward traffic # Assuming host is always connected to the port 1 of the switch # Add route entry for outward traffic # Add route entry for inward traffic # Assuming host is always connected to the port 1 of the switch # Add route entry for outward traffic # Add route entry for inward traffic # Add route entry for outward traffic # Add route entry for inward traffic # Add route entry for outward traffic # Add route entry for inward traffic # Assuming host is always connected to the port 1 of the switch # Add route entry for outward traffic # Add route entry for inward traffic # do nothing # Add route entry for ARP # Add route entry for TCP # Assuming host is always connected to the port 1 of the switch # Delete route entry for outward traffic # Delete route entry for inward traffic # Assuming host is always connected to the port 1 of the switch # Delete route entry for outward traffic # Delete route entry for inward traffic # Assuming host is always connected to the port 1 of the switch # Delete route entry for outward traffic # Delete route entry for inward traffic # Delete route entry for outward traffic # Delete route entry for inward traffic # Delete route entry for outward traffic # Delete route entry for inward traffic # Assuming host is always connected to the port 1 of the switch # Delete route entry for outward traffic # Delete route entry for inward traffic # do nothing # Delete route entry for ARP # Delete route entry for IP # Total bandwidth is for both upload 
and download traffic therefore one way traffic bandwidth should be half # do nothing # Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half # do nothing # giving 0.01 sec extra time assuming the processing time of to start the ipref # print "Why I am here duration " + str(flow['duration']) + " currTime " + str(currTime) + " genstime " + str(flow['genstime']) + " difference " + str(currTime - flow['genstime']) # Total bandwidth is for both upload and download traffic therefore one way traffic bandwidth should be half # print "stopped flow for " + str(fid) + " and search string is " + searchStr Return ports of Links between node1 and node2 # print link # print link # do nothing # Mininet # MaxiNet # return hObjs | 1.923329 | 2 |
flask/image_process.py | jphacks/OK_1905 | 1 | 6618147 | import cv2
def canny(image):
    """Return the Canny edge map of *image* (thresholds 100 and 200)."""
    return cv2.Canny(image, 100, 200) | import cv2
def canny(image):
    """Detect edges with OpenCV's Canny detector (thresholds 100/200)."""
    return cv2.Canny(image, 100, 200) | none | 1 | 2.286398 | 2 | 
botandschedule.py | Mozart-dotSlash/reminderbot | 0 | 6618148 | from flask import Flask
from flask_restful import Resource, Api, reqparse
from telegram.ext import MessageHandler, Filters, CommandHandler, CallbackContext, Updater
from telegram import Update
import hashlib
import pymongo
from crontab import CronTab
import os
class Schedule(Resource):
    """REST resource that registers a reminder as a user crontab entry."""

    def post(self):
        """Create a cron job that runs messenger.py at the requested time."""
        parser = reqparse.RequestParser()
        for field in ('userHash', 'taskName', 'taskID', 'task', 'time'):
            parser.add_argument(field, required=True)
        args = parser.parse_args()

        # The first four whitespace-separated tokens of taskID provide the
        # minute/hour/day/month fields of the cron schedule.
        tokens = args['taskID'].split()
        schedule = " ".join(tokens[:4]) + " *"

        tab = CronTab(user=os.environ['USER'])
        entry = tab.new(
            command=f"python3 messenger.py {args['userHash']} {args['task']} {args['taskID']}", comment=args['taskID'])
        entry.setall(schedule)
        tab.write()
class Done(Resource):
    """REST resource that removes the crontab entry of a finished task."""

    def post(self):
        """Delete every cron job whose comment matches the given taskID.

        The original implementation called ``cron.remove(job)`` while
        iterating the CronTab; mutating the tab mid-iteration can skip
        entries. ``CronTab.remove_all(comment=...)`` removes all matching
        jobs safely in one call.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('userHash', required=True)
        parser.add_argument('taskID', required=True)
        args = parser.parse_args()

        cron = CronTab(user=os.environ['USER'])
        # Safe bulk removal; handles zero or many matching entries.
        cron.remove_all(comment=args['taskID'])
        cron.write()
# --- Flask REST API wiring ---
app = Flask(__name__)
api = Api(app)
api.add_resource(Schedule, '/schedule')
# NOTE(review): the Done resource is never registered with the API — confirm
# whether a '/done' route is missing.
# MongoDB collection mapping chat-id hashes to Telegram user ids.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
userHashDb = myclient["UserHashDatabase"]
userCollection = userHashDb['users']
# Telegram bot: long-polling updater and its dispatcher.
updater = Updater(
    token=os.environ['TELEGRAM_TOKEN'], use_context=True)
dispatcher = updater.dispatcher
def start(update: Update, context: CallbackContext):
    """Handle /start: store a hash of the chat id and send it to the user."""
    chat_id = update.effective_chat.id
    digest = hashlib.sha256(str(chat_id).encode()).hexdigest()
    # Register the chat only once; later /start calls just resend the code.
    if userCollection.count_documents(filter={"hash": digest}) == 0:
        userCollection.insert_one({"hash": digest, 'userid': str(chat_id)})
    greeting = "Hey welcome to Symphony!!\nPaste this code in vscode to register\n\n" + digest
    context.bot.send_message(chat_id=chat_id, text=greeting)
# Register the /start command handler and begin polling Telegram.
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
# NOTE(review): start_polling() runs at import time, outside the __main__
# guard — confirm that is intended.
updater.start_polling()
if __name__ == '__main__':
    app.run()
| from flask import Flask
from flask_restful import Resource, Api, reqparse
from telegram.ext import MessageHandler, Filters, CommandHandler, CallbackContext, Updater
from telegram import Update
import hashlib
import pymongo
from crontab import CronTab
import os
class Schedule(Resource):
    """REST endpoint that turns a task request into a user crontab entry."""

    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('userHash', required=True)
        parser.add_argument('taskName', required=True)
        parser.add_argument('taskID', required=True)
        parser.add_argument('task', required=True)
        parser.add_argument('time', required=True)
        args = parser.parse_args()
        cron = CronTab(user=os.environ['USER'])
        # The first four whitespace-separated tokens of taskID become the
        # minute/hour/day/month cron fields below.
        # NOTE(review): the required 'time' argument is parsed but never used —
        # confirm whether the schedule should come from args['time'] instead.
        time = args['taskID'].split()
        job = cron.new(
            command=f"python3 messenger.py {args['userHash']} {args['task']} {args['taskID']}", comment=args['taskID'])
        scheduletime = time[0] + " " + time[1] + \
            " " + time[2] + " " + time[3] + " *"
        job.setall(scheduletime)
        cron.write()
class Done(Resource):
    """REST endpoint that deletes the crontab entry of a finished task."""

    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('userHash', required=True)
        parser.add_argument('taskID', required=True)
        args = parser.parse_args()
        cron = CronTab(user=os.environ['USER'])
        # NOTE(review): removing entries while iterating the CronTab may skip
        # jobs; CronTab.remove_all(comment=...) would be the safe form — confirm.
        for job in cron:
            if job.comment == args['taskID']:
                cron.remove(job)
        cron.write()
# --- Flask REST API wiring ---
app = Flask(__name__)
api = Api(app)
api.add_resource(Schedule, '/schedule')
# NOTE(review): the Done resource is never registered with the API — confirm
# whether a '/done' route is missing.
# MongoDB collection mapping chat-id hashes to Telegram user ids.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
userHashDb = myclient["UserHashDatabase"]
userCollection = userHashDb['users']
# Telegram bot: long-polling updater and its dispatcher.
updater = Updater(
    token=os.environ['TELEGRAM_TOKEN'], use_context=True)
dispatcher = updater.dispatcher
def start(update: Update, context: CallbackContext):
    """Handle /start: store a hash of the chat id and send it to the user."""
    current_id = update.effective_chat.id
    hashval = hashlib.sha256(str(current_id).encode()).hexdigest()
    # Register the chat only once; later /start calls just resend the code.
    if userCollection.count_documents(filter={"hash": hashval}) == 0:
        userCollection.insert_one({"hash": hashval, 'userid': str(current_id)})
    message = "Hey welcome to Symphony!!\nPaste this code in vscode to register\n\n" + hashval
    context.bot.send_message(
        chat_id=current_id, text=message)
# Register the /start command handler and begin polling Telegram.
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
# NOTE(review): start_polling() runs at import time, outside the __main__
# guard — confirm that is intended.
updater.start_polling()
if __name__ == '__main__':
    app.run()
| none | 1 | 2.45332 | 2 | |
utils/interfaces/_library/miscs.py | DataDog/system-tests | 3 | 6618149 | <filename>utils/interfaces/_library/miscs.py
# Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2021 Datadog, Inc.
""" Misc validations """
import re
from collections import Counter
from utils.tools import m
from utils.interfaces._core import BaseValidation
from utils.interfaces._library._utils import get_root_spans, _get_rid_from_span
class _TraceIdUniqueness(BaseValidation):
    """Checks that every reported trace id is unique across all payloads."""

    path_filters = r"/v[0-9]\.[0-9]+/traces"  # Should be implemented independently from the endpoint version

    is_success_on_expiry = False  # I need at least one value to be validated

    def __init__(self, uniqueness_exceptions):
        super().__init__()
        # Occurrence count per observed trace id.
        self.traces_ids = Counter()
        # Decides which trace ids are allowed to repeat.
        self.uniqueness_exceptions = uniqueness_exceptions

    def check(self, data):
        if not isinstance(data["request"]["content"], list):
            # Fixed "shoud" typo so the message matches the sibling validators.
            self.log_error(f"For {data['log_filename']}, traces should be an array")
            return

        for trace in data["request"]["content"]:
            if len(trace):
                span = trace[0]
                # At least one span observed: the verdict at expiry is meaningful.
                self.is_success_on_expiry = True

                if "trace_id" not in span:
                    self.set_failure(f"Can't find trace_id in request {data['log_filename']}")
                else:
                    trace_id = span["trace_id"]
                    self.traces_ids[trace_id] += 1

    def final_check(self):
        # Report duplicated ids unless explicitly allowed to repeat.
        for trace_id, count in self.traces_ids.items():
            if count > 1 and self.uniqueness_exceptions.should_be_unique(trace_id):
                self.log_error(f"Found duplicate trace id {trace_id}")
class _ReceiveRequestRootTrace(BaseValidation):
    """Asserts that a trace for a request has been sent to the agent"""

    path_filters = ["/v0.4/traces"]
    is_success_on_expiry = False

    def check(self, data):
        # Succeed as soon as any root span typed "web" shows up.
        for root_span in get_root_spans(data["request"]["content"]):
            if root_span.get("type") == "web":
                self.set_status(True)

    def set_expired(self):
        super().set_expired()
        if not self.is_success:
            self.log_error(
                f'Validation "{self.message}", nothing has been reported. No request root span with has been found'
            )
class _TracesValidation(BaseValidation):
    """Runs an arbitrary check on trace payloads.

    The validator may:
    * return a truthy value => the validation succeeds at expiry (payloads keep being checked)
    * return False/None    => no effect
    * raise                => the validation fails
    """

    path_filters = r"/v0\.[1-9]+/traces"

    def __init__(self, validator, is_success_on_expiry):
        super().__init__()
        self.validator = validator
        self.is_success_on_expiry = is_success_on_expiry

    def check(self, data):
        try:
            outcome = self.validator(data)
            if outcome:
                self.log_debug(f"Trace in {data['log_filename']} validates {m(self.message)}")
                self.is_success_on_expiry = True
        except Exception as e:
            self.set_failure(f"{m(self.message)} not validated: {e}\npayload is: {data['log_filename']}")
class _SpanValidation(BaseValidation):
    """Runs an arbitrary check on spans; restricted to one request when given.

    The validator may:
    * return a truthy value => the validation succeeds at expiry (spans keep being checked)
    * return False/None    => no effect
    * raise                => the validation fails
    """

    path_filters = "/v0.4/traces"

    def __init__(self, request, validator, is_success_on_expiry):
        super().__init__(request=request)
        self.is_success_on_expiry = is_success_on_expiry
        self.validator = validator

    def check(self, data):
        content = data["request"]["content"]
        if not isinstance(content, list):
            self.log_error(f"In {data['log_filename']}, traces should be an array")
            return  # the schema validation owns this failure

        for trace in content:
            for span in trace:
                # When bound to a request, skip spans that are not correlated.
                if self.rid and self.rid != _get_rid_from_span(span):
                    continue
                if self.rid:
                    self.log_debug(f"Found a trace for {m(self.message)}")

                try:
                    if self.validator(span):
                        self.log_debug(f"Trace in {data['log_filename']} validates {m(self.message)}")
                        self.is_success_on_expiry = True
                except Exception as e:
                    self.set_failure(f"{m(self.message)} not validated: {e}\nSpan is: {span}")
class _SpanTagValidation(BaseValidation):
    """Checks that spans carry the expected meta tags.

    If a request is provided, only spans related to that request are checked.
    Each key in ``tags`` must exist in the span's ``meta``; its value must be
    equal to (or, when ``value_as_regular_expression`` is set, fully match)
    the expected value.
    """

    path_filters = "/v0.4/traces"

    def __init__(self, request, tags, value_as_regular_expression):
        super().__init__(request=request)
        # Mapping of expected tag key -> expected value (or regex pattern).
        self.tags = tags
        self.value_as_regular_expression = value_as_regular_expression

    def check(self, data):
        if not isinstance(data["request"]["content"], list):
            self.log_error(f"In {data['log_filename']}, traces should be an array")
            return  # do not fail, it's schema's job

        for trace in data["request"]["content"]:
            for span in trace:
                if self.rid:
                    if self.rid != _get_rid_from_span(span):
                        continue
                    self.log_debug(f"Found a trace for {m(self.message)}")

                try:
                    # snake_case locals for consistency with the rest of the module
                    for tag_key in self.tags:
                        if tag_key not in span["meta"]:
                            raise Exception(f"{tag_key} tag not found in span's meta")

                        expected_value = self.tags[tag_key]
                        actual_value = span["meta"][tag_key]

                        if self.value_as_regular_expression:
                            # The whole tag value must match the pattern.
                            if not re.fullmatch(expected_value, actual_value):
                                raise Exception(
                                    f'{tag_key} tag value is "{actual_value}", and should match regex "{expected_value}"'
                                )
                        else:
                            if expected_value != actual_value:
                                raise Exception(
                                    f'{tag_key} tag in span\'s meta should be "{expected_value}", not "{actual_value}"'
                                )

                    self.log_debug(f"Trace in {data['log_filename']} validates {m(self.message)}")
                    self.is_success_on_expiry = True
                except Exception as e:
                    self.set_failure(
                        f"{m(self.message)} not validated in {data['log_filename']}:\n{e}\nSpan is: {span}"
                    )
class _TraceExistence(BaseValidation):
    """Succeeds when at least one span correlated to the request is reported,
    optionally requiring a given span type among the correlated spans."""

    def __init__(self, request, span_type=None):
        super().__init__(request=request)
        # When set, a span with this exact "type" must exist for the request.
        self.span_type = span_type

    path_filters = "/v0.4/traces"

    def check(self, data):
        if not isinstance(data["request"]["content"], list):
            # do not fail here, it's schema's job, simply ignore it
            self.log_error(f"{data['log_filename']} content should be an array")
            return

        diagnostics = ["Diagnostics:"]
        span_types = []
        span_count = len(span_types)  # always 0 at this point
        for trace in data["request"]["content"]:
            for span in trace:
                if self.rid == _get_rid_from_span(span):
                    # Collect every span of the trace containing a matching span.
                    # NOTE(review): if several spans of one trace match the rid,
                    # the whole trace is re-counted once per match ("continue"
                    # rather than "break") — confirm this is intended.
                    for correlated_span in trace:
                        span_count = span_count + 1
                        span_types.append(correlated_span.get("type"))
                        diagnostics.append(str(correlated_span))
                    continue

        if span_count > 0:
            if self.span_type is None:
                self.log_debug(f"Found a trace for {self.message}")
                self.set_status(True)
            elif self.span_type in span_types:
                self.log_debug(f"Found a span with type {self.span_type}")
                self.set_status(True)
            else:
                self.log_error(f"Did not find span type '{self.span_type}' in reported span types: {span_types}")
                self.log_error("\n".join(diagnostics))
| <filename>utils/interfaces/_library/miscs.py
# Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2021 Datadog, Inc.
""" Misc validations """
import re
from collections import Counter
from utils.tools import m
from utils.interfaces._core import BaseValidation
from utils.interfaces._library._utils import get_root_spans, _get_rid_from_span
class _TraceIdUniqueness(BaseValidation):
    """Checks that every reported trace id is unique across all payloads."""

    path_filters = r"/v[0-9]\.[0-9]+/traces"  # Should be implemented independently from the endpoint version

    is_success_on_expiry = False  # I need at least one value to be validated

    def __init__(self, uniqueness_exceptions):
        super().__init__()
        # Occurrence count per observed trace id.
        self.traces_ids = Counter()
        # Decides which trace ids are allowed to repeat.
        self.uniqueness_exceptions = uniqueness_exceptions

    def check(self, data):
        if not isinstance(data["request"]["content"], list):
            # NOTE(review): "shoud" typo in this log message.
            self.log_error(f"For {data['log_filename']}, traces shoud be an array")
            return

        for trace in data["request"]["content"]:
            if len(trace):
                span = trace[0]
                # At least one span observed: the verdict at expiry is meaningful.
                self.is_success_on_expiry = True

                if "trace_id" not in span:
                    self.set_failure(f"Can't find trace_id in request {data['log_filename']}")
                else:
                    trace_id = span["trace_id"]
                    self.traces_ids[trace_id] += 1

    def final_check(self):
        # Report duplicated ids unless explicitly allowed to repeat.
        for trace_id, count in self.traces_ids.items():
            if count > 1 and self.uniqueness_exceptions.should_be_unique(trace_id):
                self.log_error(f"Found duplicate trace id {trace_id}")
class _ReceiveRequestRootTrace(BaseValidation):
    """Asserts that a trace for a request has been sent to the agent"""

    path_filters = ["/v0.4/traces"]
    is_success_on_expiry = False

    def check(self, data):
        # Any root span typed "web" makes the validation succeed.
        for root_span in get_root_spans(data["request"]["content"]):
            if root_span.get("type") != "web":
                continue
            self.set_status(True)

    def set_expired(self):
        super().set_expired()
        if not self.is_success:
            self.log_error(
                f'Validation "{self.message}", nothing has been reported. No request root span with has been found'
            )
class _TracesValidation(BaseValidation):
    """ will run an arbitrary check on traces. Validator function can :

    * returns true => validation will be validated at the end (but trace will continue to be checked)
    * returns False or None => nothing is done
    * raise an exception => validation will fail
    """

    path_filters = r"/v0\.[1-9]+/traces"

    def __init__(self, validator, is_success_on_expiry):
        super().__init__()
        self.is_success_on_expiry = is_success_on_expiry
        self.validator = validator

    def check(self, data):
        try:
            if self.validator(data):
                self.log_debug(f"Trace in {data['log_filename']} validates {m(self.message)}")
                self.is_success_on_expiry = True
        except Exception as e:
            # A raising validator is a hard failure for this validation.
            self.set_failure(f"{m(self.message)} not validated: {e}\npayload is: {data['log_filename']}")
class _SpanValidation(BaseValidation):
    """ will run an arbitrary check on spans. If a request is provided, only spans
    related to this request will be checked.

    Validator function can :
    * returns true => validation will be validated at the end (but trace will continue to be checked)
    * returns False or None => nothing is done
    * raise an exception => validation will fail
    """

    path_filters = "/v0.4/traces"

    def __init__(self, request, validator, is_success_on_expiry):
        super().__init__(request=request)
        self.validator = validator
        self.is_success_on_expiry = is_success_on_expiry

    def check(self, data):
        if not isinstance(data["request"]["content"], list):
            self.log_error(f"In {data['log_filename']}, traces should be an array")
            return  # do not fail, it's schema's job

        for trace in data["request"]["content"]:
            for span in trace:
                if self.rid:
                    # Restrict the check to spans correlated with the request.
                    if self.rid != _get_rid_from_span(span):
                        continue
                    self.log_debug(f"Found a trace for {m(self.message)}")

                try:
                    if self.validator(span):
                        self.log_debug(f"Trace in {data['log_filename']} validates {m(self.message)}")
                        self.is_success_on_expiry = True
                except Exception as e:
                    self.set_failure(f"{m(self.message)} not validated: {e}\nSpan is: {span}")
class _SpanTagValidation(BaseValidation):
    """Checks span meta tags. If a request is provided, only spans related to
    that request are checked.
    """

    path_filters = "/v0.4/traces"

    def __init__(self, request, tags, value_as_regular_expression):
        super().__init__(request=request)
        # Mapping of expected tag key -> expected value (or regex pattern).
        self.tags = tags
        self.value_as_regular_expression = value_as_regular_expression

    def check(self, data):
        if not isinstance(data["request"]["content"], list):
            self.log_error(f"In {data['log_filename']}, traces should be an array")
            return  # do not fail, it's schema's job

        for trace in data["request"]["content"]:
            for span in trace:
                if self.rid:
                    if self.rid != _get_rid_from_span(span):
                        continue
                    self.log_debug(f"Found a trace for {m(self.message)}")

                try:
                    for tagKey in self.tags:
                        if tagKey not in span["meta"]:
                            raise Exception(f"{tagKey} tag not found in span's meta")

                        expectValue = self.tags[tagKey]
                        actualValue = span["meta"][tagKey]

                        if self.value_as_regular_expression:
                            # The whole tag value must match the pattern.
                            if not re.compile(expectValue).fullmatch(actualValue):
                                raise Exception(
                                    f'{tagKey} tag value is "{actualValue}", and should match regex "{expectValue}"'
                                )
                        else:
                            if expectValue != actualValue:
                                raise Exception(
                                    f'{tagKey} tag in span\'s meta should be "{expectValue}", not "{actualValue}"'
                                )

                    self.log_debug(f"Trace in {data['log_filename']} validates {m(self.message)}")
                    self.is_success_on_expiry = True
                except Exception as e:
                    self.set_failure(
                        f"{m(self.message)} not validated in {data['log_filename']}:\n{e}\nSpan is: {span}"
                    )
class _TraceExistence(BaseValidation):
    """Succeeds when at least one span correlated to the request is reported,
    optionally requiring a given span type among the correlated spans."""

    def __init__(self, request, span_type=None):
        super().__init__(request=request)
        # When set, a span with this exact "type" must exist for the request.
        self.span_type = span_type

    path_filters = "/v0.4/traces"

    def check(self, data):
        if not isinstance(data["request"]["content"], list):
            # do not fail here, it's schema's job, simply ignore it
            self.log_error(f"{data['log_filename']} content should be an array")
            return

        diagnostics = ["Diagnostics:"]
        span_types = []
        span_count = len(span_types)  # always 0 at this point
        for trace in data["request"]["content"]:
            for span in trace:
                if self.rid == _get_rid_from_span(span):
                    # Collect every span of the trace containing a matching span.
                    # NOTE(review): if several spans of one trace match the rid,
                    # the whole trace is re-counted once per match ("continue"
                    # rather than "break") — confirm this is intended.
                    for correlated_span in trace:
                        span_count = span_count + 1
                        span_types.append(correlated_span.get("type"))
                        diagnostics.append(str(correlated_span))
                    continue

        if span_count > 0:
            if self.span_type is None:
                self.log_debug(f"Found a trace for {self.message}")
                self.set_status(True)
            elif self.span_type in span_types:
                self.log_debug(f"Found a span with type {self.span_type}")
                self.set_status(True)
            else:
                self.log_error(f"Did not find span type '{self.span_type}' in reported span types: {span_types}")
                self.log_error("\n".join(diagnostics))
| en | 0.843743 | # Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2021 Datadog, Inc. Misc validations # Should be implemented independently from the endpoint version # I need at least one value to be validated Asserts that a trace for a request has been sent to the agent will run an arbitrary check on traces. Validator function can : * returns true => validation will be validated at the end (but trace will continue to be checked) * returns False or None => nothing is done * raise an exception => validation will fail will run an arbitrary check on spans. If a request is provided, only span related to this request will be checked. Validator function can : * returns true => validation will be validated at the end (but trace will continue to be checked) * returns False or None => nothing is done * raise an exception => validation will fail # do not fail, it's schema's job will run an arbitrary check on spans. If a request is provided, only span # do not fail, it's schema's job # do not fail here, it's schema's job, simply ignore it | 2.028438 | 2 |
src/bit_counter.py | RasPat1/wordle-world-whirl | 0 | 6618150 | <filename>src/bit_counter.py
import functools
class BitCounter:
    """Count set bits (population count) of arbitrarily large non-negative ints.

    Large numbers are processed in fixed-size chunks of ``bitmask_size`` bits.

    Note: the former ``@functools.cache`` decorators were removed — caching an
    instance method keys on ``self`` and keeps every instance and every
    distinct integer argument alive for the life of the process (ruff B019),
    and the loops below are already cheap.
    """

    DEFAULT_BITMASK_SIZE = 50

    def __init__(self, bitmask_size=DEFAULT_BITMASK_SIZE):
        # Width (in bits) of each chunk extracted from big numbers.
        self.bitmask_size = bitmask_size
        # Mask with the low `bitmask_size` bits set.
        self.bitmask = (0b1 << bitmask_size) - 1

    def countSetBits(self, binary_number):
        """Return the number of set bits using Kernighan's algorithm."""
        count = 0
        while binary_number:
            binary_number &= (binary_number - 1)  # clear the lowest set bit
            count += 1
        return count

    def count_set_bits(self, number):
        """Recursively count set bits, `bitmask_size` bits at a time."""
        if number <= 2 ** self.bitmask_size + 1:
            return self.countSetBits(number)
        low_chunk = number & self.bitmask
        return self.countSetBits(low_chunk) + self.count_set_bits(number >> self.bitmask_size)

    def count_set_bits_loop(self, number):
        """Iterative variant of count_set_bits (no recursion depth limit)."""
        bit_count = 0
        while number > 0:
            bit_count += self.countSetBits(number & self.bitmask)
            number >>= self.bitmask_size
        return bit_count
| <filename>src/bit_counter.py
import functools
class BitCounter:
    """Population-count helper for arbitrarily large non-negative integers.

    Big values are split into ``bitmask_size``-bit chunks whose set bits are
    counted and summed.
    """

    DEFAULT_BITMASK_SIZE = 50

    def __init__(self, bitmask_size=DEFAULT_BITMASK_SIZE):
        self.bitmask_size = bitmask_size
        self.bitmask = (1 << bitmask_size) - 1

    @functools.cache
    def countSetBits(self, binary_number):
        """Return the number of set bits in *binary_number*."""
        return bin(binary_number).count("1")

    @functools.cache
    def count_set_bits(self, number):
        """Count set bits recursively, one chunk per recursion level."""
        if number <= 2 ** self.bitmask_size + 1:
            return self.countSetBits(number)
        low, high = number & self.bitmask, number >> self.bitmask_size
        return self.countSetBits(low) + self.count_set_bits(high)

    @functools.cache
    def count_set_bits_loop(self, number):
        """Count set bits iteratively, consuming one chunk per pass."""
        total = 0
        while number > 0:
            total += self.countSetBits(number & self.bitmask)
            number >>= self.bitmask_size
        return total
| none | 1 | 3.521015 | 4 |