max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
dnd.py
|
josephcslater/Do-Not-DIsturb
| 2
|
6625351
|
<filename>dnd.py
#! /Users/jslater/anaconda3/bin/python
# $Id: dnd.py 2015-04-27 19:05:00Z $
# Author: <NAME> <<EMAIL>>
# Copyright: This script has been placed in the public domain.
# Version 1.2: Added help via --help and -h
# Version 1.1: Added ability to set by time.
import sys
import os
import subprocess
import time
from tkinter import *
"""
Change Notification Center status on Mac to "Do not disturb" for
a) n minutes, (n>4)
b) or n hours, (n<=4)
c) or until n time where n is in hour:minute format using 12 hour clock.
The concept of the split mode is that since Notification Center has a built-in
timer of 24 hours (1 day), usage of this script will be limited to 1/2 day or
less. Any longer... modify the script or just turn it off manually?
"""
# print(print_help())
__docformat__ = 'reStructuredText'
# I should have used argparse: https://docs.python.org/3/library/argparse.html
import logging
# set your log level
logging.basicConfig(level=logging.ERROR)
# logging.basicConfig(level=logging.DEBUG)
logging.debug('This is a log message')
os.environ['PATH'] = os.path.normpath(
os.environ['PATH'] + ':/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:')
a1 = subprocess.check_output(
['defaults -currentHost write ~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturb -boolean true'], shell=True)
a = subprocess.check_output(['date -u +"%Y-%m-%d %H:%M:%S +0000"'], shell=True)
b = a.decode('utf-8')
a2 = subprocess.Popen(
['defaults -currentHost write ~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturbDate -date "' + b + '"'], shell=True)
logging.info('Info only. Debug mode')
#logging.warning('Warning only')
a3 = subprocess.Popen(['killall NotificationCenter'], shell=True)
curhour = time.localtime().tm_hour
curmin = time.localtime().tm_min
logging.debug(curhour)
logging.debug(curmin)
if sys.argv.__len__() == 1:
endtime = input('When do you want silence to end? ')
logging.debug(endtime)
else:
endtime = sys.argv[1]
if endtime == '--help' or endtime == '-h':
print("Changed Notification Center status on Mac to 'Do not disturb' for: \n\
a) n minutes, (n>4)\n\
b) or n hours. (n<=4)\n\
c) or until n time where n is in hour:minute format using 12 hour clock.\n\
The concept of the split mode is that since Notification Center has a built-in timer of 24 hours\
(1 day), usage of this script will be limited to 1/2 day or less.")
sys.exit()
elif str.find(endtime, ':') != -1:
colloc = str.find(endtime, ':')
hour = float(endtime[:colloc])
min = float(endtime[(colloc + 1):])
logging.debug(hour)
logging.debug(min)
if hour < 7:
hour = hour + 12
numin = (hour - curhour) * 60 + (min - curmin)
else:
numin = float(endtime)
if numin < 4.01:
numin = numin * 60
colloc = 0
logging.debug('Plan on sleeping for this many minutes.')
logging.debug(numin)
if numin > 4.01 or colloc != 0:
a5 = subprocess.Popen(['killall sleep -s;sleep ' + str(numin * 60) + ';defaults -currentHost write \
~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturb -boolean false; \
defaults -currentHost delete ~/Library/Preferences/ByHost/com.apple.notificationcenterui \
doNotDisturbDate; killall NotificationCenter'], shell=True)
print('Sleeping for ' + str(numin) + ' min.')
logging.debug('Minute Mode')
else:
a5 = subprocess.Popen(['sleep ' + str(numin * 3600) + ';defaults -currentHost write \
~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturb -boolean false; \
defaults -currentHost delete ~/Library/Preferences/ByHost/com.apple.notificationcenterui \
doNotDisturbDate; killall NotificationCenter'], shell=True)
numin = numin
logging.debug('Sleeping for ' + str(numin) + ' min.')
logging.warning(
'Hour Mode- to never be entered again! Please report this error.')
wakehour = curhour
logging.debug('wakehour temp')
logging.debug(wakehour)
wakemin = numin + curmin
logging.debug('wakemin temp')
logging.debug(wakemin)
while wakemin > 59:
logging.debug('swapping min for hour')
wakemin = wakemin - 60
logging.debug(wakemin)
wakehour = wakehour + 1
logging.debug(wakehour)
strwakemin = str(wakemin)
if wakehour > 12:
wakehour = wakehour - 12
logging.debug(strwakemin)
logging.debug(str.find(strwakemin, '.'))
if str.find(strwakemin, '.') == 1:
strwakemin = '0' + strwakemin
logging.debug(strwakemin)
print('Do not disturb set until ' + str(wakehour) + ':' + strwakemin[:2]
+ '. (' + str(numin)[:-2] + ' minutes.)')
|
<filename>dnd.py
#! /Users/jslater/anaconda3/bin/python
# $Id: dnd.py 2015-04-27 19:05:00Z $
# Author: <NAME> <<EMAIL>>
# Copyright: This script has been placed in the public domain.
# Version 1.2: Added help via --help and -h
# Version 1.1: Added ability to set by time.
import sys
import os
import subprocess
import time
from tkinter import *
"""
Change Notification Center status on Mac to "Do not disturb" for
a) n minutes, (n>4)
b) or n hours, (n<=4)
c) or until n time where n is in hour:minute format using 12 hour clock.
The concept of the split mode is that since Notification Center has a built-in
timer of 24 hours (1 day), usage of this script will be limited to 1/2 day or
less. Any longer... modify the script or just turn it off manually?
"""
# print(print_help())
__docformat__ = 'reStructuredText'
# I should have used argparse: https://docs.python.org/3/library/argparse.html
import logging
# set your log level
logging.basicConfig(level=logging.ERROR)
# logging.basicConfig(level=logging.DEBUG)
logging.debug('This is a log message')
os.environ['PATH'] = os.path.normpath(
os.environ['PATH'] + ':/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:')
a1 = subprocess.check_output(
['defaults -currentHost write ~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturb -boolean true'], shell=True)
a = subprocess.check_output(['date -u +"%Y-%m-%d %H:%M:%S +0000"'], shell=True)
b = a.decode('utf-8')
a2 = subprocess.Popen(
['defaults -currentHost write ~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturbDate -date "' + b + '"'], shell=True)
logging.info('Info only. Debug mode')
#logging.warning('Warning only')
a3 = subprocess.Popen(['killall NotificationCenter'], shell=True)
curhour = time.localtime().tm_hour
curmin = time.localtime().tm_min
logging.debug(curhour)
logging.debug(curmin)
if sys.argv.__len__() == 1:
endtime = input('When do you want silence to end? ')
logging.debug(endtime)
else:
endtime = sys.argv[1]
if endtime == '--help' or endtime == '-h':
print("Changed Notification Center status on Mac to 'Do not disturb' for: \n\
a) n minutes, (n>4)\n\
b) or n hours. (n<=4)\n\
c) or until n time where n is in hour:minute format using 12 hour clock.\n\
The concept of the split mode is that since Notification Center has a built-in timer of 24 hours\
(1 day), usage of this script will be limited to 1/2 day or less.")
sys.exit()
elif str.find(endtime, ':') != -1:
colloc = str.find(endtime, ':')
hour = float(endtime[:colloc])
min = float(endtime[(colloc + 1):])
logging.debug(hour)
logging.debug(min)
if hour < 7:
hour = hour + 12
numin = (hour - curhour) * 60 + (min - curmin)
else:
numin = float(endtime)
if numin < 4.01:
numin = numin * 60
colloc = 0
logging.debug('Plan on sleeping for this many minutes.')
logging.debug(numin)
if numin > 4.01 or colloc != 0:
a5 = subprocess.Popen(['killall sleep -s;sleep ' + str(numin * 60) + ';defaults -currentHost write \
~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturb -boolean false; \
defaults -currentHost delete ~/Library/Preferences/ByHost/com.apple.notificationcenterui \
doNotDisturbDate; killall NotificationCenter'], shell=True)
print('Sleeping for ' + str(numin) + ' min.')
logging.debug('Minute Mode')
else:
a5 = subprocess.Popen(['sleep ' + str(numin * 3600) + ';defaults -currentHost write \
~/Library/Preferences/ByHost/com.apple.notificationcenterui doNotDisturb -boolean false; \
defaults -currentHost delete ~/Library/Preferences/ByHost/com.apple.notificationcenterui \
doNotDisturbDate; killall NotificationCenter'], shell=True)
numin = numin
logging.debug('Sleeping for ' + str(numin) + ' min.')
logging.warning(
'Hour Mode- to never be entered again! Please report this error.')
wakehour = curhour
logging.debug('wakehour temp')
logging.debug(wakehour)
wakemin = numin + curmin
logging.debug('wakemin temp')
logging.debug(wakemin)
while wakemin > 59:
logging.debug('swapping min for hour')
wakemin = wakemin - 60
logging.debug(wakemin)
wakehour = wakehour + 1
logging.debug(wakehour)
strwakemin = str(wakemin)
if wakehour > 12:
wakehour = wakehour - 12
logging.debug(strwakemin)
logging.debug(str.find(strwakemin, '.'))
if str.find(strwakemin, '.') == 1:
strwakemin = '0' + strwakemin
logging.debug(strwakemin)
print('Do not disturb set until ' + str(wakehour) + ':' + strwakemin[:2]
+ '. (' + str(numin)[:-2] + ' minutes.)')
|
en
| 0.796274
|
#! /Users/jslater/anaconda3/bin/python # $Id: dnd.py 2015-04-27 19:05:00Z $ # Author: <NAME> <<EMAIL>> # Copyright: This script has been placed in the public domain. # Version 1.2: Added help via --help and -h # Version 1.1: Added ability to set by time. Change Notification Center status on Mac to "Do not disturb" for a) n minutes, (n>4) b) or n hours, (n<=4) c) or until n time where n is in hour:minute format using 12 hour clock. The concept of the split mode is that since Notification Center has a built-in timer of 24 hours (1 day), usage of this script will be limited to 1/2 day or less. Any longer... modify the script or just turn it off manually? # print(print_help()) # I should have used argparse: https://docs.python.org/3/library/argparse.html # set your log level # logging.basicConfig(level=logging.DEBUG) #logging.warning('Warning only')
| 2.25917
| 2
|
scripts_vcoco/models/model.py
|
perryshao/MSRefNet
| 0
|
6625352
|
<filename>scripts_vcoco/models/model.py<gh_stars>0
# coding=utf-8
from __future__ import print_function, division
import torch
import torch.nn as nn
import sys
sys.path.append("..")
import utils.pool_pairing as ROI
import torchvision.models as models
lin_size = 1024
ids = 80
context_size = 1024
sp_size = 1024
mul = 3
deep = 512
pool_size = (10, 10)
pool_size_pose = (18, 5, 5)
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.size()[0], -1)
class VSGNet(nn.Module):
def __init__(self):
super(VSGNet, self).__init__()
model = models.resnet152(pretrained=True)
self.flat = Flatten()
self.Conv_pretrain = nn.Sequential(*list(model.children())[0:7]) ## Resnets,resnext
######### Convolutional Blocks for human,objects and the context##############################
self.Conv_people = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.Conv_objects = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.Conv_context = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.lin_peo = nn.Sequential(
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_obj = nn.Sequential(
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_con = nn.Sequential(
nn.Linear(1028, 512),
nn.ReLU(),
)
###############################################################################################
##### Attention Feature Model######
self.conv_sp_map = nn.Sequential(
# nn.Conv2d(2, 64, kernel_size=(5, 5)),
nn.Conv2d(3, 64, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(64, 32, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.AvgPool2d((13, 13), padding=0, stride=(1, 1)),
# nn.Linear(32,1024),
# nn.ReLU()
)
self.conv_single_map = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(64, 32, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.AvgPool2d((13, 13), padding=0, stride=(1, 1)),
# nn.Linear(32,1024),
# nn.ReLU()
)
self.spmap_up = nn.Sequential(
nn.Linear(32, 512),
nn.ReLU(),
)
self.posemap_up = nn.Sequential(
nn.Linear(32, 512),
nn.ReLU(),
)
#######################################
### Prediction Model for attention features#######
self.lin_spmap_tail = nn.Sequential(
nn.Linear(512, 29),
)
##################################################
######### Graph Model Basic Structure ########################
self.peo_to_obj_w = nn.Sequential(
nn.Linear(1024, 1024),
nn.ReLU(),
)
self.obj_to_peo_w = nn.Sequential(
nn.Linear(1024, 1024),
nn.ReLU(),
)
#################################################################
# Interaction prediction model for visual features######################
self.lin_single_head = nn.Sequential(
# nn.Linear(2048,1),
# nn.Dropout2d(p=0.5),
nn.Linear(lin_size * 3 + 4, 1024),
# nn.Linear(lin_size*3, 1024),
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_single_tail = nn.Sequential(
# nn.ReLU(),
nn.Linear(512, 1),
# nn.Linear(10,1),
)
#################################################################
########## Prediction model for visual features#################
self.lin_visual_head = nn.Sequential(
# nn.Linear(2048, 29),
# nn.Dropout2d(p=0.5),
nn.Linear(lin_size * 3 + 4, 1024),
# nn.Linear(lin_size*3, 1024),
# nn.Linear(lin_size*3+4+sp_size, 1024),
nn.Linear(1024, 512),
nn.ReLU(),
# nn.ReLU(),
)
self.lin_visual_tail = nn.Sequential(
nn.Linear(512, 29),
)
self.lin_dim_down = nn.Sequential(
nn.Linear(1536, 1024),
nn.Linear(1024, 512)
)
self.w2v_fc = nn.Sequential(
nn.Linear(80, 512),
nn.Linear(512, 512)
)
self.fc_obj = nn.Sequential(
nn.Linear(1024, 512),
nn.ReLU(),
)
################################################
####### Prediction model for graph features##################
self.lin_graph_head = nn.Sequential(
# nn.Linear(2048, 29),
# nn.Dropout2d(p=0.5),
nn.Linear(lin_size * 2, 1024),
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_graph_tail = nn.Sequential(
nn.Linear(512, 29),
)
########################################
self.sigmoid = nn.Sigmoid()
def forward(self, x, pairs_info, pairs_info_augmented, image_id, flag_,
phase):
out1 = self.Conv_pretrain(x)
rois_people, rois_objects, spatial_locs, union_box, object_one_hot, pose_box = ROI.get_pool_loc(out1, image_id, flag_,
size=pool_size,
spatial_scale=25,
batch_size=len(
pairs_info))
pose_box = torch.unsqueeze(pose_box, 1)
### Defining The Pooling Operations #######
x, y = out1.size()[2], out1.size()[3] # out1.size()=(batch,1024,25,25)
hum_pool = nn.AvgPool2d(pool_size, padding=0, stride=(1, 1))
obj_pool = nn.AvgPool2d(pool_size, padding=0, stride=(1, 1))
context_pool = nn.AvgPool2d((x, y), padding=0, stride=(1, 1))
#################################################
### Human###
residual_people = rois_people
res_people = self.Conv_people(rois_people) + residual_people
res_av_people = hum_pool(res_people)
out2_people = self.flat(res_av_people)
###########
##Objects##
residual_objects = rois_objects
res_objects = self.Conv_objects(rois_objects) + residual_objects
res_av_objects = obj_pool(res_objects)
out2_objects = self.flat(res_av_objects)
#############
#### Context ######
residual_context = out1
res_context = self.Conv_context(out1) + residual_context
res_av_context = context_pool(res_context)
out2_context = self.flat(res_av_context)
#################
##Attention Features## # 公式5
out2_union = self.spmap_up(self.flat(self.conv_sp_map(union_box)))
############################
#### Making Essential Pairing##########
pairs, people, objects_only, pair_obj_one_out = ROI.pairing(out2_people, out2_objects, out2_context,
spatial_locs, pairs_info,
object_one_hot)
####################################
### Muti-stream caculate##########
single_people = pairs[:, 0:1024]
single_object = pairs[:, 1024:2048]
single_context = pairs[:, 2048:]
single_people = self.lin_peo(single_people)
single_object = self.lin_obj(single_object)
single_context = self.lin_con(single_context)
out2_pose = self.posemap_up(self.flat(self.conv_single_map(pose_box)))
lin_stream_p = single_people * out2_pose
lin_stream_o = single_object
w2v_obj = self.w2v_fc(pair_obj_one_out)
lin_stream_c = single_context * out2_union
lin_fix_o = torch.cat((lin_stream_o, w2v_obj), 1)
lin_final_o = self.fc_obj(lin_fix_o)
lin_join = torch.cat((lin_stream_p, lin_final_o, lin_stream_c), 1)
lin_join = self.lin_dim_down(lin_join)
###### Interaction Probability########## i_ho
lin_single = self.lin_single_tail(lin_join)
interaction_prob = self.sigmoid(lin_single)
####################################################
######body_part stream##########
####################################################
####### Graph Model Base Structure##################
people_t = people
objects_only = objects_only
combine_g = []
people_f = []
objects_f = []
pairs_f = []
start_p = 0
start_o = 0
start_c = 0
for batch_num, l in enumerate(pairs_info):
####Slicing##########
people_this_batch = people_t[start_p:start_p + int(l[0])]
no_peo = len(people_this_batch)
objects_this_batch = objects_only[start_o:start_o + int(l[1])][1:]
no_objects_this_batch = objects_only[start_o:start_o + int(l[1])][0]
no_obj = len(objects_this_batch)
interaction_prob_this_batch = interaction_prob[start_c:start_c + int(l[1]) * int(l[0])]
if no_obj == 0:
people_this_batch_r = people_this_batch
objects_this_batch_r = no_objects_this_batch.view([1, 1024])
else:
peo_to_obj_this_batch = torch.stack(
[torch.cat((i, j)) for ind_p, i in enumerate(people_this_batch) for ind_o, j in
enumerate(objects_this_batch)])
obj_to_peo_this_batch = torch.stack(
[torch.cat((i, j)) for ind_p, i in enumerate(objects_this_batch) for ind_o, j in
enumerate(people_this_batch)])
###################
####### Adjecency###########
adj_l = []
adj_po = torch.zeros([no_peo, no_obj]).cuda()
adj_op = torch.zeros([no_obj, no_peo]).cuda()
for index_probs, probs in enumerate(interaction_prob_this_batch):
if index_probs % (no_obj + 1) != 0:
adj_l.append(probs)
adj_po = torch.cat(adj_l).view(len(adj_l), 1)
adj_op = adj_po
##############################
###Finding Out Refined Features######
people_this_batch_r = people_this_batch + torch.mm(adj_po.view([no_peo, no_obj]),
self.peo_to_obj_w(objects_this_batch))
objects_this_batch_r = objects_this_batch + torch.mm(adj_op.view([no_peo, no_obj]).t(),
self.obj_to_peo_w(people_this_batch))
objects_this_batch_r = torch.cat((no_objects_this_batch.view([1, 1024]), objects_this_batch_r))
#############################
#### Restructuring ####
people_f.append(people_this_batch_r)
people_t_f = people_this_batch_r
objects_f.append(objects_this_batch_r)
objects_t_f = objects_this_batch_r
pairs_f.append(torch.stack(
[torch.cat((i, j)) for ind_p, i in enumerate(people_t_f) for ind_o, j in
enumerate(objects_t_f)]))
# import pdb;pdb.set_trace()
##############################
###Loop increment for next batch##
start_p += int(l[0])
start_o += int(l[1])
start_c += int(l[0]) * int(l[1])
#####################
people_graph = torch.cat(people_f)
objects_graph = torch.cat(objects_f)
pairs_graph = torch.cat(pairs_f)
######################################################################################################################################
#### Prediction from visual features####
# lin_h = self.lin_visual_head(pairs)
# lin_t = lin_h * out2_union
lin_visual = self.lin_visual_tail(lin_join)
##############################
#### Prediction from graph features####
lin_graph_h = self.lin_graph_head(pairs_graph)
lin_graph_t = lin_graph_h * out2_union
lin_graph = self.lin_graph_tail(lin_graph_t)
####################################
##### Prediction from attention features #######
lin_att = self.lin_spmap_tail(out2_union)
#############################
return [lin_visual, lin_single, lin_graph, lin_att] # ,lin_obj_ids]
|
<filename>scripts_vcoco/models/model.py<gh_stars>0
# coding=utf-8
from __future__ import print_function, division
import torch
import torch.nn as nn
import sys
sys.path.append("..")
import utils.pool_pairing as ROI
import torchvision.models as models
lin_size = 1024
ids = 80
context_size = 1024
sp_size = 1024
mul = 3
deep = 512
pool_size = (10, 10)
pool_size_pose = (18, 5, 5)
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.size()[0], -1)
class VSGNet(nn.Module):
def __init__(self):
super(VSGNet, self).__init__()
model = models.resnet152(pretrained=True)
self.flat = Flatten()
self.Conv_pretrain = nn.Sequential(*list(model.children())[0:7]) ## Resnets,resnext
######### Convolutional Blocks for human,objects and the context##############################
self.Conv_people = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.Conv_objects = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.Conv_context = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False),
nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(inplace=False),
)
self.lin_peo = nn.Sequential(
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_obj = nn.Sequential(
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_con = nn.Sequential(
nn.Linear(1028, 512),
nn.ReLU(),
)
###############################################################################################
##### Attention Feature Model######
self.conv_sp_map = nn.Sequential(
# nn.Conv2d(2, 64, kernel_size=(5, 5)),
nn.Conv2d(3, 64, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(64, 32, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.AvgPool2d((13, 13), padding=0, stride=(1, 1)),
# nn.Linear(32,1024),
# nn.ReLU()
)
self.conv_single_map = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.Conv2d(64, 32, kernel_size=(5, 5)),
nn.MaxPool2d(kernel_size=(2, 2)),
nn.AvgPool2d((13, 13), padding=0, stride=(1, 1)),
# nn.Linear(32,1024),
# nn.ReLU()
)
self.spmap_up = nn.Sequential(
nn.Linear(32, 512),
nn.ReLU(),
)
self.posemap_up = nn.Sequential(
nn.Linear(32, 512),
nn.ReLU(),
)
#######################################
### Prediction Model for attention features#######
self.lin_spmap_tail = nn.Sequential(
nn.Linear(512, 29),
)
##################################################
######### Graph Model Basic Structure ########################
self.peo_to_obj_w = nn.Sequential(
nn.Linear(1024, 1024),
nn.ReLU(),
)
self.obj_to_peo_w = nn.Sequential(
nn.Linear(1024, 1024),
nn.ReLU(),
)
#################################################################
# Interaction prediction model for visual features######################
self.lin_single_head = nn.Sequential(
# nn.Linear(2048,1),
# nn.Dropout2d(p=0.5),
nn.Linear(lin_size * 3 + 4, 1024),
# nn.Linear(lin_size*3, 1024),
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_single_tail = nn.Sequential(
# nn.ReLU(),
nn.Linear(512, 1),
# nn.Linear(10,1),
)
#################################################################
########## Prediction model for visual features#################
self.lin_visual_head = nn.Sequential(
# nn.Linear(2048, 29),
# nn.Dropout2d(p=0.5),
nn.Linear(lin_size * 3 + 4, 1024),
# nn.Linear(lin_size*3, 1024),
# nn.Linear(lin_size*3+4+sp_size, 1024),
nn.Linear(1024, 512),
nn.ReLU(),
# nn.ReLU(),
)
self.lin_visual_tail = nn.Sequential(
nn.Linear(512, 29),
)
self.lin_dim_down = nn.Sequential(
nn.Linear(1536, 1024),
nn.Linear(1024, 512)
)
self.w2v_fc = nn.Sequential(
nn.Linear(80, 512),
nn.Linear(512, 512)
)
self.fc_obj = nn.Sequential(
nn.Linear(1024, 512),
nn.ReLU(),
)
################################################
####### Prediction model for graph features##################
self.lin_graph_head = nn.Sequential(
# nn.Linear(2048, 29),
# nn.Dropout2d(p=0.5),
nn.Linear(lin_size * 2, 1024),
nn.Linear(1024, 512),
nn.ReLU(),
)
self.lin_graph_tail = nn.Sequential(
nn.Linear(512, 29),
)
########################################
self.sigmoid = nn.Sigmoid()
def forward(self, x, pairs_info, pairs_info_augmented, image_id, flag_,
phase):
out1 = self.Conv_pretrain(x)
rois_people, rois_objects, spatial_locs, union_box, object_one_hot, pose_box = ROI.get_pool_loc(out1, image_id, flag_,
size=pool_size,
spatial_scale=25,
batch_size=len(
pairs_info))
pose_box = torch.unsqueeze(pose_box, 1)
### Defining The Pooling Operations #######
x, y = out1.size()[2], out1.size()[3] # out1.size()=(batch,1024,25,25)
hum_pool = nn.AvgPool2d(pool_size, padding=0, stride=(1, 1))
obj_pool = nn.AvgPool2d(pool_size, padding=0, stride=(1, 1))
context_pool = nn.AvgPool2d((x, y), padding=0, stride=(1, 1))
#################################################
### Human###
residual_people = rois_people
res_people = self.Conv_people(rois_people) + residual_people
res_av_people = hum_pool(res_people)
out2_people = self.flat(res_av_people)
###########
##Objects##
residual_objects = rois_objects
res_objects = self.Conv_objects(rois_objects) + residual_objects
res_av_objects = obj_pool(res_objects)
out2_objects = self.flat(res_av_objects)
#############
#### Context ######
residual_context = out1
res_context = self.Conv_context(out1) + residual_context
res_av_context = context_pool(res_context)
out2_context = self.flat(res_av_context)
#################
##Attention Features## # 公式5
out2_union = self.spmap_up(self.flat(self.conv_sp_map(union_box)))
############################
#### Making Essential Pairing##########
pairs, people, objects_only, pair_obj_one_out = ROI.pairing(out2_people, out2_objects, out2_context,
spatial_locs, pairs_info,
object_one_hot)
####################################
### Muti-stream caculate##########
single_people = pairs[:, 0:1024]
single_object = pairs[:, 1024:2048]
single_context = pairs[:, 2048:]
single_people = self.lin_peo(single_people)
single_object = self.lin_obj(single_object)
single_context = self.lin_con(single_context)
out2_pose = self.posemap_up(self.flat(self.conv_single_map(pose_box)))
lin_stream_p = single_people * out2_pose
lin_stream_o = single_object
w2v_obj = self.w2v_fc(pair_obj_one_out)
lin_stream_c = single_context * out2_union
lin_fix_o = torch.cat((lin_stream_o, w2v_obj), 1)
lin_final_o = self.fc_obj(lin_fix_o)
lin_join = torch.cat((lin_stream_p, lin_final_o, lin_stream_c), 1)
lin_join = self.lin_dim_down(lin_join)
###### Interaction Probability########## i_ho
lin_single = self.lin_single_tail(lin_join)
interaction_prob = self.sigmoid(lin_single)
####################################################
######body_part stream##########
####################################################
####### Graph Model Base Structure##################
people_t = people
objects_only = objects_only
combine_g = []
people_f = []
objects_f = []
pairs_f = []
start_p = 0
start_o = 0
start_c = 0
for batch_num, l in enumerate(pairs_info):
####Slicing##########
people_this_batch = people_t[start_p:start_p + int(l[0])]
no_peo = len(people_this_batch)
objects_this_batch = objects_only[start_o:start_o + int(l[1])][1:]
no_objects_this_batch = objects_only[start_o:start_o + int(l[1])][0]
no_obj = len(objects_this_batch)
interaction_prob_this_batch = interaction_prob[start_c:start_c + int(l[1]) * int(l[0])]
if no_obj == 0:
people_this_batch_r = people_this_batch
objects_this_batch_r = no_objects_this_batch.view([1, 1024])
else:
peo_to_obj_this_batch = torch.stack(
[torch.cat((i, j)) for ind_p, i in enumerate(people_this_batch) for ind_o, j in
enumerate(objects_this_batch)])
obj_to_peo_this_batch = torch.stack(
[torch.cat((i, j)) for ind_p, i in enumerate(objects_this_batch) for ind_o, j in
enumerate(people_this_batch)])
###################
####### Adjecency###########
adj_l = []
adj_po = torch.zeros([no_peo, no_obj]).cuda()
adj_op = torch.zeros([no_obj, no_peo]).cuda()
for index_probs, probs in enumerate(interaction_prob_this_batch):
if index_probs % (no_obj + 1) != 0:
adj_l.append(probs)
adj_po = torch.cat(adj_l).view(len(adj_l), 1)
adj_op = adj_po
##############################
###Finding Out Refined Features######
people_this_batch_r = people_this_batch + torch.mm(adj_po.view([no_peo, no_obj]),
self.peo_to_obj_w(objects_this_batch))
objects_this_batch_r = objects_this_batch + torch.mm(adj_op.view([no_peo, no_obj]).t(),
self.obj_to_peo_w(people_this_batch))
objects_this_batch_r = torch.cat((no_objects_this_batch.view([1, 1024]), objects_this_batch_r))
#############################
#### Restructuring ####
people_f.append(people_this_batch_r)
people_t_f = people_this_batch_r
objects_f.append(objects_this_batch_r)
objects_t_f = objects_this_batch_r
pairs_f.append(torch.stack(
[torch.cat((i, j)) for ind_p, i in enumerate(people_t_f) for ind_o, j in
enumerate(objects_t_f)]))
# import pdb;pdb.set_trace()
##############################
###Loop increment for next batch##
start_p += int(l[0])
start_o += int(l[1])
start_c += int(l[0]) * int(l[1])
#####################
people_graph = torch.cat(people_f)
objects_graph = torch.cat(objects_f)
pairs_graph = torch.cat(pairs_f)
######################################################################################################################################
#### Prediction from visual features####
# lin_h = self.lin_visual_head(pairs)
# lin_t = lin_h * out2_union
lin_visual = self.lin_visual_tail(lin_join)
##############################
#### Prediction from graph features####
lin_graph_h = self.lin_graph_head(pairs_graph)
lin_graph_t = lin_graph_h * out2_union
lin_graph = self.lin_graph_tail(lin_graph_t)
####################################
##### Prediction from attention features #######
lin_att = self.lin_spmap_tail(out2_union)
#############################
return [lin_visual, lin_single, lin_graph, lin_att] # ,lin_obj_ids]
|
de
| 0.640296
|
# coding=utf-8 ## Resnets,resnext ######### Convolutional Blocks for human,objects and the context############################## ############################################################################################### ##### Attention Feature Model###### # nn.Conv2d(2, 64, kernel_size=(5, 5)), # nn.Linear(32,1024), # nn.ReLU() # nn.Linear(32,1024), # nn.ReLU() ####################################### ### Prediction Model for attention features####### ################################################## ######### Graph Model Basic Structure ######################## ################################################################# # Interaction prediction model for visual features###################### # nn.Linear(2048,1), # nn.Dropout2d(p=0.5), # nn.Linear(lin_size*3, 1024), # nn.ReLU(), # nn.Linear(10,1), ################################################################# ########## Prediction model for visual features################# # nn.Linear(2048, 29), # nn.Dropout2d(p=0.5), # nn.Linear(lin_size*3, 1024), # nn.Linear(lin_size*3+4+sp_size, 1024), # nn.ReLU(), ################################################ ####### Prediction model for graph features################## # nn.Linear(2048, 29), # nn.Dropout2d(p=0.5), ######################################## ### Defining The Pooling Operations ####### # out1.size()=(batch,1024,25,25) ################################################# ### Human### ########### ##Objects## ############# #### Context ###### ################# ##Attention Features## # 公式5 ############################ #### Making Essential Pairing########## #################################### ### Muti-stream caculate########## ###### Interaction Probability########## i_ho #################################################### ######body_part stream########## #################################################### ####### Graph Model Base Structure################## ####Slicing########## ################### ####### Adjecency########### 
############################## ###Finding Out Refined Features###### ############################# #### Restructuring #### # import pdb;pdb.set_trace() ############################## ###Loop increment for next batch## ##################### ###################################################################################################################################### #### Prediction from visual features#### # lin_h = self.lin_visual_head(pairs) # lin_t = lin_h * out2_union ############################## #### Prediction from graph features#### #################################### ##### Prediction from attention features ####### ############################# # ,lin_obj_ids]
| 2.105837
| 2
|
pinax/projects/intranet_project/apps/intranet_account/urls.py
|
keimlink/pinax
| 2
|
6625353
|
# URL routes for the intranet account application.
# NOTE: uses the legacy Django 1.x API (``django.conf.urls.defaults``,
# ``patterns()`` and string-based view references) — keep as-is for this
# codebase's Django version.
from django.conf.urls.defaults import *
from intranet_account.forms import *

urlpatterns = patterns('',
    # Account management views (named so templates can reverse() them).
    url(r'^email/$', 'intranet_account.views.email', name="acct_email"),
    url(r'^login/$', 'intranet_account.views.login', name="acct_login"),
    url(r'^password_change/$', 'intranet_account.views.password_change', name="acct_passwd"),
    url(r'^password_set/$', 'intranet_account.views.password_set', name="acct_passwd_set"),
    url(r'^password_delete/$', 'intranet_account.views.password_delete', name="acct_passwd_delete"),
    # Static "done" page rendered directly from a template.
    url(r'^password_delete/done/$', 'django.views.generic.simple.direct_to_template', {
        "template": "account/password_delete_done.html",
    }, name="acct_passwd_delete_done"),
    url(r'^password_reset/$', 'intranet_account.views.password_reset', name="acct_passwd_reset"),
    url(r'^timezone/$', 'intranet_account.views.timezone_change', name="acct_timezone_change"),
    url(r'^other_services/$', 'intranet_account.views.other_services', name="acct_other_services"),
    url(r'^language/$', 'intranet_account.views.language_change', name="acct_language_change"),
    # Logout is delegated to django.contrib.auth with a custom template.
    url(r'^logout/$', 'django.contrib.auth.views.logout', {"template_name": "account/logout.html"}, name="acct_logout"),
    url(r'^confirm_email/(\w+)/$', 'emailconfirmation.views.confirm_email', name="acct_confirm_email"),
    # ajax validation
    (r'^validate/$', 'ajax_validation.views.validate', {'form_class': SignupForm}, 'signup_form_validate'),
)
|
from django.conf.urls.defaults import *
from intranet_account.forms import *
urlpatterns = patterns('',
url(r'^email/$', 'intranet_account.views.email', name="acct_email"),
url(r'^login/$', 'intranet_account.views.login', name="acct_login"),
url(r'^password_change/$', 'intranet_account.views.password_change', name="acct_passwd"),
url(r'^password_set/$', 'intranet_account.views.password_set', name="acct_passwd_set"),
url(r'^password_delete/$', 'intranet_account.views.password_delete', name="acct_passwd_delete"),
url(r'^password_delete/done/$', 'django.views.generic.simple.direct_to_template', {
"template": "account/password_delete_done.html",
}, name="acct_passwd_delete_done"),
url(r'^password_reset/$', 'intranet_account.views.password_reset', name="acct_passwd_reset"),
url(r'^timezone/$', 'intranet_account.views.timezone_change', name="acct_timezone_change"),
url(r'^other_services/$', 'intranet_account.views.other_services', name="acct_other_services"),
url(r'^language/$', 'intranet_account.views.language_change', name="acct_language_change"),
url(r'^logout/$', 'django.contrib.auth.views.logout', {"template_name": "account/logout.html"}, name="acct_logout"),
url(r'^confirm_email/(\w+)/$', 'emailconfirmation.views.confirm_email', name="acct_confirm_email"),
# ajax validation
(r'^validate/$', 'ajax_validation.views.validate', {'form_class': SignupForm}, 'signup_form_validate'),
)
|
en
| 0.281181
|
# ajax validation
| 1.76271
| 2
|
congress/api/table_model.py
|
poobalan-arumugam/congress
| 0
|
6625354
|
<gh_stars>0
# Copyright (c) 2014 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oslo_log import log as logging
from congress.api import api_utils
from congress.api import base
from congress.api import webservice
from congress import exception
LOG = logging.getLogger(__name__)
class TableModel(base.APIModel):
    """Model for handling API requests about Tables."""

    # Note(thread-safety): blocking function
    def get_item(self, id_, params, context=None):
        """Retrieve item with id id\_ from model.

        :param: id\_: The ID of the item to retrieve
        :param: params: A dict-like object containing parameters
                        from the request query string and body.
        :param: context: Key-values providing frame of reference of request
        :returns: The matching item or None if item with id\_ does not exist.
        """
        # Note(thread-safety): blocking call
        caller, source_id = api_utils.get_id_from_context(context)
        # FIXME(threod-safety): in DSE2, the returned caller can be a
        # datasource name. But the datasource name may now refer to a new,
        # unrelated datasource. Causing the rest of this code to operate on
        # an unintended datasource.
        # Fix: check UUID of datasource before operating. Abort if mismatch
        args = {'source_id': source_id, 'table_id': id_}
        try:
            # Note(thread-safety): blocking call
            tablename = self.invoke_rpc(caller, 'get_tablename', args)
        except exception.CongressException as e:
            # Fixed: the two adjacent string literals previously concatenated
            # without a separating space ("table %sfrom datasource").
            LOG.exception("Exception occurred while retrieving table %s "
                          "from datasource %s", id_, source_id)
            raise webservice.DataModelException.create(e)
        if tablename:
            return {'id': tablename}
        LOG.info('table id %s is not found in datasource %s', id_, source_id)
        # Unknown table: return None explicitly per the documented contract.
        return None

    # Note(thread-safety): blocking function
    def get_items(self, params, context=None):
        """Get items in model.

        :param: params: A dict-like object containing parameters
                        from the request query string and body.
        :param: context: Key-values providing frame of reference of request
        :returns: A dict containing at least a 'results' key whose value is
                 a list of items in the model.  Additional keys set in the
                 dict will also be rendered for the user.
        """
        LOG.info('get_items has context %s', context)
        # Note(thread-safety): blocking call
        caller, source_id = api_utils.get_id_from_context(context)
        # FIXME(threod-safety): in DSE2, the returned caller can be a
        # datasource name. But the datasource name may now refer to a new,
        # unrelated datasource. Causing the rest of this code to operate on
        # an unintended datasource.
        # Fix: check UUID of datasource before operating. Abort if mismatch
        try:
            # Note(thread-safety): blocking call
            tablenames = self.invoke_rpc(caller, 'get_tablenames',
                                         {'source_id': source_id})
        except exception.CongressException as e:
            # Fixed: missing space between the concatenated literals
            # ("tablesfrom datasource").
            LOG.exception("Exception occurred while retrieving tables "
                          "from datasource %s", source_id)
            raise webservice.DataModelException.create(e)
        # when the source_id doesn't have any table, 'tablenames' is set([])
        if isinstance(tablenames, (set, list)):
            return {'results': [{'id': x} for x in tablenames]}
        # NOTE(review): any other RPC result falls through and returns None,
        # matching the original behavior.
        return None

    # Tables can only be created/updated/deleted by writing policy
    # or by adding new data sources.  Once we have internal data sources
    # we need to implement all of these.

    # def add_item(self, item, id_=None, context=None):
    #     """Add item to model.
    #     Args:
    #         item: The item to add to the model
    #         id_: The ID of the item, or None if an ID should be generated
    #         context: Key-values providing frame of reference of request
    #     Returns:
    #          Tuple of (ID, newly_created_item)
    #     Raises:
    #         KeyError: ID already exists.
    #     """

    # def update_item(self, id_, item, context=None):
    #     """Update item with id\_ with new data.
    #     Args:
    #         id_: The ID of the item to be updated
    #         item: The new item
    #         context: Key-values providing frame of reference of request
    #     Returns:
    #          The updated item.
    #     Raises:
    #         KeyError: Item with specified id\_ not present.
    #     """
    #     # currently a noop since the owner_id cannot be changed
    #     if id_ not in self.items:
    #         raise KeyError("Cannot update item with ID '%s': "
    #                        "ID does not exist")
    #     return item

    # def delete_item(self, id_, context=None):
    #     """Remove item from model.
    #     Args:
    #         id_: The ID of the item to be removed
    #         context: Key-values providing frame of reference of request
    #     Returns:
    #          The removed item.
    #     Raises:
    #         KeyError: Item with specified id\_ not present.
    #     """
|
# Copyright (c) 2014 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oslo_log import log as logging
from congress.api import api_utils
from congress.api import base
from congress.api import webservice
from congress import exception
LOG = logging.getLogger(__name__)
class TableModel(base.APIModel):
"""Model for handling API requests about Tables."""
# Note(thread-safety): blocking function
def get_item(self, id_, params, context=None):
"""Retrieve item with id id\_ from model.
:param: id\_: The ID of the item to retrieve
:param: params: A dict-like object containing parameters
from the request query string and body.
:param: context: Key-values providing frame of reference of request
:returns: The matching item or None if item with id\_ does not exist.
"""
# Note(thread-safety): blocking call
caller, source_id = api_utils.get_id_from_context(context)
# FIXME(threod-safety): in DSE2, the returned caller can be a
# datasource name. But the datasource name may now refer to a new,
# unrelated datasource. Causing the rest of this code to operate on
# an unintended datasource.
# Fix: check UUID of datasource before operating. Abort if mismatch
args = {'source_id': source_id, 'table_id': id_}
try:
# Note(thread-safety): blocking call
tablename = self.invoke_rpc(caller, 'get_tablename', args)
except exception.CongressException as e:
LOG.exception("Exception occurred while retrieving table %s"
"from datasource %s", id_, source_id)
raise webservice.DataModelException.create(e)
if tablename:
return {'id': tablename}
LOG.info('table id %s is not found in datasource %s', id_, source_id)
# Note(thread-safety): blocking function
def get_items(self, params, context=None):
"""Get items in model.
:param: params: A dict-like object containing parameters
from the request query string and body.
:param: context: Key-values providing frame of reference of request
:returns: A dict containing at least a 'results' key whose value is
a list of items in the model. Additional keys set in the
dict will also be rendered for the user.
"""
LOG.info('get_items has context %s', context)
# Note(thread-safety): blocking call
caller, source_id = api_utils.get_id_from_context(context)
# FIXME(threod-safety): in DSE2, the returned caller can be a
# datasource name. But the datasource name may now refer to a new,
# unrelated datasource. Causing the rest of this code to operate on
# an unintended datasource.
# Fix: check UUID of datasource before operating. Abort if mismatch
try:
# Note(thread-safety): blocking call
tablenames = self.invoke_rpc(caller, 'get_tablenames',
{'source_id': source_id})
except exception.CongressException as e:
LOG.exception("Exception occurred while retrieving tables"
"from datasource %s", source_id)
raise webservice.DataModelException.create(e)
# when the source_id doesn't have any table, 'tablenames' is set([])
if isinstance(tablenames, set) or isinstance(tablenames, list):
return {'results': [{'id': x} for x in tablenames]}
# Tables can only be created/updated/deleted by writing policy
# or by adding new data sources. Once we have internal data sources
# we need to implement all of these.
# def add_item(self, item, id_=None, context=None):
# """Add item to model.
# Args:
# item: The item to add to the model
# id_: The ID of the item, or None if an ID should be generated
# context: Key-values providing frame of reference of request
# Returns:
# Tuple of (ID, newly_created_item)
# Raises:
# KeyError: ID already exists.
# """
# def update_item(self, id_, item, context=None):
# """Update item with id\_ with new data.
# Args:
# id_: The ID of the item to be updated
# item: The new item
# context: Key-values providing frame of reference of request
# Returns:
# The updated item.
# Raises:
# KeyError: Item with specified id\_ not present.
# """
# # currently a noop since the owner_id cannot be changed
# if id_ not in self.items:
# raise KeyError("Cannot update item with ID '%s': "
# "ID does not exist")
# return item
# def delete_item(self, id_, context=None):
# """Remove item from model.
# Args:
# id_: The ID of the item to be removed
# context: Key-values providing frame of reference of request
# Returns:
# The removed item.
# Raises:
# KeyError: Item with specified id\_ not present.
# """
|
en
| 0.740967
|
# Copyright (c) 2014 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Model for handling API requests about Tables. # Note(thread-safety): blocking function Retrieve item with id id\_ from model. :param: id\_: The ID of the item to retrieve :param: params: A dict-like object containing parameters from the request query string and body. :param: context: Key-values providing frame of reference of request :returns: The matching item or None if item with id\_ does not exist. # Note(thread-safety): blocking call # FIXME(threod-safety): in DSE2, the returned caller can be a # datasource name. But the datasource name may now refer to a new, # unrelated datasource. Causing the rest of this code to operate on # an unintended datasource. # Fix: check UUID of datasource before operating. Abort if mismatch # Note(thread-safety): blocking call # Note(thread-safety): blocking function Get items in model. :param: params: A dict-like object containing parameters from the request query string and body. :param: context: Key-values providing frame of reference of request :returns: A dict containing at least a 'results' key whose value is a list of items in the model. Additional keys set in the dict will also be rendered for the user. # Note(thread-safety): blocking call # FIXME(threod-safety): in DSE2, the returned caller can be a # datasource name. But the datasource name may now refer to a new, # unrelated datasource. 
Causing the rest of this code to operate on # an unintended datasource. # Fix: check UUID of datasource before operating. Abort if mismatch # Note(thread-safety): blocking call # when the source_id doesn't have any table, 'tablenames' is set([]) # Tables can only be created/updated/deleted by writing policy # or by adding new data sources. Once we have internal data sources # we need to implement all of these. # def add_item(self, item, id_=None, context=None): # """Add item to model. # Args: # item: The item to add to the model # id_: The ID of the item, or None if an ID should be generated # context: Key-values providing frame of reference of request # Returns: # Tuple of (ID, newly_created_item) # Raises: # KeyError: ID already exists. # """ # def update_item(self, id_, item, context=None): # """Update item with id\_ with new data. # Args: # id_: The ID of the item to be updated # item: The new item # context: Key-values providing frame of reference of request # Returns: # The updated item. # Raises: # KeyError: Item with specified id\_ not present. # """ # # currently a noop since the owner_id cannot be changed # if id_ not in self.items: # raise KeyError("Cannot update item with ID '%s': " # "ID does not exist") # return item # def delete_item(self, id_, context=None): # """Remove item from model. # Args: # id_: The ID of the item to be removed # context: Key-values providing frame of reference of request # Returns: # The removed item. # Raises: # KeyError: Item with specified id\_ not present. # """
| 1.893083
| 2
|
tests/test_parsers.py
|
MadSciSoCool/zhinst-toolkit
| 0
|
6625355
|
import pytest
from hypothesis import given, assume, strategies as st
from hypothesis.stateful import rule, precondition, RuleBasedStateMachine
import numpy as np
from .context import Parse
@given(st.integers(0, 5))
def test_set_on_off(n):
    """Parse.set_on_off normalizes on/off strings (any case) and 0/1 ints.

    Unsupported input types/values must raise ValueError.
    """
    # Renamed from ``map`` to avoid shadowing the builtin.
    inputs = {0: "off", 1: "on", 2: "OFF", 3: "ON", 4: 0, 5: 1}
    val = Parse.set_on_off(inputs[n])
    assert val in [0, 1]
    with pytest.raises(ValueError):
        Parse.set_on_off([])
    with pytest.raises(ValueError):
        Parse.set_on_off("a;kdhf")
@given(st.integers(0, 3))
def test_get_on_off(n):
    """Values 0/1 map to an on/off string; anything else raises ValueError."""
    if n >= 2:
        with pytest.raises(ValueError):
            Parse.get_on_off(n)
    else:
        assert Parse.get_on_off(n) in ["on", "off"]
@given(st.integers(0, 4))
def test_get_locked_status(n):
    """Values 0-2 map to a status string; 3 and above raise ValueError."""
    if n >= 3:
        with pytest.raises(ValueError):
            Parse.get_locked_status(n)
    else:
        status = Parse.get_locked_status(n)
        assert status in ["locked", "error", "busy"]
@given(st.floats(0.0001, 1.0))
def test_samples2time_and_back(t):
    """time -> samples -> time round-trips to within 1e-5 seconds."""
    roundtripped = Parse.uhfqa_samples2time(Parse.uhfqa_time2samples(t))
    assert abs(t - roundtripped) <= 1e-5
@given(st.integers(-180, 179))
def test_complex2deg_and_back(deg):
    """deg -> complex -> deg round-trips to within 1e-5 degrees."""
    # Renamed from ``complex`` to avoid shadowing the builtin type.
    phase = Parse.deg2complex(deg)
    d = Parse.complex2deg(phase)
    assert abs(d - deg) < 1e-5
|
import pytest
from hypothesis import given, assume, strategies as st
from hypothesis.stateful import rule, precondition, RuleBasedStateMachine
import numpy as np
from .context import Parse
@given(st.integers(0, 5))
def test_set_on_off(n):
map = {0: "off", 1: "on", 2: "OFF", 3: "ON", 4: 0, 5: 1}
val = Parse.set_on_off(map[n])
assert val in [0, 1]
with pytest.raises(ValueError):
Parse.set_on_off([])
with pytest.raises(ValueError):
Parse.set_on_off("a;kdhf")
@given(st.integers(0, 3))
def test_get_on_off(n):
if n < 2:
v = Parse.get_on_off(n)
assert v in ["on", "off"]
else:
with pytest.raises(ValueError):
Parse.get_on_off(n)
@given(st.integers(0, 4))
def test_get_locked_status(n):
if n < 3:
v = Parse.get_locked_status(n)
assert v in ["locked", "error", "busy"]
else:
with pytest.raises(ValueError):
Parse.get_locked_status(n)
@given(st.floats(0.0001, 1.0))
def test_samples2time_and_back(t):
samples = Parse.uhfqa_time2samples(t)
time = Parse.uhfqa_samples2time(samples)
assert abs(t - time) <= 1e-5
@given(st.integers(-180, 179))
def test_complex2deg_and_back(deg):
complex = Parse.deg2complex(deg)
d = Parse.complex2deg(complex)
assert abs(d - deg) < 1e-5
|
none
| 1
| 2.217068
| 2
|
|
tests/models_tests/test_weavenet.py
|
pfnet/chainerchem
| 184
|
6625356
|
from chainer import cuda
from chainer import gradient_check
import numpy
import pytest
from chainer_chemistry.config import MAX_ATOMIC_NUM
from chainer_chemistry.models.weavenet import WeaveNet
from chainer_chemistry.utils.permutation import permute_adj
from chainer_chemistry.utils.permutation import permute_node
# Fixture dimensions shared by all tests in this module.
atom_size = 5  # number of atoms per molecule
weave_channels = [50, 50]  # hidden channel sizes of the WeaveNet layers
batch_size = 2
atom_feature_dim = 23  # feature dim of the pre-processed atom input
pair_feature_dim = 10  # feature dim of each atom-pair entry
out_dim = weave_channels[-1]  # model output dim equals the last channel size
@pytest.fixture
def model():
    """WeaveNet fixture used with integer atom-ID input (see ``data``)."""
    return WeaveNet(weave_channels=weave_channels, n_atom=atom_size)
@pytest.fixture
def model_processed():
    """model to test `atom_data_processed` input (float feature vectors)."""
    return WeaveNet(weave_channels=weave_channels, n_atom=atom_size)
@pytest.fixture
def data():
    """Deterministic synthetic inputs shared by all tests in this module."""
    numpy.random.seed(0)
    # Pre-processed atom features: (batch_size, atom_size, atom_feature_dim).
    atom_data_processed = numpy.random.uniform(
        0, high=1, size=(batch_size, atom_size, atom_feature_dim)
    ).astype(numpy.float32)
    # Integer atom IDs in [0, MAX_ATOMIC_NUM): (batch_size, atom_size).
    atom_data = numpy.random.randint(
        0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)
    ).astype(numpy.float32)
    adj_data = numpy.random.uniform(
        0, high=1, size=(batch_size, pair_feature_dim, atom_size, atom_size)
    ).astype(numpy.float32)
    # adj_data is symmetric along pair of atoms
    # adj_data = adj_data + adj_data.swapaxes(-1, -2)
    # Flatten the atom-pair axes:
    # (batch_size, atom_size * atom_size, pair_feature_dim).
    adj_data = adj_data.transpose((0, 3, 2, 1)).reshape(
        batch_size, atom_size * atom_size, pair_feature_dim
    ).astype(numpy.float32)
    # Upstream gradient used by the backward checks: (batch_size, out_dim).
    y_grad = numpy.random.uniform(
        -1, 1, (batch_size, out_dim)).astype(numpy.float32)
    return atom_data_processed, atom_data, adj_data, y_grad
def check_forward(model, atom_data, adj_data):
    """Run a forward pass and assert the output has shape (batch, out_dim).

    Removed a leftover debug ``print`` of the output shape.
    """
    y_actual = cuda.to_cpu(model(atom_data, adj_data).data)
    assert y_actual.shape == (batch_size, out_dim)
def test_forward_cpu(model, model_processed, data):
    """Forward pass succeeds for both the ID-input and processed-input models."""
    atom_data_processed, atom_data, adj_data = data[:3]
    check_forward(model, atom_data, adj_data)
    check_forward(model_processed, atom_data_processed, adj_data)
@pytest.mark.gpu
def test_forward_gpu(model, model_processed, data):
    # Same as test_forward_cpu, with inputs and both models moved to GPU.
    atom_data_processed, atom_data, adj_data = \
        [cuda.to_gpu(d) for d in data[0:3]]
    model.to_gpu()
    model_processed.to_gpu()
    check_forward(model, atom_data, adj_data)
    check_forward(model_processed, atom_data_processed, adj_data)
def test_backward_cpu(model, model_processed, data):
    # Numerical gradient check on CPU for both input variants
    # (loose tolerances: atol/rtol of 1e-1).
    atom_data_processed, atom_data, adj_data, y_grad = data
    gradient_check.check_backward(model, (atom_data, adj_data), y_grad,
                                  atol=1e-1, rtol=1e-1)
    gradient_check.check_backward(model_processed, (atom_data_processed,
                                                    adj_data), y_grad,
                                  atol=1e-1, rtol=1e-1)
@pytest.mark.gpu
def test_backward_gpu(model, model_processed, data):
    # Same numerical gradient check as test_backward_cpu, on GPU.
    atom_data_processed, atom_data, adj_data, y_grad = \
        [cuda.to_gpu(d) for d in data]
    model.to_gpu()
    model_processed.to_gpu()
    gradient_check.check_backward(
        model, (atom_data, adj_data), y_grad, atol=1e-1, rtol=1e-1)
    gradient_check.check_backward(
        model_processed, (atom_data_processed, adj_data), y_grad,
        atol=1e-1, rtol=1e-1)
def _test_forward_cpu_graph_invariant(
        model, atom_data, adj_data, node_permute_axis=-1):
    # The model output should be unchanged when atoms (graph nodes) are
    # permuted, provided the pair features are permuted consistently.
    y_actual = cuda.to_cpu(model(atom_data, adj_data).data)
    permutation_index = numpy.random.permutation(atom_size)
    permute_atom_data = permute_node(atom_data, permutation_index,
                                     axis=node_permute_axis)
    # Unflatten the pair axis back to (batch, atom, atom, pair_dim) so the
    # same permutation can be applied to both atom axes at once.
    permute_adj_data = adj_data.reshape(
        batch_size, atom_size, atom_size, pair_feature_dim
    ).astype(numpy.float32)
    permute_adj_data = permute_adj(
        permute_adj_data, permutation_index, axis=[1, 2])
    # Re-flatten to the (batch, atom*atom, pair_dim) layout the model expects.
    permute_adj_data = permute_adj_data.reshape(
        batch_size, atom_size * atom_size, pair_feature_dim
    ).astype(numpy.float32)
    permute_y_actual = cuda.to_cpu(model(
        permute_atom_data, permute_adj_data).data)
    assert numpy.allclose(y_actual, permute_y_actual, rtol=1.e-4, atol=1.e-6)
def test_forward_cpu_graph_invariant_embed(model, data):
    """ID-input model output is invariant under atom permutation."""
    _, atom_data, adj_data, _ = data
    _test_forward_cpu_graph_invariant(
        model, atom_data, adj_data, node_permute_axis=-1)
def test_forward_cpu_graph_invariant_processed(model_processed, data):
    """Processed-input model output is invariant under atom permutation."""
    atom_data_processed, _, adj_data, _ = data
    _test_forward_cpu_graph_invariant(
        model_processed, atom_data_processed, adj_data, node_permute_axis=1)
if __name__ == '__main__':
    # Allow running this test module directly: ``python test_weavenet.py``.
    pytest.main([__file__, '-v'])
|
from chainer import cuda
from chainer import gradient_check
import numpy
import pytest
from chainer_chemistry.config import MAX_ATOMIC_NUM
from chainer_chemistry.models.weavenet import WeaveNet
from chainer_chemistry.utils.permutation import permute_adj
from chainer_chemistry.utils.permutation import permute_node
atom_size = 5
weave_channels = [50, 50]
batch_size = 2
atom_feature_dim = 23
pair_feature_dim = 10
out_dim = weave_channels[-1]
@pytest.fixture
def model():
return WeaveNet(weave_channels=weave_channels, n_atom=atom_size)
@pytest.fixture
def model_processed():
"""model to test `atom_data_processed` input"""
return WeaveNet(weave_channels=weave_channels, n_atom=atom_size)
@pytest.fixture
def data():
numpy.random.seed(0)
atom_data_processed = numpy.random.uniform(
0, high=1, size=(batch_size, atom_size, atom_feature_dim)
).astype(numpy.float32)
atom_data = numpy.random.randint(
0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)
).astype(numpy.int32)
adj_data = numpy.random.uniform(
0, high=1, size=(batch_size, pair_feature_dim, atom_size, atom_size)
).astype(numpy.float32)
# adj_data is symmetric along pair of atoms
# adj_data = adj_data + adj_data.swapaxes(-1, -2)
adj_data = adj_data.transpose((0, 3, 2, 1)).reshape(
batch_size, atom_size * atom_size, pair_feature_dim
).astype(numpy.float32)
y_grad = numpy.random.uniform(
-1, 1, (batch_size, out_dim)).astype(numpy.float32)
return atom_data_processed, atom_data, adj_data, y_grad
def check_forward(model, atom_data, adj_data):
y_actual = cuda.to_cpu(model(atom_data, adj_data).data)
print('y_actual', y_actual.shape)
assert y_actual.shape == (batch_size, out_dim)
def test_forward_cpu(model, model_processed, data):
atom_data_processed, atom_data, adj_data = data[0:3]
check_forward(model, atom_data, adj_data)
check_forward(model_processed, atom_data_processed, adj_data)
@pytest.mark.gpu
def test_forward_gpu(model, model_processed, data):
atom_data_processed, atom_data, adj_data = \
[cuda.to_gpu(d) for d in data[0:3]]
model.to_gpu()
model_processed.to_gpu()
check_forward(model, atom_data, adj_data)
check_forward(model_processed, atom_data_processed, adj_data)
def test_backward_cpu(model, model_processed, data):
atom_data_processed, atom_data, adj_data, y_grad = data
gradient_check.check_backward(model, (atom_data, adj_data), y_grad,
atol=1e-1, rtol=1e-1)
gradient_check.check_backward(model_processed, (atom_data_processed,
adj_data), y_grad,
atol=1e-1, rtol=1e-1)
@pytest.mark.gpu
def test_backward_gpu(model, model_processed, data):
atom_data_processed, atom_data, adj_data, y_grad = \
[cuda.to_gpu(d) for d in data]
model.to_gpu()
model_processed.to_gpu()
gradient_check.check_backward(
model, (atom_data, adj_data), y_grad, atol=1e-1, rtol=1e-1)
gradient_check.check_backward(
model_processed, (atom_data_processed, adj_data), y_grad,
atol=1e-1, rtol=1e-1)
def _test_forward_cpu_graph_invariant(
model, atom_data, adj_data, node_permute_axis=-1):
y_actual = cuda.to_cpu(model(atom_data, adj_data).data)
permutation_index = numpy.random.permutation(atom_size)
permute_atom_data = permute_node(atom_data, permutation_index,
axis=node_permute_axis)
permute_adj_data = adj_data.reshape(
batch_size, atom_size, atom_size, pair_feature_dim
).astype(numpy.float32)
permute_adj_data = permute_adj(
permute_adj_data, permutation_index, axis=[1, 2])
permute_adj_data = permute_adj_data.reshape(
batch_size, atom_size * atom_size, pair_feature_dim
).astype(numpy.float32)
permute_y_actual = cuda.to_cpu(model(
permute_atom_data, permute_adj_data).data)
assert numpy.allclose(y_actual, permute_y_actual, rtol=1.e-4, atol=1.e-6)
def test_forward_cpu_graph_invariant_embed(model, data):
atom_data, adj_data = data[1], data[2]
_test_forward_cpu_graph_invariant(
model, atom_data, adj_data, node_permute_axis=-1)
def test_forward_cpu_graph_invariant_processed(model_processed, data):
atom_data_processed, adj_data = data[0], data[2]
_test_forward_cpu_graph_invariant(
model_processed, atom_data_processed, adj_data, node_permute_axis=1)
if __name__ == '__main__':
pytest.main([__file__, '-v'])
|
en
| 0.405498
|
model to test `atom_data_processed` input # adj_data is symmetric along pair of atoms # adj_data = adj_data + adj_data.swapaxes(-1, -2)
| 2.004138
| 2
|
metricbeat/tests/system/test_autodiscover.py
|
IzekChen/beats
| 8
|
6625357
|
import os
import metricbeat
import unittest
from time import sleep
from beat.beat import INTEGRATION_TESTS
class TestAutodiscover(metricbeat.BaseTest):
    """
    Test metricbeat autodiscover
    """
    @unittest.skipIf(not INTEGRATION_TESTS or
                     os.getenv("TESTING_ENVIRONMENT") == "2x",
                     "integration test not available on 2.x")
    def test_docker(self):
        """
        Test docker autodiscover starts modules from templates
        """
        import docker
        docker_client = docker.from_env()
        # Autodiscover template: match containers running the memcached image
        # and start the memcached/stats metricset against them.
        self.render_config_template(
            autodiscover={
                'docker': {
                    'templates': '''
                      - condition:
                          equals.docker.container.image: memcached:latest
                        config:
                          - module: memcached
                            metricsets: ["stats"]
                            period: 1s
                            hosts: ["${data.host}:11211"]
                    ''',
                },
            },
        )
        proc = self.start_beat()
        docker_client.images.pull('memcached:latest')
        container = docker_client.containers.run('memcached:latest', detach=True)
        # A runner must start once the matching container appears and
        # produce at least one event ...
        self.wait_until(lambda: self.log_contains('Starting runner: memcached'))
        self.wait_until(lambda: self.output_count(lambda x: x >= 1))
        container.stop()
        # ... and stop again after the container goes away.
        self.wait_until(lambda: self.log_contains('Stopping runner: memcached'))
        output = self.read_output_json()
        proc.check_kill_and_wait()
        # Check metadata is added
        assert output[0]['docker']['container']['image'] == 'memcached:latest'
        assert output[0]['docker']['container']['labels'] == {}
        assert 'name' in output[0]['docker']['container']

    @unittest.skipIf(not INTEGRATION_TESTS or
                     os.getenv("TESTING_ENVIRONMENT") == "2x",
                     "integration test not available on 2.x")
    def test_docker_labels(self):
        """
        Test docker autodiscover starts modules from labels
        """
        import docker
        docker_client = docker.from_env()
        # Hints-based autodiscover: module config comes from container labels.
        self.render_config_template(
            autodiscover={
                'docker': {
                    'hints.enabled': 'true',
                },
            },
        )
        proc = self.start_beat()
        docker_client.images.pull('memcached:latest')
        # co.elastic.metrics/* labels carry the same settings the template
        # variant above expressed in YAML.
        labels = {
            'co.elastic.metrics/module': 'memcached',
            'co.elastic.metrics/period': '1s',
            'co.elastic.metrics/hosts': "'${data.host}:11211'",
        }
        container = docker_client.containers.run('memcached:latest', labels=labels, detach=True)
        self.wait_until(lambda: self.log_contains('Starting runner: memcached'))
        self.wait_until(lambda: self.output_count(lambda x: x >= 1))
        container.stop()
        self.wait_until(lambda: self.log_contains('Stopping runner: memcached'))
        output = self.read_output_json()
        proc.check_kill_and_wait()
        # Check metadata is added
        assert output[0]['docker']['container']['image'] == 'memcached:latest'
        assert 'name' in output[0]['docker']['container']
|
import os
import metricbeat
import unittest
from time import sleep
from beat.beat import INTEGRATION_TESTS
class TestAutodiscover(metricbeat.BaseTest):
"""
Test metricbeat autodiscover
"""
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_docker(self):
"""
Test docker autodiscover starts modules from templates
"""
import docker
docker_client = docker.from_env()
self.render_config_template(
autodiscover={
'docker': {
'templates': '''
- condition:
equals.docker.container.image: memcached:latest
config:
- module: memcached
metricsets: ["stats"]
period: 1s
hosts: ["${data.host}:11211"]
''',
},
},
)
proc = self.start_beat()
docker_client.images.pull('memcached:latest')
container = docker_client.containers.run('memcached:latest', detach=True)
self.wait_until(lambda: self.log_contains('Starting runner: memcached'))
self.wait_until(lambda: self.output_count(lambda x: x >= 1))
container.stop()
self.wait_until(lambda: self.log_contains('Stopping runner: memcached'))
output = self.read_output_json()
proc.check_kill_and_wait()
# Check metadata is added
assert output[0]['docker']['container']['image'] == 'memcached:latest'
assert output[0]['docker']['container']['labels'] == {}
assert 'name' in output[0]['docker']['container']
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_docker_labels(self):
"""
Test docker autodiscover starts modules from labels
"""
import docker
docker_client = docker.from_env()
self.render_config_template(
autodiscover={
'docker': {
'hints.enabled': 'true',
},
},
)
proc = self.start_beat()
docker_client.images.pull('memcached:latest')
labels = {
'co.elastic.metrics/module': 'memcached',
'co.elastic.metrics/period': '1s',
'co.elastic.metrics/hosts': "'${data.host}:11211'",
}
container = docker_client.containers.run('memcached:latest', labels=labels, detach=True)
self.wait_until(lambda: self.log_contains('Starting runner: memcached'))
self.wait_until(lambda: self.output_count(lambda x: x >= 1))
container.stop()
self.wait_until(lambda: self.log_contains('Stopping runner: memcached'))
output = self.read_output_json()
proc.check_kill_and_wait()
# Check metadata is added
assert output[0]['docker']['container']['image'] == 'memcached:latest'
assert 'name' in output[0]['docker']['container']
|
en
| 0.407109
|
Test metricbeat autodiscover Test docker autodiscover starts modules from templates - condition: equals.docker.container.image: memcached:latest config: - module: memcached metricsets: ["stats"] period: 1s hosts: ["${data.host}:11211"] # Check metadata is added Test docker autodiscover starts modules from labels # Check metadata is added
| 2.163267
| 2
|
tests/bdd/steps/assertions.py
|
GQMai/mbed-cloud-sdk-python
| 12
|
6625358
|
import ast
import behave
from tests.bdd.steps.common import operator_compare
from tests.bdd.steps.common import is_equal
@behave.then("{name} object is {operator} {other_name}")
def assert_entity(context, name, operator, other_name):
first = context.entities[name]
second = context.entities[other_name]
operator_compare(first, second, operator)
@behave.then("{name} {attr} is {operator} {other_name} {other_attr}")
def assert_attrs(context, name, attr, operator, other_name, other_attr):
first = getattr(context.entities[name], attr)
second = getattr(context.entities[other_name], other_attr)
operator_compare(first, second, operator)
@behave.then("{name} {attr} is literally {value}")
def assert_literal(context, name, attr, value):
first = getattr(context.entities[name], attr)
is_equal(first, ast.literal_eval(value))
@behave.then("{name} {attr} has a {bool_like} value")
def assert_bool(context, name, attr, bool_like):
first = getattr(context.entities[name], attr)
is_equal(bool(first), bool_like == "truthy")
@behave.then("{stored} {operator} {name}")
@behave.then("{stored} {operator} {name} {attr}")
def assert_stored_attr(context, stored, operator, name, attr=None):
first = context.entities[name]
second = context.stored_results[stored]
if attr:
first = getattr(first, attr)
operator_compare(second, first, operator)
|
import ast
import behave
from tests.bdd.steps.common import operator_compare
from tests.bdd.steps.common import is_equal
@behave.then("{name} object is {operator} {other_name}")
def assert_entity(context, name, operator, other_name):
first = context.entities[name]
second = context.entities[other_name]
operator_compare(first, second, operator)
@behave.then("{name} {attr} is {operator} {other_name} {other_attr}")
def assert_attrs(context, name, attr, operator, other_name, other_attr):
first = getattr(context.entities[name], attr)
second = getattr(context.entities[other_name], other_attr)
operator_compare(first, second, operator)
@behave.then("{name} {attr} is literally {value}")
def assert_literal(context, name, attr, value):
first = getattr(context.entities[name], attr)
is_equal(first, ast.literal_eval(value))
@behave.then("{name} {attr} has a {bool_like} value")
def assert_bool(context, name, attr, bool_like):
first = getattr(context.entities[name], attr)
is_equal(bool(first), bool_like == "truthy")
@behave.then("{stored} {operator} {name}")
@behave.then("{stored} {operator} {name} {attr}")
def assert_stored_attr(context, stored, operator, name, attr=None):
first = context.entities[name]
second = context.stored_results[stored]
if attr:
first = getattr(first, attr)
operator_compare(second, first, operator)
|
none
| 1
| 2.633774
| 3
|
|
Data/Extraction/import.py
|
RohanMathur17/Song-Clustering
| 0
|
6625359
|
<reponame>RohanMathur17/Song-Clustering
import spotipy
import spotipy.util as util
from spotipy import oauth2
import pandas as pd
client_id = 'XXX'
client_secret = 'XXX'
token = oauth2.SpotifyClientCredentials(client_id = client_id , client_secret = client_secret)
cache_token = token.get_access_token()
sp = spotipy.Spotify(cache_token)
username = 'XXX'
playlist_id = 'XXX'
def get_playlist_tracks(username,playlist_id):
results = sp.user_playlist_tracks(username,playlist_id)
tracks = results['items']
while results['next']:
results = sp.next(results)
tracks.extend(results['items'])
return tracks
def analyze_playlist(creator, playlist_id):
# Create empty dataframe
playlist_features_list = ["artist","album","track_name", "track_id","acousticness",
"danceability","energy","key","loudness","mode", "speechiness","instrumentalness",
"liveness","valence","tempo", "duration_ms","time_signature"]
playlist_df = pd.DataFrame(columns = playlist_features_list)
# Loop through every track in the playlist, extract features and append the features to the playlist df
for track in tracks:
# Create empty dict
playlist_features = {}
# Get metadata
playlist_features["artist"] = track["track"]["album"]["artists"][0]["name"]
playlist_features["album"] = track["track"]["album"]["name"]
playlist_features["track_name"] = track["track"]["name"]
playlist_features["track_id"] = track["track"]["id"]
# Get audio features
audio_features = sp.audio_features(playlist_features["track_id"])[0]
for feature in playlist_features_list[4:]:
playlist_features[feature] = audio_features[feature]
# Concat the dfs
track_df = pd.DataFrame(playlist_features, index = [0])
playlist_df = pd.concat([playlist_df, track_df], ignore_index = True)
return playlist_df
tracks=get_playlist_tracks(username,playlist_id)
new_df=analyze_playlist(username,playlist_id)
new_df.to_csv('Country.csv' , index = False)
|
import spotipy
import spotipy.util as util
from spotipy import oauth2
import pandas as pd
client_id = 'XXX'
client_secret = 'XXX'
token = oauth2.SpotifyClientCredentials(client_id = client_id , client_secret = client_secret)
cache_token = token.get_access_token()
sp = spotipy.Spotify(cache_token)
username = 'XXX'
playlist_id = 'XXX'
def get_playlist_tracks(username,playlist_id):
results = sp.user_playlist_tracks(username,playlist_id)
tracks = results['items']
while results['next']:
results = sp.next(results)
tracks.extend(results['items'])
return tracks
def analyze_playlist(creator, playlist_id):
# Create empty dataframe
playlist_features_list = ["artist","album","track_name", "track_id","acousticness",
"danceability","energy","key","loudness","mode", "speechiness","instrumentalness",
"liveness","valence","tempo", "duration_ms","time_signature"]
playlist_df = pd.DataFrame(columns = playlist_features_list)
# Loop through every track in the playlist, extract features and append the features to the playlist df
for track in tracks:
# Create empty dict
playlist_features = {}
# Get metadata
playlist_features["artist"] = track["track"]["album"]["artists"][0]["name"]
playlist_features["album"] = track["track"]["album"]["name"]
playlist_features["track_name"] = track["track"]["name"]
playlist_features["track_id"] = track["track"]["id"]
# Get audio features
audio_features = sp.audio_features(playlist_features["track_id"])[0]
for feature in playlist_features_list[4:]:
playlist_features[feature] = audio_features[feature]
# Concat the dfs
track_df = pd.DataFrame(playlist_features, index = [0])
playlist_df = pd.concat([playlist_df, track_df], ignore_index = True)
return playlist_df
tracks=get_playlist_tracks(username,playlist_id)
new_df=analyze_playlist(username,playlist_id)
new_df.to_csv('Country.csv' , index = False)
|
en
| 0.849783
|
# Create empty dataframe # Loop through every track in the playlist, extract features and append the features to the playlist df # Create empty dict # Get metadata # Get audio features # Concat the dfs
| 3.282596
| 3
|
dsa/data_structures/hashtable/hashtable.py
|
joseph-zabaleta/data-structures-and-algorithms
| 0
|
6625360
|
<gh_stars>0
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def add(self, data):
node = Node(data)
if not self.head:
self.head = node
else:
current = self.head
while current.next:
current = current.next
current.next = node
def display(self):
collection = []
current = self.head
while current:
collection.append(current.data[0])
current = current.next
return collection
class Hashmap:
"""
This class is used to implement a hashmap
It has four available methods: Add, Get, Contains, Hash
"""
def __init__(self, size):
self.size = size
self.map = [None] * self.size
def add(self, key, value):
"""Add is reponsible for adding data to the hashmap datas structure
"""
hashed_key = self.hash(key)
if not self.map[hashed_key]:
self.map[hashed_key] = LinkedList()
self.map[hashed_key].add([key, value])
def get(self, key):
"""Get is responsible for taking in a key argument and returning the value for that key in the hashmap
"""
index = self.hash(key)
if self.map[index]:
ll = self.map[index]
while ll.head:
if ll.head.data[0] == key:
return ll.head.data[1]
else:
ll.head = ll.head.next
else:
return None
def contains(self, key):
"""Contains is reponsible for returning a bool for wether or not the provided key is within the data structure
"""
index = self.hash(key)
if self.map[index]:
collection = self.map[index].display()
if key in collection:
return True
else:
pass
return False
def hash(self, key):
"""
Hash is responsible for splitting they key, converting to ascii values, adding them together, multiply it by any prime number, then modulous by the size of the hashmap to return a valid index value within the hashmap to store that key.
"""
total = 0
for char in key:
total += ord(char)
total *= 19
hashed_key = total % self.size
return hashed_key
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def add(self, data):
node = Node(data)
if not self.head:
self.head = node
else:
current = self.head
while current.next:
current = current.next
current.next = node
def display(self):
collection = []
current = self.head
while current:
collection.append(current.data[0])
current = current.next
return collection
class Hashmap:
"""
This class is used to implement a hashmap
It has four available methods: Add, Get, Contains, Hash
"""
def __init__(self, size):
self.size = size
self.map = [None] * self.size
def add(self, key, value):
"""Add is reponsible for adding data to the hashmap datas structure
"""
hashed_key = self.hash(key)
if not self.map[hashed_key]:
self.map[hashed_key] = LinkedList()
self.map[hashed_key].add([key, value])
def get(self, key):
"""Get is responsible for taking in a key argument and returning the value for that key in the hashmap
"""
index = self.hash(key)
if self.map[index]:
ll = self.map[index]
while ll.head:
if ll.head.data[0] == key:
return ll.head.data[1]
else:
ll.head = ll.head.next
else:
return None
def contains(self, key):
"""Contains is reponsible for returning a bool for wether or not the provided key is within the data structure
"""
index = self.hash(key)
if self.map[index]:
collection = self.map[index].display()
if key in collection:
return True
else:
pass
return False
def hash(self, key):
"""
Hash is responsible for splitting they key, converting to ascii values, adding them together, multiply it by any prime number, then modulous by the size of the hashmap to return a valid index value within the hashmap to store that key.
"""
total = 0
for char in key:
total += ord(char)
total *= 19
hashed_key = total % self.size
return hashed_key
|
en
| 0.895849
|
This class is used to implement a hashmap It has four available methods: Add, Get, Contains, Hash Add is reponsible for adding data to the hashmap datas structure Get is responsible for taking in a key argument and returning the value for that key in the hashmap Contains is reponsible for returning a bool for wether or not the provided key is within the data structure Hash is responsible for splitting they key, converting to ascii values, adding them together, multiply it by any prime number, then modulous by the size of the hashmap to return a valid index value within the hashmap to store that key.
| 4.176287
| 4
|
java/iotivity-android/run_android_smoketests.py
|
jongsunglee/test_iotivity
| 4
|
6625361
|
<gh_stars>1-10
#!/usr/bin/python
#******************************************************************
#
# Copyright 2016 Intel Corporation All Rights Reserved.
#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
import os
import sys
import textwrap
import argparse
import platform
import subprocess
import multiprocessing
import time
import psutil
def find_avalible_console_port():
'''
Find an open port number that will be used for the avd console port. Start searching for the
at port number 5554 and continue incrementing port number till and open port is found.
Returns port number as a string
'''
# 5554 is the default console port used when starting an avd without specifying a port number
# since this is the default port number by default we start checking to see if that port is
# currently in use. If it is being used increase the port to the next even port number.
# Each instance of the avd uses two ports the console port and the avd port.The avd port is
# always console port +1 so we will check that that port is also open.
ret_port = 5554
nc = psutil.net_connections(kind='inet')
while True:
console_port_already_being_used = False
adb_port_already_being_used = False
for i in nc:
if(i.laddr[1] == ret_port):
console_port_already_being_used = True
if(i.laddr[1] == (ret_port + 1)):
adb_port_already_being_used = True
if((not console_port_already_being_used) and (not adb_port_already_being_used)):
return str(ret_port)
ret_port += 2 #for adv the port must be a multiple of 2
def start_avd(avd_name, console_port):
'''
Start up the avd specified by the avd_name parameter use the specify the console port that the avd will use
with the console_port parameter. The find_avalible_console_port() function should be used to determine an
open console_port that can be passed into this function.
note:
- all data on the avd will be wiped to start with a known starting condition.
- the avd will be started with the no-window option so there is no visual indication
that the avd is launched.
Keyword arguments:
avd_name -- the name of the created android virtual device
console_port -- the port number that will attempt to be used
by the when starting the avd
'''
command = 'emulator -avd ' + avd_name + ' -port ' + console_port + ' -wipe-data -no-boot-anim -no-window'
subprocess.Popen([command], shell=True)
def wait_for_avd_boot(console_port):
'''
After calling start_avd this function is used to wait for the avd to complete booting the console_port
option must match the console_port option used to start the avd
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
#dev_cmd = 'adb -s emulator-' + console_port + ' shell getprop dev.bootcomplete'
#sys_cmd = 'adb -s emulator-' + console_port + ' shell getprop sys.boot_completed'
dev_cmd = ['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'dev.bootcomplete']
wait_for_boot = True
while wait_for_boot:
adb_process = subprocess.Popen(['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'dev.bootcomplete'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = adb_process.communicate()
#print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode)
if(adb_process.returncode == 0):
if(output.startswith('1')):
print('property dev.bootcomplete indicates that the avd boot has completed')
wait_for_boot = False
else:
#print('Waiting for emulator to start')
time.sleep(1);
else:
#print('Waiting for emulator to start')
time.sleep(1)
wait_for_boot = True
while wait_for_boot:
adb_process = subprocess.Popen(['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'sys.boot_completed'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = adb_process.communicate()
#print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode)
if(adb_process.returncode == 0):
if(output.startswith('1')):
print('property sys.boot_completed indicates that the avd boot has completed')
wait_for_boot = False
else:
#print('Waiting for emulator to start')
time.sleep(1)
else:
#print('Waiting for emulator to start')
time.sleep(1)
def build_smoketests():
'''
Use gradlew to build the android smoke tests
'''
os.environ['ANDROID_NDK_HOME'] = os.path.abspath(os.getcwd() + '/../../extlibs/android/ndk/android-ndk-r10d')
command = './gradlew assembleAndroidTest'
subprocess.Popen([command], shell=True).wait()
def install_smoketests(console_port):
'''
Install the android smoke tests. Must run build_smoketests() before running this function
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' install -r ./build/outputs/apk/iotivity-android-debug-androidTest-unaligned.apk'
subprocess.Popen([command], shell=True).wait()
def run_smoketests(console_port):
'''
run the android smoke test
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' shell am instrument -w org.iotivity.base.test/android.test.InstrumentationTestRunner'
print command
subprocess.Popen([command], shell=True).wait()
def kill_avd(console_port):
'''
shut down the avd
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' emu kill'
subprocess.Popen([command], shell=True).wait()
def create_avd(avd_name, target, abi):
'''
Create a new android virtual device
Keyword arguments:
avd_name -- the name of the created avd
target -- the target Target ID of the system image to use with the new AVD. example android-21
abi -- the architecture type for the avd example armeabi, x86, or x86_64
run command $android list targets to get a list of targets and there Tag/ABIs
'''
command = ['android', '-s', 'create', 'avd', '-f', '--name', avd_name, '--target', target, '--abi', abi]
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Just use the default avd no need to specify extra options.
p.communicate('no')
p.wait()
def delete_avd(avd_name):
command = ['android', '-s', 'delete', 'avd', '--name', avd_name]
subprocess.Popen(command).wait();
def start_android_and_run_tests(target, abi):
'''
This function does the following
1. creates a new avd named smoke_test_avd_####
where the #### is the port number that is used to talk with the avd
the port number is assigned automatically.
2. starts the created avd
3. waits for the avd to boot
4. builds android smoke tests
5. installs the smoke tests on the avd
6. runs the smoke tests
7. shuts down the avd
8. deletes the avd
Keyword arguments:
avd_name -- the name of the created avd
target -- the target Target ID of the system image to use with the new AVD. example android-21
abi -- the architecture type for the avd example armeabi, x86, or x86_64
run command $android list targets to get a list of targets and there Tag/ABIs
'''
avalible_port = find_avalible_console_port()
avd_name = 'smoke_test_avd_' + avalible_port
create_avd(avd_name, target, abi)
start_avd(avd_name, avalible_port)
wait_for_avd_boot(avalible_port)
build_smoketests();
install_smoketests(avalible_port)
run_smoketests(avalible_port)
kill_avd(avalible_port)
delete_avd(avd_name)
def main(argv):
target = 'android-21'
abi = 'x86_64'
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=textwrap.dedent('''\
If ABI is not specified it will default to x86_64.
If TARGET is not specified it will default to android-21.'''))
parser.add_argument('-a', '--abi', help='specify the abi of the android avd example "x86_64"')
parser.add_argument('-t', '--target', help='the andrdoid target example "android-21"')
args = parser.parse_args()
if (args.abi != None):
abi = args.abi
if (args.target != None):
target = args.target
print '*****************************************************'
print 'Running andriod smoke test with the following options'
print ' The android target is -- ', target
print ' The android abi is -- ', abi
print '*****************************************************'
start_android_and_run_tests(target, abi)
if __name__ == "__main__":
main(sys.argv[1:])
|
#!/usr/bin/python
#******************************************************************
#
# Copyright 2016 Intel Corporation All Rights Reserved.
#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
import os
import sys
import textwrap
import argparse
import platform
import subprocess
import multiprocessing
import time
import psutil
def find_avalible_console_port():
'''
Find an open port number that will be used for the avd console port. Start searching for the
at port number 5554 and continue incrementing port number till and open port is found.
Returns port number as a string
'''
# 5554 is the default console port used when starting an avd without specifying a port number
# since this is the default port number by default we start checking to see if that port is
# currently in use. If it is being used increase the port to the next even port number.
# Each instance of the avd uses two ports the console port and the avd port.The avd port is
# always console port +1 so we will check that that port is also open.
ret_port = 5554
nc = psutil.net_connections(kind='inet')
while True:
console_port_already_being_used = False
adb_port_already_being_used = False
for i in nc:
if(i.laddr[1] == ret_port):
console_port_already_being_used = True
if(i.laddr[1] == (ret_port + 1)):
adb_port_already_being_used = True
if((not console_port_already_being_used) and (not adb_port_already_being_used)):
return str(ret_port)
ret_port += 2 #for adv the port must be a multiple of 2
def start_avd(avd_name, console_port):
'''
Start up the avd specified by the avd_name parameter use the specify the console port that the avd will use
with the console_port parameter. The find_avalible_console_port() function should be used to determine an
open console_port that can be passed into this function.
note:
- all data on the avd will be wiped to start with a known starting condition.
- the avd will be started with the no-window option so there is no visual indication
that the avd is launched.
Keyword arguments:
avd_name -- the name of the created android virtual device
console_port -- the port number that will attempt to be used
by the when starting the avd
'''
command = 'emulator -avd ' + avd_name + ' -port ' + console_port + ' -wipe-data -no-boot-anim -no-window'
subprocess.Popen([command], shell=True)
def wait_for_avd_boot(console_port):
'''
After calling start_avd this function is used to wait for the avd to complete booting the console_port
option must match the console_port option used to start the avd
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
#dev_cmd = 'adb -s emulator-' + console_port + ' shell getprop dev.bootcomplete'
#sys_cmd = 'adb -s emulator-' + console_port + ' shell getprop sys.boot_completed'
dev_cmd = ['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'dev.bootcomplete']
wait_for_boot = True
while wait_for_boot:
adb_process = subprocess.Popen(['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'dev.bootcomplete'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = adb_process.communicate()
#print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode)
if(adb_process.returncode == 0):
if(output.startswith('1')):
print('property dev.bootcomplete indicates that the avd boot has completed')
wait_for_boot = False
else:
#print('Waiting for emulator to start')
time.sleep(1);
else:
#print('Waiting for emulator to start')
time.sleep(1)
wait_for_boot = True
while wait_for_boot:
adb_process = subprocess.Popen(['adb', '-s', 'emulator-' + console_port, 'shell', 'getprop', 'sys.boot_completed'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = adb_process.communicate()
#print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode)
if(adb_process.returncode == 0):
if(output.startswith('1')):
print('property sys.boot_completed indicates that the avd boot has completed')
wait_for_boot = False
else:
#print('Waiting for emulator to start')
time.sleep(1)
else:
#print('Waiting for emulator to start')
time.sleep(1)
def build_smoketests():
'''
Use gradlew to build the android smoke tests
'''
os.environ['ANDROID_NDK_HOME'] = os.path.abspath(os.getcwd() + '/../../extlibs/android/ndk/android-ndk-r10d')
command = './gradlew assembleAndroidTest'
subprocess.Popen([command], shell=True).wait()
def install_smoketests(console_port):
'''
Install the android smoke tests. Must run build_smoketests() before running this function
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' install -r ./build/outputs/apk/iotivity-android-debug-androidTest-unaligned.apk'
subprocess.Popen([command], shell=True).wait()
def run_smoketests(console_port):
'''
run the android smoke test
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' shell am instrument -w org.iotivity.base.test/android.test.InstrumentationTestRunner'
print command
subprocess.Popen([command], shell=True).wait()
def kill_avd(console_port):
'''
shut down the avd
Keyword arguments:
console_port -- the port number that was specified when starting the avd
'''
command = 'adb -s emulator-' + console_port + ' emu kill'
subprocess.Popen([command], shell=True).wait()
def create_avd(avd_name, target, abi):
'''
Create a new android virtual device
Keyword arguments:
avd_name -- the name of the created avd
target -- the target Target ID of the system image to use with the new AVD. example android-21
abi -- the architecture type for the avd example armeabi, x86, or x86_64
run command $android list targets to get a list of targets and there Tag/ABIs
'''
command = ['android', '-s', 'create', 'avd', '-f', '--name', avd_name, '--target', target, '--abi', abi]
p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Just use the default avd no need to specify extra options.
p.communicate('no')
p.wait()
def delete_avd(avd_name):
command = ['android', '-s', 'delete', 'avd', '--name', avd_name]
subprocess.Popen(command).wait();
def start_android_and_run_tests(target, abi):
'''
This function does the following
1. creates a new avd named smoke_test_avd_####
where the #### is the port number that is used to talk with the avd
the port number is assigned automatically.
2. starts the created avd
3. waits for the avd to boot
4. builds android smoke tests
5. installs the smoke tests on the avd
6. runs the smoke tests
7. shuts down the avd
8. deletes the avd
Keyword arguments:
avd_name -- the name of the created avd
target -- the target Target ID of the system image to use with the new AVD. example android-21
abi -- the architecture type for the avd example armeabi, x86, or x86_64
run command $android list targets to get a list of targets and there Tag/ABIs
'''
avalible_port = find_avalible_console_port()
avd_name = 'smoke_test_avd_' + avalible_port
create_avd(avd_name, target, abi)
start_avd(avd_name, avalible_port)
wait_for_avd_boot(avalible_port)
build_smoketests();
install_smoketests(avalible_port)
run_smoketests(avalible_port)
kill_avd(avalible_port)
delete_avd(avd_name)
def main(argv):
target = 'android-21'
abi = 'x86_64'
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=textwrap.dedent('''\
If ABI is not specified it will default to x86_64.
If TARGET is not specified it will default to android-21.'''))
parser.add_argument('-a', '--abi', help='specify the abi of the android avd example "x86_64"')
parser.add_argument('-t', '--target', help='the andrdoid target example "android-21"')
args = parser.parse_args()
if (args.abi != None):
abi = args.abi
if (args.target != None):
target = args.target
print '*****************************************************'
print 'Running andriod smoke test with the following options'
print ' The android target is -- ', target
print ' The android abi is -- ', abi
print '*****************************************************'
start_android_and_run_tests(target, abi)
# Script entry point: forward the command line args (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
en
| 0.699604
|
#!/usr/bin/python #****************************************************************** # # Copyright 2016 Intel Corporation All Rights Reserved. # #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= Find an open port number that will be used for the avd console port. Start searching for the at port number 5554 and continue incrementing port number till and open port is found. Returns port number as a string # 5554 is the default console port used when starting an avd without specifying a port number # since this is the default port number by default we start checking to see if that port is # currently in use. If it is being used increase the port to the next even port number. # Each instance of the avd uses two ports the console port and the avd port.The avd port is # always console port +1 so we will check that that port is also open. #for adv the port must be a multiple of 2 Start up the avd specified by the avd_name parameter use the specify the console port that the avd will use with the console_port parameter. The find_avalible_console_port() function should be used to determine an open console_port that can be passed into this function. note: - all data on the avd will be wiped to start with a known starting condition. - the avd will be started with the no-window option so there is no visual indication that the avd is launched. 
Keyword arguments: avd_name -- the name of the created android virtual device console_port -- the port number that will attempt to be used by the when starting the avd After calling start_avd this function is used to wait for the avd to complete booting the console_port option must match the console_port option used to start the avd Keyword arguments: console_port -- the port number that was specified when starting the avd #dev_cmd = 'adb -s emulator-' + console_port + ' shell getprop dev.bootcomplete' #sys_cmd = 'adb -s emulator-' + console_port + ' shell getprop sys.boot_completed' #print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode) #print('Waiting for emulator to start') #print('Waiting for emulator to start') #print 'output = ' + str(output) + ' error = ' + str(error) + ' return code = ' + str(adb_process.returncode) #print('Waiting for emulator to start') #print('Waiting for emulator to start') Use gradlew to build the android smoke tests Install the android smoke tests. Must run build_smoketests() before running this function Keyword arguments: console_port -- the port number that was specified when starting the avd run the android smoke test Keyword arguments: console_port -- the port number that was specified when starting the avd shut down the avd Keyword arguments: console_port -- the port number that was specified when starting the avd Create a new android virtual device Keyword arguments: avd_name -- the name of the created avd target -- the target Target ID of the system image to use with the new AVD. example android-21 abi -- the architecture type for the avd example armeabi, x86, or x86_64 run command $android list targets to get a list of targets and there Tag/ABIs # Just use the default avd no need to specify extra options. This function does the following 1. 
creates a new avd named smoke_test_avd_#### where the #### is the port number that is used to talk with the avd the port number is assigned automatically. 2. starts the created avd 3. waits for the avd to boot 4. builds android smoke tests 5. installs the smoke tests on the avd 6. runs the smoke tests 7. shuts down the avd 8. deletes the avd Keyword arguments: avd_name -- the name of the created avd target -- the target Target ID of the system image to use with the new AVD. example android-21 abi -- the architecture type for the avd example armeabi, x86, or x86_64 run command $android list targets to get a list of targets and there Tag/ABIs \ If ABI is not specified it will default to x86_64. If TARGET is not specified it will default to android-21.
| 2.328809
| 2
|
src/biome/text/environment.py
|
ignacioct/biome-text
| 0
|
6625362
|
<reponame>ignacioct/biome-text<filename>src/biome/text/environment.py<gh_stars>0
# Name of the environment variable used to select the CUDA device.
CUDA_DEVICE = "CUDA_DEVICE"
|
CUDA_DEVICE = "CUDA_DEVICE"
|
none
| 1
| 1.056024
| 1
|
|
apple/bundling/plist_actions.bzl
|
jerrymarino/rules_apple
| 1
|
6625363
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions that operate on plist files."""
load(
"@bazel_skylib//lib:paths.bzl",
"paths"
)
load(
"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
"bundling_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:file_support.bzl",
"file_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:plist_support.bzl",
"plist_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"product_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleVersionInfo",
)
load(
"@build_bazel_rules_apple//apple:utils.bzl",
"apple_action",
)
load(
"@build_bazel_rules_apple//common:attrs.bzl",
"attrs",
)
load(
"@build_bazel_rules_apple//common:providers.bzl",
"providers",
)
def _environment_plist_action(ctx):
    """Creates an action that extracts the Xcode environment plist.

    Args:
      ctx: The Skylark context.

    Returns:
      The plist file that contains the extracted environment.
    """
    platform, sdk_version = platform_support.platform_and_sdk_version(ctx)
    # e.g. "iphoneos11.2" -- lowercase platform name plus SDK version.
    versioned_platform = platform.name_in_plist.lower() + str(sdk_version)
    out_plist = ctx.new_file(ctx.label.name + "_environment.plist")
    platform_support.xcode_env_action(
        ctx,
        outputs=[out_plist],
        executable=ctx.executable._environment_plist,
        arguments=[
            "--platform",
            versioned_platform,
            "--output",
            out_plist.path,
        ],
    )
    return out_plist
def _infoplist_minimum_os_pair(ctx):
    """Returns an Info.plist entry of the min OS version for the current target.

    Args:
      ctx: The Skylark context.

    Returns:
      A single-entry dictionary with the key/value pair to use in the target's
      Info.plist to set the minimum OS version supported.
    """
    # macOS spells the key differently from the embedded (iOS/tvOS/watchOS)
    # platforms.
    is_macos = platform_support.platform_type(ctx) == apple_common.platform_type.macos
    plist_key = "LSMinimumSystemVersion" if is_macos else "MinimumOSVersion"
    return {plist_key: platform_support.minimum_os(ctx)}
def _merge_infoplists(ctx,
                      path_prefix,
                      input_plists,
                      bundle_id=None,
                      child_plists=[],
                      child_required_values=[],
                      exclude_executable_name=False,
                      extract_from_ctxt=False,
                      include_xcode_env=False,
                      resource_bundle_target_data=None,
                      version_keys_required=False):
    """Creates an action that merges Info.plists and converts them to binary.

    This action merges multiple plists by shelling out to plisttool, then
    compiles the final result into a single binary plist file.

    Args:
      ctx: The Skylark context.
      path_prefix: A path prefix to apply in front of any intermediate files.
      input_plists: The plist files to merge.
      bundle_id: The bundle identifier to set in the output plist.
      child_plists: A list of plists from child targets (such as extensions
          or Watch apps) whose bundle IDs and version strings should be
          validated against the compiled plist for consistency.
      child_required_values: A list of pairs containing a client target plist
          and the pairs to check. For more information on the second item in
          the pair, see plisttool's `child_plist_required_values`, as this is
          passed straight through to it.
      exclude_executable_name: If True, the executable name will not be added
          to the plist in the `CFBundleExecutable` key. This is mainly
          intended for plists embedded in a command line tool.
      extract_from_ctxt: If True, the ctx will also be inspected for
          additional information values to be added into the final Info.plist.
          The ctx will also be checked to see if a PkgInfo file should be
          created.
      include_xcode_env: If True, the development environment and platform
          info should be added to the plist (just like Xcode does).
      resource_bundle_target_data: If this is for a resource bundle, the
          AppleResourceBundleTargetData of the target that defined it. Will
          be used to provide substitution values.
      version_keys_required: If True, the merged Info.plist file must include
          entries for CFBundleShortVersionString and CFBundleVersion.

    Returns:
      A struct with two fields: `output_plist`, a File object containing the
      merged binary plist, and `pkginfo`, a File object containing the
      PkgInfo file (or None, if no file was generated).
    """
    # These option combinations are contradictory by construction; fail fast.
    if exclude_executable_name and not extract_from_ctxt:
        fail('exclude_executable_name has no meaning without extract_from_ctxt.')
    if resource_bundle_target_data and extract_from_ctxt:
        fail("resource_bundle_target_data doesn't work with extract_from_ctxt.")

    outputs = []
    plists = [p.path for p in input_plists]
    forced_plists = []
    additional_plisttool_inputs = []
    pkginfo = None
    info_plist_options = {}
    substitutions = {}

    if version_keys_required:
        info_plist_options["version_keys_required"] = True

    if bundle_id:
        substitutions["PRODUCT_BUNDLE_IDENTIFIER"] = bundle_id
        # Pass the bundle_id as a plist and not a force_plist, this way the
        # merging will validate that any existing value matches. Historically
        # mismatches between the input Info.plist and rules bundle_id have
        # been valid bugs, so this will still catch that.
        plists.append(struct(CFBundleIdentifier=bundle_id))

    output_plist = file_support.intermediate(
        ctx, "%{name}-Info-binary.plist", prefix=path_prefix)
    outputs.append(output_plist)

    # plisttool consumes child plist paths keyed by the child target label.
    if child_plists:
        for_control = struct(
            **{str(p.owner): p.path for p in child_plists})
        info_plist_options["child_plists"] = for_control

    if child_required_values:
        for_control = struct(
            **{str(p.owner): v for (p, v) in child_required_values})
        info_plist_options["child_plist_required_values"] = for_control

    if resource_bundle_target_data:
        substitutions["PRODUCT_NAME"] = resource_bundle_target_data.product_name
        substitutions["BUNDLE_NAME"] = resource_bundle_target_data.bundle_name

    if extract_from_ctxt:
        # Extra things for info_plist_options
        name = bundling_support.bundle_name(ctx)
        substitutions["PRODUCT_NAME"] = name
        if not exclude_executable_name:
            substitutions["EXECUTABLE_NAME"] = name
            forced_plists.append(struct(CFBundleExecutable=name))

        if ctx.attr._needs_pkginfo:
            pkginfo = file_support.intermediate(
                ctx, "%{name}-PkgInfo", prefix=path_prefix)
            outputs.append(pkginfo)
            info_plist_options["pkginfo"] = pkginfo.path

        bundle_name = bundling_support.bundle_name_with_extension(ctx)
        substitutions["BUNDLE_NAME"] = bundle_name

        version_info = providers.find_one(
            attrs.get(ctx.attr, "version"), AppleBundleVersionInfo)
        if version_info:
            additional_plisttool_inputs.append(version_info.version_file)
            info_plist_options["version_file"] = version_info.version_file.path

        # Keys to be forced into the Info.plist file.
        # b/67853874 - move this to the right platform specific rule(s).
        launch_storyboard = attrs.get(ctx.file, "launch_storyboard")
        if launch_storyboard:
            short_name = paths.split_extension(launch_storyboard.basename)[0]
            forced_plists.append(struct(UILaunchStoryboardName=short_name))

        # Add any UIDeviceFamily entry needed.
        families = platform_support.ui_device_family_plist_value(ctx)
        if families:
            forced_plists.append(struct(UIDeviceFamily=families))

        # Collect any values for special product types that we have to manually
        # put in (duplicating what Xcode apparently does under the hood).
        product_type = product_support.product_type(ctx)
        product_type_descriptor = product_support.product_type_descriptor(
            product_type)
        if product_type_descriptor:
            if product_type_descriptor.additional_infoplist_values:
                forced_plists.append(
                    struct(**product_type_descriptor.additional_infoplist_values)
                )

    if include_xcode_env:
        environment_plist = _environment_plist_action(ctx)
        additional_plisttool_inputs.append(environment_plist)

        platform, sdk_version = platform_support.platform_and_sdk_version(ctx)
        platform_with_version = platform.name_in_plist.lower() + str(sdk_version)
        min_os_pair = _infoplist_minimum_os_pair(ctx)

        forced_plists += [
            environment_plist.path,
            struct(
                CFBundleSupportedPlatforms=[platform.name_in_plist],
                DTPlatformName=platform.name_in_plist.lower(),
                DTSDKName=platform_with_version,
                **min_os_pair
            ),
        ]

    # The default in Xcode is for PRODUCT_NAME and TARGET_NAME to be the same.
    # Support TARGET_NAME for substitutions even though it might not be the
    # target name in the BUILD file.
    product_name = substitutions.get("PRODUCT_NAME")
    if product_name:
        substitutions["TARGET_NAME"] = product_name

    # Tweak what is passed for 'target' to provide more comment messages if
    # something does go wrong.
    if resource_bundle_target_data:
        target = '%s (while bundling under "%s")' % (
            str(resource_bundle_target_data.label), str(ctx.label))
    else:
        target = str(ctx.label)

    # Serialize all of the merge options into a single control file that
    # plisttool reads, rather than passing everything on the command line.
    control = struct(
        plists=plists,
        forced_plists=forced_plists,
        output=output_plist.path,
        binary=True,
        info_plist_options=struct(**info_plist_options),
        variable_substitutions=struct(**substitutions),
        target=target,
    )
    control_file = file_support.intermediate(
        ctx, "%{name}.plisttool-control", prefix=path_prefix)
    ctx.file_action(
        output=control_file,
        content=control.to_json()
    )

    plist_support.plisttool_action(
        ctx,
        inputs=input_plists + child_plists + additional_plisttool_inputs,
        outputs=outputs,
        control_file=control_file,
        mnemonic="CompileInfoPlist",
    )

    return struct(output_plist=output_plist, pkginfo=pkginfo)
# Define the loadable module that lists the exported symbols in this file.
# Callers use it as plist_actions.merge_infoplists(...).
plist_actions = struct(
    merge_infoplists=_merge_infoplists,
)
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions that operate on plist files."""
load(
"@bazel_skylib//lib:paths.bzl",
"paths"
)
load(
"@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
"bundling_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:file_support.bzl",
"file_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:plist_support.bzl",
"plist_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"product_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleVersionInfo",
)
load(
"@build_bazel_rules_apple//apple:utils.bzl",
"apple_action",
)
load(
"@build_bazel_rules_apple//common:attrs.bzl",
"attrs",
)
load(
"@build_bazel_rules_apple//common:providers.bzl",
"providers",
)
def _environment_plist_action(ctx):
"""Creates an action that extracts the Xcode environment plist.
Args:
ctx: The Skylark context.
Returns:
The plist file that contains the extracted environment.
"""
platform, sdk_version = platform_support.platform_and_sdk_version(ctx)
platform_with_version = platform.name_in_plist.lower() + str(sdk_version)
environment_plist = ctx.new_file(ctx.label.name + "_environment.plist")
platform_support.xcode_env_action(
ctx,
outputs=[environment_plist],
executable=ctx.executable._environment_plist,
arguments=[
"--platform",
platform_with_version,
"--output",
environment_plist.path,
],
)
return environment_plist
def _infoplist_minimum_os_pair(ctx):
"""Returns a info.plist entry of the min OS version for the current target.
Args:
ctx: The Skylark context.
Returns:
A dictionary containing the key/value pair to use in the targets Info.plist
to set the minimum OS version supported.
"""
if platform_support.platform_type(ctx) == apple_common.platform_type.macos:
plist_key = "LSMinimumSystemVersion"
else:
plist_key = "MinimumOSVersion"
return {plist_key: platform_support.minimum_os(ctx)}
def _merge_infoplists(ctx,
path_prefix,
input_plists,
bundle_id=None,
child_plists=[],
child_required_values=[],
exclude_executable_name=False,
extract_from_ctxt=False,
include_xcode_env=False,
resource_bundle_target_data=None,
version_keys_required=False):
"""Creates an action that merges Info.plists and converts them to binary.
This action merges multiple plists by shelling out to plisttool, then
compiles the final result into a single binary plist file.
Args:
ctx: The Skylark context.
path_prefix: A path prefix to apply in front of any intermediate files.
input_plists: The plist files to merge.
bundle_id: The bundle identifier to set in the output plist.
child_plists: A list of plists from child targets (such as extensions
or Watch apps) whose bundle IDs and version strings should be
validated against the compiled plist for consistency.
child_required_values: A list of pair containing a client target plist
and the pairs to check. For more information on the second item in the
pair, see plisttool's `child_plist_required_values`, as this is passed
straight throught to it.
exclude_executable_name: If True, the executable name will not be added to
the plist in the `CFBundleExecutable` key. This is mainly intended for
plists embedded in a command line tool.
extract_from_ctxt: If True, the ctx will also be inspect for additional
information values to be added into the final Info.plist. The ctxt
will also be checked to see if a PkgInfo file should be created.
include_xcode_env: If True, add the development environment and platform
platform info should be added to the plist (just like Xcode does).
resource_bundle_target_data: If the is for a resource bundle, the
AppleResourceBundleTargetData of the target that defined it. Will be
used to provide substitution values.
version_keys_required: If True, the merged Info.plist file must include
entries for CFBundleShortVersionString and CFBundleVersion.
Returns:
A struct with two fields: `output_plist`, a File object containing the
merged binary plist, and `pkginfo`, a File object containing the PkgInfo
file (or None, if no file was generated).
"""
if exclude_executable_name and not extract_from_ctxt:
fail('exclude_executable_name has no meaning without extract_from_ctxt.')
if resource_bundle_target_data and extract_from_ctxt:
fail("resource_bundle_target_data doesn't work with extract_from_ctxt.")
outputs = []
plists = [p.path for p in input_plists]
forced_plists = []
additional_plisttool_inputs = []
pkginfo = None
info_plist_options = {}
substitutions = {}
if version_keys_required:
info_plist_options["version_keys_required"] = True
if bundle_id:
substitutions["PRODUCT_BUNDLE_IDENTIFIER"] = bundle_id
# Pass the bundle_id as a plist and not a force_plist, this way the
# merging will validate that any existing value matches. Historically
# mismatches between the input Info.plist and rules bundle_id have
# been valid bugs, so this will still catch that.
plists.append(struct(CFBundleIdentifier=bundle_id))
output_plist = file_support.intermediate(
ctx, "%{name}-Info-binary.plist", prefix=path_prefix)
outputs.append(output_plist)
if child_plists:
for_control = struct(
**{str(p.owner): p.path for p in child_plists})
info_plist_options["child_plists"] = for_control
if child_required_values:
for_control = struct(
**{str(p.owner): v for (p, v) in child_required_values})
info_plist_options["child_plist_required_values"] = for_control
if resource_bundle_target_data:
substitutions["PRODUCT_NAME"] = resource_bundle_target_data.product_name
substitutions["BUNDLE_NAME"] = resource_bundle_target_data.bundle_name
if extract_from_ctxt:
# Extra things for info_plist_options
name = bundling_support.bundle_name(ctx)
substitutions["PRODUCT_NAME"] = name
if not exclude_executable_name:
substitutions["EXECUTABLE_NAME"] = name
forced_plists.append(struct(CFBundleExecutable=name))
if ctx.attr._needs_pkginfo:
pkginfo = file_support.intermediate(
ctx, "%{name}-PkgInfo", prefix=path_prefix)
outputs.append(pkginfo)
info_plist_options["pkginfo"] = pkginfo.path
bundle_name = bundling_support.bundle_name_with_extension(ctx)
substitutions["BUNDLE_NAME"] = bundle_name
version_info = providers.find_one(
attrs.get(ctx.attr, "version"), AppleBundleVersionInfo)
if version_info:
additional_plisttool_inputs.append(version_info.version_file)
info_plist_options["version_file"] = version_info.version_file.path
# Keys to be forced into the Info.plist file.
# b/67853874 - move this to the right platform specific rule(s).
launch_storyboard = attrs.get(ctx.file, "launch_storyboard")
if launch_storyboard:
short_name = paths.split_extension(launch_storyboard.basename)[0]
forced_plists.append(struct(UILaunchStoryboardName=short_name))
# Add any UIDeviceFamily entry needed.
families = platform_support.ui_device_family_plist_value(ctx)
if families:
forced_plists.append(struct(UIDeviceFamily=families))
# Collect any values for special product types that we have to manually put
# in (duplicating what Xcode apparently does under the hood).
product_type = product_support.product_type(ctx)
product_type_descriptor = product_support.product_type_descriptor(
product_type)
if product_type_descriptor:
if product_type_descriptor.additional_infoplist_values:
forced_plists.append(
struct(**product_type_descriptor.additional_infoplist_values)
)
if include_xcode_env:
environment_plist = _environment_plist_action(ctx)
additional_plisttool_inputs.append(environment_plist)
platform, sdk_version = platform_support.platform_and_sdk_version(ctx)
platform_with_version = platform.name_in_plist.lower() + str(sdk_version)
min_os_pair = _infoplist_minimum_os_pair(ctx)
forced_plists += [
environment_plist.path,
struct(
CFBundleSupportedPlatforms=[platform.name_in_plist],
DTPlatformName=platform.name_in_plist.lower(),
DTSDKName=platform_with_version,
**min_os_pair
),
]
# The default in Xcode is for PRODUCT_NAME and TARGET_NAME to be the same.
# Support TARGET_NAME for substitutions even though it might not be the
# target name in the BUILD file.
product_name = substitutions.get("PRODUCT_NAME")
if product_name:
substitutions["TARGET_NAME"] = product_name
# Tweak what is passed for 'target' to provide more more comment messages if
# something does go wrong.
if resource_bundle_target_data:
target = '%s (while bundling under "%s")' % (
str(resource_bundle_target_data.label), str(ctx.label))
else:
target = str(ctx.label)
control = struct(
plists=plists,
forced_plists=forced_plists,
output=output_plist.path,
binary=True,
info_plist_options=struct(**info_plist_options),
variable_substitutions=struct(**substitutions),
target=target,
)
control_file = file_support.intermediate(
ctx, "%{name}.plisttool-control", prefix=path_prefix)
ctx.file_action(
output=control_file,
content=control.to_json()
)
plist_support.plisttool_action(
ctx,
inputs=input_plists + child_plists + additional_plisttool_inputs,
outputs=outputs,
control_file=control_file,
mnemonic="CompileInfoPlist",
)
return struct(output_plist=output_plist, pkginfo=pkginfo)
# Define the loadable module that lists the exported symbols in this file.
plist_actions = struct(
merge_infoplists=_merge_infoplists,
)
|
en
| 0.833138
|
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Actions that operate on plist files. Creates an action that extracts the Xcode environment plist. Args: ctx: The Skylark context. Returns: The plist file that contains the extracted environment. Returns a info.plist entry of the min OS version for the current target. Args: ctx: The Skylark context. Returns: A dictionary containing the key/value pair to use in the targets Info.plist to set the minimum OS version supported. Creates an action that merges Info.plists and converts them to binary. This action merges multiple plists by shelling out to plisttool, then compiles the final result into a single binary plist file. Args: ctx: The Skylark context. path_prefix: A path prefix to apply in front of any intermediate files. input_plists: The plist files to merge. bundle_id: The bundle identifier to set in the output plist. child_plists: A list of plists from child targets (such as extensions or Watch apps) whose bundle IDs and version strings should be validated against the compiled plist for consistency. child_required_values: A list of pair containing a client target plist and the pairs to check. For more information on the second item in the pair, see plisttool's `child_plist_required_values`, as this is passed straight throught to it. exclude_executable_name: If True, the executable name will not be added to the plist in the `CFBundleExecutable` key. 
This is mainly intended for plists embedded in a command line tool. extract_from_ctxt: If True, the ctx will also be inspect for additional information values to be added into the final Info.plist. The ctxt will also be checked to see if a PkgInfo file should be created. include_xcode_env: If True, add the development environment and platform platform info should be added to the plist (just like Xcode does). resource_bundle_target_data: If the is for a resource bundle, the AppleResourceBundleTargetData of the target that defined it. Will be used to provide substitution values. version_keys_required: If True, the merged Info.plist file must include entries for CFBundleShortVersionString and CFBundleVersion. Returns: A struct with two fields: `output_plist`, a File object containing the merged binary plist, and `pkginfo`, a File object containing the PkgInfo file (or None, if no file was generated). # Pass the bundle_id as a plist and not a force_plist, this way the # merging will validate that any existing value matches. Historically # mismatches between the input Info.plist and rules bundle_id have # been valid bugs, so this will still catch that. # Extra things for info_plist_options # Keys to be forced into the Info.plist file. # b/67853874 - move this to the right platform specific rule(s). # Add any UIDeviceFamily entry needed. # Collect any values for special product types that we have to manually put # in (duplicating what Xcode apparently does under the hood). # The default in Xcode is for PRODUCT_NAME and TARGET_NAME to be the same. # Support TARGET_NAME for substitutions even though it might not be the # target name in the BUILD file. # Tweak what is passed for 'target' to provide more more comment messages if # something does go wrong. # Define the loadable module that lists the exported symbols in this file.
| 1.553267
| 2
|
scripts/vae/utils/plot.py
|
qiuhuachuan/pyprobml
| 0
|
6625364
|
<reponame>qiuhuachuan/pyprobml
import matplotlib.pyplot as plt
import torchvision.utils as vutils
from einops import rearrange
def plot(model_samples, title, figsize=(10,30), num_of_images_per_row=5):
    """Render a batch of image tensors as a single titled matplotlib grid."""
    plt.figure(figsize=figsize)
    grid = vutils.make_grid(model_samples, nrow=num_of_images_per_row)
    grid = grid.cpu().detach().numpy()
    plt.title(title)
    # torch tensors are channels-first; matplotlib wants channels-last.
    plt.imshow(rearrange(grid, "c h w -> h w c"))
    plt.show()
def plot_samples(vaes, num=25, figsize=(10,30), num_of_images_per_row=5):
    """Plot `num` samples from each VAE; accepts one model or an iterable."""
    if not hasattr(vaes, '__iter__'):
        drawn = vaes.get_samples(num)
        plot(drawn, f"Samples from {vaes.model_name}", figsize,
             num_of_images_per_row)
        return
    for vae in vaes:
        plot_samples(vae, num, figsize, num_of_images_per_row)
def plot_reconstruction(vaes, batch, num_of_samples=5, num_of_images_per_row=5, figsize=(10, 30)):
    """Show the first originals from `batch`, then each VAE's reconstruction."""
    x, _ = batch  # labels in the batch are unused here
    originals = x[:num_of_samples, :, :, :]
    plot(originals, "Original", figsize=figsize,
         num_of_images_per_row=num_of_images_per_row)
    # Normalize to a list so single-model and multi-model calls share one path.
    models = vaes if hasattr(vaes, '__iter__') else [vaes]
    for vae in models:
        plot(vae(originals), f"Reconstruction from {vae.model_name}",
             figsize, num_of_images_per_row)
|
import matplotlib.pyplot as plt
import torchvision.utils as vutils
from einops import rearrange
def plot(model_samples, title, figsize=(10,30), num_of_images_per_row=5):
plt.figure(figsize=figsize)
img1 = vutils.make_grid(model_samples, nrow=num_of_images_per_row).cpu().detach().numpy()
plt.title(title)
plt.imshow(rearrange(img1, "c h w -> h w c"))
plt.show()
def plot_samples(vaes, num=25, figsize=(10,30), num_of_images_per_row=5):
if hasattr(vaes, '__iter__'):
for vae in vaes:
plot_samples(vae, num, figsize, num_of_images_per_row)
else:
model_samples = vaes.get_samples(num)
title = f"Samples from {vaes.model_name}"
plot(model_samples, title, figsize, num_of_images_per_row)
def plot_reconstruction(vaes, batch, num_of_samples=5, num_of_images_per_row=5, figsize=(10, 30)):
x, y = batch
img = x[:num_of_samples, :, :, :]
plot(img, "Original", figsize=figsize, num_of_images_per_row=num_of_images_per_row)
if hasattr(vaes, '__iter__'):
for vae in vaes:
title = f"Reconstruction from {vae.model_name}"
plot(vae(img), title, figsize, num_of_images_per_row)
else:
title = f"Reconstruction from {vaes.model_name}"
plot(vaes(img), title, figsize, num_of_images_per_row)
|
none
| 1
| 2.412701
| 2
|
|
simple_rl/amdp/abstr_domains/cleanup/AbstractCleanupMDPClass.py
|
yoonseon-oh/simple_rl
| 2
|
6625365
|
# Python imports
from __future__ import print_function
from collections import defaultdict
import copy
# Other imports
from simple_rl.mdp.MDPClass import MDP
from simple_rl.planning import ValueIteration
from simple_rl.amdp.AMDPTaskNodesClass import NonPrimitiveAbstractTask, RootTaskNode
from simple_rl.amdp.abstr_domains.cleanup.AbstractCleanupL1StateClass import *
from simple_rl.amdp.abstr_domains.cleanup.AbstractCleanupStateMapperClass import AbstractCleanupL1StateMapper
from simple_rl.tasks.cleanup.cleanup_state import CleanUpState
class CleanupL1GroundedAction(NonPrimitiveAbstractTask):
    """Level-1 AMDP task node for one grounded cleanup action (e.g. 'toRoom(red)').

    Wraps the grounded action string with the terminal- and reward-functions the
    AMDP machinery needs; both are evaluated by projecting the ground-level
    CleanUpState up to the abstract L1 state.
    """
    def __init__(self, l1_action_string, subtasks, l0_domain):
        '''
        Args:
            l1_action_string (str)
            subtasks (list)
            l0_domain (CleanUpMDP)
        '''
        self.action = l1_action_string
        self.l0_domain = l0_domain
        # e.g. 'toRoom(red)' -> 'toRoom'; the parameter is recovered on demand.
        self.lifted_action = self.grounded_to_lifted_action(l1_action_string)
        tf, rf = self._terminal_function, self._reward_function
        NonPrimitiveAbstractTask.__init__(self, l1_action_string, subtasks, tf, rf)
    def _terminal_function(self, state):
        '''
        Args:
            state (CleanUpState)
        Returns:
            is_terminal (bool)
        '''
        assert type(state) == CleanUpState, 'Actual type of state is {}'.format(type(state))
        # One small predicate per lifted action; each is evaluated on the L1
        # projection of the ground state, not on `state` itself.
        def _robot_door_terminal_func(s, door_color):
            return s.robot.current_door == door_color
        def _robot_room_terminal_func(s, room_color):
            # current_door == '' means the robot is inside the room proper,
            # not standing in a doorway.
            return s.robot.current_room == room_color and s.robot.current_door == ''
        def _robot_to_block_terminal_func(s, block_color):
            return s.robot.adjacent_block == block_color
        def _block_to_door_terminal_func(s, block_color, door_color):
            for block in s.blocks:
                if block.block_color == block_color and block.current_door == door_color:
                    return True
            return False
        def _block_to_room_terminal_func(s, block_color, room_color):
            for block in s.blocks:
                if block.block_color == block_color and block.current_room == room_color and block.current_door == '':
                    return True
            return False
        # Project the ground (L0) state up to the abstract (L1) state.
        state_mapper = AbstractCleanupL1StateMapper(self.l0_domain)
        projected_state = state_mapper.map_state(state)
        action_parameter = self.grounded_to_action_parameter(self.action)
        if self.lifted_action == 'toDoor':
            return _robot_door_terminal_func(projected_state, action_parameter)
        if self.lifted_action == 'toRoom':
            return _robot_room_terminal_func(projected_state, action_parameter)
        if self.lifted_action == 'toObject':
            return _robot_to_block_terminal_func(projected_state, action_parameter)
        if self.lifted_action == 'objectToDoor':
            return _block_to_door_terminal_func(projected_state, projected_state.robot.adjacent_block, action_parameter)
        if self.lifted_action == 'objectToRoom':
            return _block_to_room_terminal_func(projected_state, projected_state.robot.adjacent_block, action_parameter)
        raise ValueError('Lifted action {} not supported yet'.format(self.lifted_action))
    def _reward_function(self, state, action):
        # Reward 1 exactly when taking `action` in `state` reaches this subtask's
        # terminal set (simulated through the L0 transition model), else 0.
        assert type(state) == CleanUpState, 'Actual type of state is {}'.format(type(state))
        next_state = self.l0_domain.transition_func(state, action)
        return 1. if self._terminal_function(next_state) else 0.
    # -------------------------------
    # L1 Action Helper Functions
    # -------------------------------
    @staticmethod
    def grounded_to_lifted_action(grounded_action_str):
        # 'toRoom(red)' -> 'toRoom'
        return grounded_action_str.split('(')[0]
    @staticmethod
    def grounded_to_action_parameter(grounded_action_str):
        # 'toRoom(red)' -> 'red'
        return grounded_action_str.split('(')[1].split(')')[0]
    @staticmethod
    def door_name_to_room_colors(door_name):
        # Door names encode the two connected room colors joined by '_'.
        return door_name.split('_')
    @staticmethod
    def get_other_room_color(state, door_name):
        # Return the color of the door's connected room that the robot is NOT
        # currently in ('' if the robot is in neither connected room).
        connected_rooms = CleanupL1GroundedAction.door_name_to_room_colors(door_name)
        if state.robot.current_room == connected_rooms[0]:
            return connected_rooms[1]
        if state.robot.current_room == connected_rooms[1]:
            return connected_rooms[0]
        return ''
class CleanupRootGroundedAction(RootTaskNode):
    """Root node of the cleanup AMDP task hierarchy; records its action string
    and defers all other behavior to RootTaskNode."""
    def __init__(self, action_str, subtasks, l1_domain, terminal_func, reward_func):
        self.action = action_str
        super(CleanupRootGroundedAction, self).__init__(action_str, subtasks, l1_domain, terminal_func, reward_func)
class CleanupL1MDP(MDP):
    """Abstract (level-1) MDP over the cleanup domain.

    States are CleanupL1State projections of the ground world; actions are the
    LIFTED_ACTIONS grounded with the door/room/block attributes found in the
    initial state.
    """
    LIFTED_ACTIONS = ['toDoor', 'toRoom', 'toObject', 'objectToDoor', 'objectToRoom']
    # -------------------------------
    # Level 1 MDP description
    # -------------------------------
    def __init__(self, l0_domain):
        '''
        Args:
            l0_domain (CleanUpMDP)
        '''
        self.l0_domain = l0_domain
        # Project the ground initial state to L1 and enumerate the grounded
        # action set from the objects present in it.
        state_mapper = AbstractCleanupL1StateMapper(l0_domain)
        l1_init_state = state_mapper.map_state(l0_domain.init_state)
        grounded_actions = CleanupL1MDP.ground_actions(l1_init_state)
        self.terminal_func = self._is_goal_state
        MDP.__init__(self, grounded_actions, self._transition_function, self._reward_function, l1_init_state)
    def _is_goal_state(self, state):
        # Goal: the task's block AND the robot are both in the goal room.
        for block in state.blocks: # type: CleanupL1Block
            if block.block_color == self.l0_domain.task.block_color:
                return block.current_room == self.l0_domain.task.goal_room_color and \
                       state.robot.current_room == self.l0_domain.task.goal_room_color
        raise ValueError('Did not find an L1 Block object with color {}'.format(self.l0_domain.task.block_color))
    def _reward_function(self, state, action):
        '''
        Args:
            state (CleanupL1State)
            action (str)
        Returns:
            reward (float)
        '''
        # 1.0 exactly when the action leads to the goal state, else 0.0.
        next_state = self._transition_function(state, action)
        return 1. if self._is_goal_state(next_state) else 0.
    def _transition_function(self, state, action):
        '''
        Args:
            state (CleanupL1State)
            action (str): grounded action
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        lifted_action = CleanupL1GroundedAction.grounded_to_lifted_action(action)
        # The branches are mutually exclusive (one lifted action per grounded
        # action string), so the plain `if`s behave like an elif chain.
        if lifted_action == 'toDoor':
            target_door_name = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_door(state, target_door_name)
        if lifted_action == 'toRoom':
            destination_room = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_room(state, destination_room)
        if lifted_action == 'toObject':
            block_color = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_block(state, block_color)
        if lifted_action == 'objectToDoor':
            target_door_name = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            # next_state = self._move_agent_to_door(state, target_door_name)
            next_state = self._move_block_to_door(next_state, target_door_name)
        if lifted_action == 'objectToRoom':
            # Moves the agent into the room first, then the block it carries.
            destination_room = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_room(state, destination_room)
            next_state = self._move_block_to_room(next_state, destination_room)
        next_state.set_terminal(self._is_goal_state(next_state))
        return next_state
    @classmethod
    def ground_actions(cls, l1_state):
        '''
        Given a list of lifted/parameterized actions and the L0 cleanup domain,
        generate a list of grounded actions based on the attributes of the objects
        instantiated in the L0 domain.
        Args:
            l1_state (CleanupL1State): underlying ground level MDP
        Returns:
            actions (list): grounded actions
        '''
        grounded_actions = []
        for door in l1_state.doors: # type: CleanupL1Door
            grounded_actions.append(cls.LIFTED_ACTIONS[0] + '(' + str(door) + ')')
            grounded_actions.append(cls.LIFTED_ACTIONS[3] + '(' + str(door) + ')')
        for room in l1_state.rooms: # type: CleanupL1Room
            grounded_actions.append(cls.LIFTED_ACTIONS[1] + '(' + str(room) + ')')
            grounded_actions.append(cls.LIFTED_ACTIONS[4] + '(' + str(room) + ')')
        for block in l1_state.blocks: # type: CleanupL1Block
            grounded_actions.append(cls.LIFTED_ACTIONS[2] + '(' + str(block.block_color) + ')')
        return grounded_actions
    # -----------------------------------
    # Agent Navigation Helper functions
    # -----------------------------------
    @staticmethod
    def _move_agent_to_door(state, door_name):
        '''
        If the specified door connects the agent's current room, then it may transition to the door.
        Args:
            state (CleanupL1State)
            door_name (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        destination_door = state.get_l1_door_for_color(door_name)
        # Door names contain the connected room colors, so the substring test
        # checks that the robot's room is one of the two rooms the door joins.
        if destination_door and state.robot.current_room in door_name:
            # If there is already a block at the door, then move it to the other room
            block = state.get_l1_block_for_color(state.robot.adjacent_block)
            if block:
                if block.current_door == door_name:
                    other_room = CleanupL1GroundedAction.get_other_room_color(state, door_name)
                    next_state = CleanupL1MDP._move_block_to_room(state, other_room)
            next_state.robot.current_door = door_name
            next_state.robot.current_room = destination_door.current_room
        return next_state
    @staticmethod
    def _move_agent_to_room(state, destination_room_color):
        '''
        Move the agent to the specified room if it is at a door connecting it to the said room.
        Args:
            state (CleanupL1State)
            destination_room_color (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        # current_door encodes both connected room colors, so this only fires
        # when the robot stands at a door adjoining the destination room.
        if destination_room_color in state.robot.current_door:
            next_state.robot.current_room = destination_room_color
            next_state.robot.current_door = ''
        return next_state
    @staticmethod
    def _move_agent_to_block(state, block_color):
        '''
        Move the agent to the specified block if they are both in the same room.
        Args:
            state (CleanupL1State)
            block_color (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        target_block = state.get_l1_block_for_color(block_color)
        if target_block:
            if target_block.current_room == state.robot.current_room:
                next_state.robot.adjacent_block = target_block.block_color
                next_state.robot.current_door = ''
        return next_state
    # -----------------------------------
    # Block Navigation Helper functions
    # -----------------------------------
    @staticmethod
    def _move_block_to_door(state, door_name):
        '''
        Move the agent's adjacent block to the specified door if they are in a room connected by said door.
        Args:
            state (CleanupL1State)
            door_name (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        block = next_state.get_l1_block_for_color(next_state.robot.adjacent_block)
        destination_door = next_state.get_l1_door_for_color(door_name)
        if block and destination_door:
            if state.robot.current_room in door_name and block.current_room in door_name:
                # The agent stays with the block's room and leaves the doorway.
                next_state.robot.current_room = block.current_room
                next_state.robot.current_door = ''
                block.current_door = door_name
                block.current_room = destination_door.current_room
        return next_state
    @staticmethod
    def _move_block_to_room(state, destination_room_color):
        '''
        Move the block to the specified room if the block is at a door connecting said room.
        Args:
            state (CleanupL1State)
            destination_room_color (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        block = next_state.get_l1_block_for_color(next_state.robot.adjacent_block)
        if block:
            if destination_room_color in block.current_door:
                block.current_room = destination_room_color
                block.current_door = ''
        return next_state
# -----------------------------------
# Debug functions
# -----------------------------------
def debug_l1_domain():
    """Smoke test: build a small L0 cleanup world, lift it to the L1 AMDP, and
    print the plan found by value iteration."""
    from simple_rl.tasks.cleanup.cleanup_block import CleanUpBlock
    from simple_rl.tasks.cleanup.cleanup_door import CleanUpDoor
    from simple_rl.tasks.cleanup.cleanup_room import CleanUpRoom
    from simple_rl.tasks.cleanup.cleanup_task import CleanUpTask
    from simple_rl.tasks.cleanup.CleanupMDPClass import CleanUpMDP

    def get_l1_policy(domain):
        # Plan with value iteration and echo the state -> action mapping.
        planner = ValueIteration(domain, sample_rate=1)
        planner.run_vi()
        policy = defaultdict()
        action_seq, state_seq = planner.plan(domain.init_state)
        print('Plan for {}:'.format(domain))
        for visited_state, chosen_action in zip(state_seq, action_seq):
            print("\tpi[{}] -> {}\n".format(visited_state, chosen_action))
            policy[visited_state] = chosen_action
        return policy

    # Three rooms, three blocks, two doors; the task is to bring the purple
    # block into the red room.
    task = CleanUpTask("purple", "red")
    room1 = CleanUpRoom("room1", [(x, y) for x in range(5) for y in range(3)], "blue")
    room2 = CleanUpRoom("room2", [(x, y) for x in range(5, 10) for y in range(3)], color="red")
    room3 = CleanUpRoom("room3", [(x, y) for x in range(0, 10) for y in range(3, 6)], color="yellow")
    block1 = CleanUpBlock("block1", 1, 1, color="green")
    block2 = CleanUpBlock("block2", 2, 4, color="purple")
    block3 = CleanUpBlock("block3", 8, 1, color="orange")
    rooms = [room1, room2, room3]
    blocks = [block1, block2, block3]
    doors = [CleanUpDoor(4, 0), CleanUpDoor(3, 2)]
    mdp = CleanUpMDP(task, rooms=rooms, doors=doors, blocks=blocks)
    get_l1_policy(CleanupL1MDP(mdp))
|
# Python imports
from __future__ import print_function
from collections import defaultdict
import copy
# Other imports
from simple_rl.mdp.MDPClass import MDP
from simple_rl.planning import ValueIteration
from simple_rl.amdp.AMDPTaskNodesClass import NonPrimitiveAbstractTask, RootTaskNode
from simple_rl.amdp.abstr_domains.cleanup.AbstractCleanupL1StateClass import *
from simple_rl.amdp.abstr_domains.cleanup.AbstractCleanupStateMapperClass import AbstractCleanupL1StateMapper
from simple_rl.tasks.cleanup.cleanup_state import CleanUpState
class CleanupL1GroundedAction(NonPrimitiveAbstractTask):
    """Level-1 AMDP task node for one grounded cleanup action (e.g. 'toRoom(red)').

    Wraps the grounded action string with the terminal- and reward-functions the
    AMDP machinery needs; both are evaluated by projecting the ground-level
    CleanUpState up to the abstract L1 state.
    """
    def __init__(self, l1_action_string, subtasks, l0_domain):
        '''
        Args:
            l1_action_string (str)
            subtasks (list)
            l0_domain (CleanUpMDP)
        '''
        self.action = l1_action_string
        self.l0_domain = l0_domain
        # e.g. 'toRoom(red)' -> 'toRoom'; the parameter is recovered on demand.
        self.lifted_action = self.grounded_to_lifted_action(l1_action_string)
        tf, rf = self._terminal_function, self._reward_function
        NonPrimitiveAbstractTask.__init__(self, l1_action_string, subtasks, tf, rf)
    def _terminal_function(self, state):
        '''
        Args:
            state (CleanUpState)
        Returns:
            is_terminal (bool)
        '''
        assert type(state) == CleanUpState, 'Actual type of state is {}'.format(type(state))
        # One small predicate per lifted action; each is evaluated on the L1
        # projection of the ground state, not on `state` itself.
        def _robot_door_terminal_func(s, door_color):
            return s.robot.current_door == door_color
        def _robot_room_terminal_func(s, room_color):
            # current_door == '' means the robot is inside the room proper,
            # not standing in a doorway.
            return s.robot.current_room == room_color and s.robot.current_door == ''
        def _robot_to_block_terminal_func(s, block_color):
            return s.robot.adjacent_block == block_color
        def _block_to_door_terminal_func(s, block_color, door_color):
            for block in s.blocks:
                if block.block_color == block_color and block.current_door == door_color:
                    return True
            return False
        def _block_to_room_terminal_func(s, block_color, room_color):
            for block in s.blocks:
                if block.block_color == block_color and block.current_room == room_color and block.current_door == '':
                    return True
            return False
        # Project the ground (L0) state up to the abstract (L1) state.
        state_mapper = AbstractCleanupL1StateMapper(self.l0_domain)
        projected_state = state_mapper.map_state(state)
        action_parameter = self.grounded_to_action_parameter(self.action)
        if self.lifted_action == 'toDoor':
            return _robot_door_terminal_func(projected_state, action_parameter)
        if self.lifted_action == 'toRoom':
            return _robot_room_terminal_func(projected_state, action_parameter)
        if self.lifted_action == 'toObject':
            return _robot_to_block_terminal_func(projected_state, action_parameter)
        if self.lifted_action == 'objectToDoor':
            return _block_to_door_terminal_func(projected_state, projected_state.robot.adjacent_block, action_parameter)
        if self.lifted_action == 'objectToRoom':
            return _block_to_room_terminal_func(projected_state, projected_state.robot.adjacent_block, action_parameter)
        raise ValueError('Lifted action {} not supported yet'.format(self.lifted_action))
    def _reward_function(self, state, action):
        # Reward 1 exactly when taking `action` in `state` reaches this subtask's
        # terminal set (simulated through the L0 transition model), else 0.
        assert type(state) == CleanUpState, 'Actual type of state is {}'.format(type(state))
        next_state = self.l0_domain.transition_func(state, action)
        return 1. if self._terminal_function(next_state) else 0.
    # -------------------------------
    # L1 Action Helper Functions
    # -------------------------------
    @staticmethod
    def grounded_to_lifted_action(grounded_action_str):
        # 'toRoom(red)' -> 'toRoom'
        return grounded_action_str.split('(')[0]
    @staticmethod
    def grounded_to_action_parameter(grounded_action_str):
        # 'toRoom(red)' -> 'red'
        return grounded_action_str.split('(')[1].split(')')[0]
    @staticmethod
    def door_name_to_room_colors(door_name):
        # Door names encode the two connected room colors joined by '_'.
        return door_name.split('_')
    @staticmethod
    def get_other_room_color(state, door_name):
        # Return the color of the door's connected room that the robot is NOT
        # currently in ('' if the robot is in neither connected room).
        connected_rooms = CleanupL1GroundedAction.door_name_to_room_colors(door_name)
        if state.robot.current_room == connected_rooms[0]:
            return connected_rooms[1]
        if state.robot.current_room == connected_rooms[1]:
            return connected_rooms[0]
        return ''
class CleanupRootGroundedAction(RootTaskNode):
    """Root node of the cleanup AMDP task hierarchy; records its action string
    and defers all other behavior to RootTaskNode."""
    def __init__(self, action_str, subtasks, l1_domain, terminal_func, reward_func):
        self.action = action_str
        super(CleanupRootGroundedAction, self).__init__(action_str, subtasks, l1_domain, terminal_func, reward_func)
class CleanupL1MDP(MDP):
    """Abstract (level-1) MDP over the cleanup domain.

    States are CleanupL1State projections of the ground world; actions are the
    LIFTED_ACTIONS grounded with the door/room/block attributes found in the
    initial state.
    """
    LIFTED_ACTIONS = ['toDoor', 'toRoom', 'toObject', 'objectToDoor', 'objectToRoom']
    # -------------------------------
    # Level 1 MDP description
    # -------------------------------
    def __init__(self, l0_domain):
        '''
        Args:
            l0_domain (CleanUpMDP)
        '''
        self.l0_domain = l0_domain
        # Project the ground initial state to L1 and enumerate the grounded
        # action set from the objects present in it.
        state_mapper = AbstractCleanupL1StateMapper(l0_domain)
        l1_init_state = state_mapper.map_state(l0_domain.init_state)
        grounded_actions = CleanupL1MDP.ground_actions(l1_init_state)
        self.terminal_func = self._is_goal_state
        MDP.__init__(self, grounded_actions, self._transition_function, self._reward_function, l1_init_state)
    def _is_goal_state(self, state):
        # Goal: the task's block AND the robot are both in the goal room.
        for block in state.blocks: # type: CleanupL1Block
            if block.block_color == self.l0_domain.task.block_color:
                return block.current_room == self.l0_domain.task.goal_room_color and \
                       state.robot.current_room == self.l0_domain.task.goal_room_color
        raise ValueError('Did not find an L1 Block object with color {}'.format(self.l0_domain.task.block_color))
    def _reward_function(self, state, action):
        '''
        Args:
            state (CleanupL1State)
            action (str)
        Returns:
            reward (float)
        '''
        # 1.0 exactly when the action leads to the goal state, else 0.0.
        next_state = self._transition_function(state, action)
        return 1. if self._is_goal_state(next_state) else 0.
    def _transition_function(self, state, action):
        '''
        Args:
            state (CleanupL1State)
            action (str): grounded action
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        lifted_action = CleanupL1GroundedAction.grounded_to_lifted_action(action)
        # The branches are mutually exclusive (one lifted action per grounded
        # action string), so the plain `if`s behave like an elif chain.
        if lifted_action == 'toDoor':
            target_door_name = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_door(state, target_door_name)
        if lifted_action == 'toRoom':
            destination_room = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_room(state, destination_room)
        if lifted_action == 'toObject':
            block_color = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_block(state, block_color)
        if lifted_action == 'objectToDoor':
            target_door_name = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            # next_state = self._move_agent_to_door(state, target_door_name)
            next_state = self._move_block_to_door(next_state, target_door_name)
        if lifted_action == 'objectToRoom':
            # Moves the agent into the room first, then the block it carries.
            destination_room = CleanupL1GroundedAction.grounded_to_action_parameter(action)
            next_state = self._move_agent_to_room(state, destination_room)
            next_state = self._move_block_to_room(next_state, destination_room)
        next_state.set_terminal(self._is_goal_state(next_state))
        return next_state
    @classmethod
    def ground_actions(cls, l1_state):
        '''
        Given a list of lifted/parameterized actions and the L0 cleanup domain,
        generate a list of grounded actions based on the attributes of the objects
        instantiated in the L0 domain.
        Args:
            l1_state (CleanupL1State): underlying ground level MDP
        Returns:
            actions (list): grounded actions
        '''
        grounded_actions = []
        for door in l1_state.doors: # type: CleanupL1Door
            grounded_actions.append(cls.LIFTED_ACTIONS[0] + '(' + str(door) + ')')
            grounded_actions.append(cls.LIFTED_ACTIONS[3] + '(' + str(door) + ')')
        for room in l1_state.rooms: # type: CleanupL1Room
            grounded_actions.append(cls.LIFTED_ACTIONS[1] + '(' + str(room) + ')')
            grounded_actions.append(cls.LIFTED_ACTIONS[4] + '(' + str(room) + ')')
        for block in l1_state.blocks: # type: CleanupL1Block
            grounded_actions.append(cls.LIFTED_ACTIONS[2] + '(' + str(block.block_color) + ')')
        return grounded_actions
    # -----------------------------------
    # Agent Navigation Helper functions
    # -----------------------------------
    @staticmethod
    def _move_agent_to_door(state, door_name):
        '''
        If the specified door connects the agent's current room, then it may transition to the door.
        Args:
            state (CleanupL1State)
            door_name (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        destination_door = state.get_l1_door_for_color(door_name)
        # Door names contain the connected room colors, so the substring test
        # checks that the robot's room is one of the two rooms the door joins.
        if destination_door and state.robot.current_room in door_name:
            # If there is already a block at the door, then move it to the other room
            block = state.get_l1_block_for_color(state.robot.adjacent_block)
            if block:
                if block.current_door == door_name:
                    other_room = CleanupL1GroundedAction.get_other_room_color(state, door_name)
                    next_state = CleanupL1MDP._move_block_to_room(state, other_room)
            next_state.robot.current_door = door_name
            next_state.robot.current_room = destination_door.current_room
        return next_state
    @staticmethod
    def _move_agent_to_room(state, destination_room_color):
        '''
        Move the agent to the specified room if it is at a door connecting it to the said room.
        Args:
            state (CleanupL1State)
            destination_room_color (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        # current_door encodes both connected room colors, so this only fires
        # when the robot stands at a door adjoining the destination room.
        if destination_room_color in state.robot.current_door:
            next_state.robot.current_room = destination_room_color
            next_state.robot.current_door = ''
        return next_state
    @staticmethod
    def _move_agent_to_block(state, block_color):
        '''
        Move the agent to the specified block if they are both in the same room.
        Args:
            state (CleanupL1State)
            block_color (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        target_block = state.get_l1_block_for_color(block_color)
        if target_block:
            if target_block.current_room == state.robot.current_room:
                next_state.robot.adjacent_block = target_block.block_color
                next_state.robot.current_door = ''
        return next_state
    # -----------------------------------
    # Block Navigation Helper functions
    # -----------------------------------
    @staticmethod
    def _move_block_to_door(state, door_name):
        '''
        Move the agent's adjacent block to the specified door if they are in a room connected by said door.
        Args:
            state (CleanupL1State)
            door_name (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        block = next_state.get_l1_block_for_color(next_state.robot.adjacent_block)
        destination_door = next_state.get_l1_door_for_color(door_name)
        if block and destination_door:
            if state.robot.current_room in door_name and block.current_room in door_name:
                # The agent stays with the block's room and leaves the doorway.
                next_state.robot.current_room = block.current_room
                next_state.robot.current_door = ''
                block.current_door = door_name
                block.current_room = destination_door.current_room
        return next_state
    @staticmethod
    def _move_block_to_room(state, destination_room_color):
        '''
        Move the block to the specified room if the block is at a door connecting said room.
        Args:
            state (CleanupL1State)
            destination_room_color (str)
        Returns:
            next_state (CleanupL1State)
        '''
        next_state = copy.deepcopy(state)
        block = next_state.get_l1_block_for_color(next_state.robot.adjacent_block)
        if block:
            if destination_room_color in block.current_door:
                block.current_room = destination_room_color
                block.current_door = ''
        return next_state
# -----------------------------------
# Debug functions
# -----------------------------------
def debug_l1_domain():
    """Smoke test: build a small L0 cleanup world, lift it to the L1 AMDP, and
    print the plan found by value iteration."""
    from simple_rl.tasks.cleanup.cleanup_block import CleanUpBlock
    from simple_rl.tasks.cleanup.cleanup_door import CleanUpDoor
    from simple_rl.tasks.cleanup.cleanup_room import CleanUpRoom
    from simple_rl.tasks.cleanup.cleanup_task import CleanUpTask
    from simple_rl.tasks.cleanup.CleanupMDPClass import CleanUpMDP

    def get_l1_policy(domain):
        # Plan with value iteration and echo the state -> action mapping.
        planner = ValueIteration(domain, sample_rate=1)
        planner.run_vi()
        policy = defaultdict()
        action_seq, state_seq = planner.plan(domain.init_state)
        print('Plan for {}:'.format(domain))
        for visited_state, chosen_action in zip(state_seq, action_seq):
            print("\tpi[{}] -> {}\n".format(visited_state, chosen_action))
            policy[visited_state] = chosen_action
        return policy

    # Three rooms, three blocks, two doors; the task is to bring the purple
    # block into the red room.
    task = CleanUpTask("purple", "red")
    room1 = CleanUpRoom("room1", [(x, y) for x in range(5) for y in range(3)], "blue")
    room2 = CleanUpRoom("room2", [(x, y) for x in range(5, 10) for y in range(3)], color="red")
    room3 = CleanUpRoom("room3", [(x, y) for x in range(0, 10) for y in range(3, 6)], color="yellow")
    block1 = CleanUpBlock("block1", 1, 1, color="green")
    block2 = CleanUpBlock("block2", 2, 4, color="purple")
    block3 = CleanUpBlock("block3", 8, 1, color="orange")
    rooms = [room1, room2, room3]
    blocks = [block1, block2, block3]
    doors = [CleanUpDoor(4, 0), CleanUpDoor(3, 2)]
    mdp = CleanUpMDP(task, rooms=rooms, doors=doors, blocks=blocks)
    get_l1_policy(CleanupL1MDP(mdp))
|
en
| 0.65893
|
# Python imports # Other imports Args: l1_action_string (str) subtasks (list) l0_domain (CleanUpMDP) Args: state (CleanUpState) Returns: is_terminal (bool) # ------------------------------- # L1 Action Helper Functions # ------------------------------- # ------------------------------- # Level 1 MDP description # ------------------------------- Args: l0_domain (CleanUpMDP) # type: CleanupL1Block Args: state (CleanupL1State) action (str) Returns: reward (float) Args: state (CleanupL1State) action (str): grounded action Returns: next_state (CleanupL1State) # next_state = self._move_agent_to_door(state, target_door_name) Given a list of lifted/parameterized actions and the L0 cleanup domain, generate a list of grounded actions based on the attributes of the objects instantiated in the L0 domain. Args: l1_state (CleanupL1State): underlying ground level MDP Returns: actions (list): grounded actions # type: CleanupL1Door # type: CleanupL1Room # type: CleanupL1Block # ----------------------------------- # Agent Navigation Helper functions # ----------------------------------- If the specified door connects the agent's current room, then it may transition to the door. Args: state (CleanupL1State) door_name (str) Returns: next_state (CleanupL1State) # If there is already a block at the door, then move it to the other room Move the agent to the specified room if it is at a door connecting it to the said room. Args: state (CleanupL1State) destination_room_color (str) Returns: next_state (CleanupL1State) Move the agent to the specified block if they are both in the same room. Args: state (CleanupL1State) block_color (str) Returns: next_state (CleanupL1State) # ----------------------------------- # Block Navigation Helper functions # ----------------------------------- Move the agent's adjacent block to the specified door if they are in a room connected by said door. 
Args: state (CleanupL1State) door_name (str) Returns: next_state (CleanupL1State) Move the block to the specified room if the block is at a door connecting said room. Args: state (CleanupL1State) destination_room_color (str) Returns: next_state (CleanupL1State) # ----------------------------------- # Debug functions # -----------------------------------
| 2.037
| 2
|
Codes/Day-057/blog/main.py
|
MdGhulamAzadAnsari/100-Days-of-Code
| 3
|
6625366
|
from flask import Flask, render_template
from post import Post
import requests
# Fetch the blog posts once at startup (network call at import time) and wrap
# each JSON record in a Post object for the templates.
posts = requests.get("https://api.npoint.io/5abcca6f4e39b4955965").json()
post_objects = [
    Post(post["id"], post["title"], post["subtitle"], post["body"])
    for post in posts
]
app = Flask(__name__)
@app.route('/')
def get_all_posts():
    """Home page: render the index template with every loaded post."""
    context = {"all_posts": post_objects}
    return render_template("index.html", **context)
@app.route("/post/<int:index>")
def show_post(index):
requested_post = None
for blog_post in post_objects:
if blog_post.id == index:
requested_post = blog_post
return render_template("post.html", post=requested_post)
if __name__ == "__main__":
app.run(debug=True)
|
from flask import Flask, render_template
from post import Post
import requests
# Fetch the blog posts once at startup (network call at import time) and wrap
# each JSON record in a Post object for the templates.
posts = requests.get("https://api.npoint.io/5abcca6f4e39b4955965").json()
post_objects = [
    Post(post["id"], post["title"], post["subtitle"], post["body"])
    for post in posts
]
app = Flask(__name__)
@app.route('/')
def get_all_posts():
    """Home page: render the index template with every loaded post."""
    context = {"all_posts": post_objects}
    return render_template("index.html", **context)
@app.route("/post/<int:index>")
def show_post(index):
requested_post = None
for blog_post in post_objects:
if blog_post.id == index:
requested_post = blog_post
return render_template("post.html", post=requested_post)
if __name__ == "__main__":
app.run(debug=True)
|
none
| 1
| 2.935647
| 3
|
|
back-flask/api/user.py
|
LukasL97/shop-4-me
| 0
|
6625367
|
# repo: LukasL97/shop-4-me — file: back-flask/api/user.py
from flask import request, Response, make_response
from api.http_status import OK, BAD_REQUEST, NOT_FOUND, UNAUTHORIZED, CONFLICT, UNPROCESSABLE_ENTITY
from model.exception import UnexpectedUserTypeError, UserNotFoundError, IncorrectPasswordError, \
UserAlreadyRegisteredError, UserSessionIdNotFoundError, UnexpectedNumberOfLocationsForAddressError
from model.user import UserHandlerResolver, RequesterHandler
from spec import DocumentedBlueprint
# Blueprint for the user endpoints; presumably DocumentedBlueprint collects the
# route docstrings into the OpenAPI spec (see spec module) — confirm.
user = DocumentedBlueprint('user', __name__)
@user.route('/login', methods=['POST'])
def login(resolver: UserHandlerResolver) -> Response:
    '''
    ---
    post:
      summary: login a user
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                userType:
                  type: string
                  description: either "Requester", "Volunteer" or "ShopOwner"
                loginName:
                  type: string
                password:
                  type: string
      responses:
        200:
          description: session id for successful login
          content:
            string:
              schema:
                type: string
                description: session id for this login
        400:
          description: unexpected user type or incorrect request body
        401:
          description: incorrect password
        404:
          description: user not found
    '''
    # NOTE(review): the YAML docstring above appears to be the machine-read
    # OpenAPI spec for this route — keep it in sync with the code.
    user_data = request.json
    try:
        # Resolve the type-specific handler, then delegate the actual login.
        user_handler = resolver.get(user_data['userType'])
        return make_response(user_handler.login(user_data['loginName'], user_data['password']), OK)
    except UnexpectedUserTypeError:
        return make_response('Unexpected user type %s' % user_data['userType'], BAD_REQUEST)
    except UserNotFoundError:
        return make_response('User %s not found' % user_data['loginName'], NOT_FOUND)
    except IncorrectPasswordError:
        return make_response('Incorrect password', UNAUTHORIZED)
    except KeyError:
        # Raised by the user_data[...] lookups when a required field is missing.
        return make_response('Request body did not contain required information', BAD_REQUEST)
@user.route('/register', methods=['POST'])
def register(resolver: UserHandlerResolver) -> Response:
    '''
    ---
    post:
      summary: register a user
      description: user is logged in after registration immediately
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                userType:
                  type: string
                  description: either "Requester", "Volunteer" or "ShopOwner"
                loginName:
                  type: string
                password:
                  type: string
                firstName:
                  type: string
                lastName:
                  type: string
      responses:
        200:
          description: session id for successful registration and login
          content:
            string:
              schema:
                type: string
                description: session id for this login
        400:
          description: unexpected user type or incorrect request body
        409:
          description: user with this loginName already registered
    '''
    user_data = request.json
    try:
        # Resolve the type-specific handler, then register (which also logs in).
        user_handler = resolver.get(user_data['userType'])
        return make_response(user_handler.register(
            login_name=user_data['loginName'],
            # Fixed: the source contained the placeholder `<PASSWORD>['password']`,
            # which is not valid Python; the password comes from the request body.
            password=user_data['password'],
            first_name=user_data['firstName'],
            last_name=user_data['lastName']
        ), OK)
    except UnexpectedUserTypeError:
        return make_response('Unexpected user type %s' % user_data['userType'], BAD_REQUEST)
    except KeyError:
        # Raised by the user_data[...] lookups when a required field is missing.
        return make_response('Request body did not contain required information', BAD_REQUEST)
    except UserAlreadyRegisteredError:
        return make_response('Login name %s is already used' % user_data['loginName'], CONFLICT)
@user.route('/logout', methods=['DELETE'])
def logout(resolver: UserHandlerResolver) -> Response:
    '''
    ---
    delete:
      summary: logout a logged in user
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                userType:
                  type: string
                  description: either "Requester", "Volunteer" or "ShopOwner"
                sessionId:
                  type: string
                  description: session id of the user that is to be logged out
      responses:
        200:
          description: logout successful
        400:
          description: unexpected user type or incorrect request body
        404:
          description: session id did not refer to any logged in user of this type
    '''
    user_data = request.json
    try:
        # Resolve the type-specific handler, then invalidate the session.
        user_handler = resolver.get(user_data['userType'])
        return make_response(user_handler.logout(
            session_id=user_data['sessionId']
        ), OK)
    except KeyError:
        # Raised by the user_data[...] lookups when a required field is missing.
        return make_response('Request body did not contain required information', BAD_REQUEST)
    except UserSessionIdNotFoundError:
        return make_response('User of type %s with session id %s not found' % (user_data['userType'], user_data['sessionId']), NOT_FOUND)
@user.route('/requester/address', methods=['PUT'])
def set_requester_address(requester_handler: RequesterHandler) -> Response:
    '''
    ---
    put:
      summary: set your own address as a requester
      description: The address is resolved to a geolocation (latitude, longitude) internally, via an external geocoding API.
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                address:
                  type: object
                  properties:
                    street:
                      type: string
                      description: street incl. house number in the format common in the according country (e.g. "Junailijankuja 5B")
                    zip:
                      type: string
                      description: zip code (e.g. "00520")
                    country:
                      type: string
                      description: country name in english (but national language should also work) (e.g. "Finland")
                sessionId:
                  type: string
                  description: session id of the requester setting their address
      responses:
        200:
          description: address set successfully
        400:
          description: incorrect request body
        404:
          description: session id did not refer to any logged in user of type Requester
        422:
          description: the given address could not be resolved to exactly one geolocation via the external geocoding API
    '''
    user_data = request.json
    try:
        # `zip=` shadows the builtin but is the handler's keyword-argument name.
        return make_response(requester_handler.set_address(
            street=user_data['address']['street'],
            zip=user_data['address']['zip'],
            country=user_data['address']['country'],
            session_id=user_data['sessionId']
        ), OK)
    except KeyError:
        # Raised by the user_data[...] lookups when a required field is missing.
        return make_response('Request body did not contain required information', BAD_REQUEST)
    except UserSessionIdNotFoundError:
        return make_response('User of type Requester with session id %s not found' % user_data['sessionId'], NOT_FOUND)
    except UnexpectedNumberOfLocationsForAddressError as error:
        return make_response('Address "%s" resolved to %d geolocations' % (error.address, error.number_of_locations), UNPROCESSABLE_ENTITY)
|
from flask import request, Response, make_response
from api.http_status import OK, BAD_REQUEST, NOT_FOUND, UNAUTHORIZED, CONFLICT, UNPROCESSABLE_ENTITY
from model.exception import UnexpectedUserTypeError, UserNotFoundError, IncorrectPasswordError, \
UserAlreadyRegisteredError, UserSessionIdNotFoundError, UnexpectedNumberOfLocationsForAddressError
from model.user import UserHandlerResolver, RequesterHandler
from spec import DocumentedBlueprint
user = DocumentedBlueprint('user', __name__)
@user.route('/login', methods=['POST'])
def login(resolver: UserHandlerResolver) -> Response:
    '''
    ---
    post:
      summary: login a user
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                userType:
                  type: string
                  description: either "Requester", "Volunteer" or "ShopOwner"
                loginName:
                  type: string
                password:
                  type: string
      responses:
        200:
          description: session id for successful login
          content:
            string:
              schema:
                type: string
                description: session id for this login
        400:
          description: unexpected user type or incorrect request body
        401:
          description: incorrect password
        404:
          description: user not found
    '''
    credentials = request.json
    try:
        handler = resolver.get(credentials['userType'])
        session_id = handler.login(credentials['loginName'], credentials['password'])
    except UnexpectedUserTypeError:
        return make_response('Unexpected user type %s' % credentials['userType'], BAD_REQUEST)
    except UserNotFoundError:
        return make_response('User %s not found' % credentials['loginName'], NOT_FOUND)
    except IncorrectPasswordError:
        return make_response('Incorrect password', UNAUTHORIZED)
    except KeyError:
        return make_response('Request body did not contain required information', BAD_REQUEST)
    return make_response(session_id, OK)
@user.route('/register', methods=['POST'])
def register(resolver: UserHandlerResolver) -> Response:
    '''
    ---
    post:
      summary: register a user
      description: user is logged in after registration immediately
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                userType:
                  type: string
                  description: either "Requester", "Volunteer" or "ShopOwner"
                loginName:
                  type: string
                password:
                  type: string
                firstName:
                  type: string
                lastName:
                  type: string
      responses:
        200:
          description: session id for successful registration and login
          content:
            string:
              schema:
                type: string
                description: session id for this login
        400:
          description: unexpected user type or incorrect request body
        409:
          description: user with this loginName already registered
    '''
    user_data = request.json
    try:
        user_handler = resolver.get(user_data['userType'])
        return make_response(user_handler.register(
            login_name=user_data['loginName'],
            # Fix: restored the password lookup — the source contained a
            # corrupted '<PASSWORD>' placeholder here, which is a NameError
            # at runtime.
            password=user_data['password'],
            first_name=user_data['firstName'],
            last_name=user_data['lastName']
        ), OK)
    except UnexpectedUserTypeError:
        return make_response('Unexpected user type %s' % user_data['userType'], BAD_REQUEST)
    except KeyError:
        return make_response('Request body did not contain required information', BAD_REQUEST)
    except UserAlreadyRegisteredError:
        return make_response('Login name %s is already used' % user_data['loginName'], CONFLICT)
@user.route('/logout', methods=['DELETE'])
def logout(resolver: UserHandlerResolver) -> Response:
    '''
    ---
    delete:
      summary: logout a logged in user
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                userType:
                  type: string
                  description: either "Requester", "Volunteer" or "ShopOwner"
                sessionId:
                  type: string
                  description: session id of the user that is to be logged out
      responses:
        200:
          description: logout successful
        400:
          description: unexpected user type or incorrect request body
        404:
          description: session id did not refer to any logged in user of this type
    '''
    user_data = request.json
    try:
        user_handler = resolver.get(user_data['userType'])
        return make_response(user_handler.logout(
            session_id=user_data['sessionId']
        ), OK)
    except UnexpectedUserTypeError:
        # Fix: the spec above documents 400 for an unexpected user type, but this
        # exception was previously uncaught here (unlike in login/register) and
        # would have surfaced as a 500.
        return make_response('Unexpected user type %s' % user_data['userType'], BAD_REQUEST)
    except KeyError:
        return make_response('Request body did not contain required information', BAD_REQUEST)
    except UserSessionIdNotFoundError:
        return make_response('User of type %s with session id %s not found' % (user_data['userType'], user_data['sessionId']), NOT_FOUND)
@user.route('/requester/address', methods=['PUT'])
def set_requester_address(requester_handler: RequesterHandler) -> Response:
    '''
    ---
    put:
      summary: set your own address as a requester
      description: The address is resolved to a geolocation (latitude, longitude) internally, via an external geocoding API.
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                address:
                  type: object
                  properties:
                    street:
                      type: string
                      description: street incl. house number in the format common in the according country (e.g. "Junailijankuja 5B")
                    zip:
                      type: string
                      description: zip code (e.g. "00520")
                    country:
                      type: string
                      description: country name in english (but national language should also work) (e.g. "Finland")
                sessionId:
                  type: string
                  description: session id of the requester setting their address
      responses:
        200:
          description: address set successfully
        400:
          description: incorrect request body
        404:
          description: session id did not refer to any logged in user of type Requester
        422:
          description: the given address could not be resolved to exactly one geolocation via the external geocoding API
    '''
    body = request.json
    try:
        address = body['address']
        result = requester_handler.set_address(
            street=address['street'],
            zip=address['zip'],
            country=address['country'],
            session_id=body['sessionId']
        )
        return make_response(result, OK)
    except KeyError:
        # Any missing key (address sub-object or sessionId) maps to a 400.
        return make_response('Request body did not contain required information', BAD_REQUEST)
    except UserSessionIdNotFoundError:
        return make_response('User of type Requester with session id %s not found' % body['sessionId'], NOT_FOUND)
    except UnexpectedNumberOfLocationsForAddressError as error:
        return make_response('Address "%s" resolved to %d geolocations' % (error.address, error.number_of_locations), UNPROCESSABLE_ENTITY)
|
en
| 0.759783
|
--- post: summary: login a user requestBody: required: true content: application/json: schema: type: object properties: userType: type: string description: either "Requester", "Volunteer" or "ShopOwner" loginName: type: string password: type: string responses: 200: description: session id for successful login content: string: schema: type: string description: session id for this login 400: description: unexpected user type or incorrect request body 401: description: incorrect password 404: description: user not found --- post: summary: register a user description: user is logged in after registration immediately requestBody: required: true content: application/json: schema: type: object properties: userType: type: string description: either "Requester", "Volunteer" or "ShopOwner" loginName: type: string password: type: string firstName: type: string lastName: type: string responses: 200: description: session id for successful registration and login content: string: schema: type: string description: session id for this login 400: description: unexpected user type or incorrect request body 409: description: user with this loginName already registered --- delete: summary: logout a logged in user requestBody: required: true content: application/json: schema: type: object properties: userType: type: string description: either "Requester", "Volunteer" or "ShopOwner" sessionId: type: string description: session id of the user that is to be logged out responses: 200: description: logout successful 400: description: unexpected user type or incorrect request body 404: description: session id did not refer to any logged in user of this type --- put: summary: set your own address as a requester description: The address is resolved to a geolocation (latitude, longitude) internally, via an external geocoding API. requestBody: required: true content: application/json: schema: type: object properties: address: type: object properties: street: type: string description: street incl. 
house number in the format common in the according country (e.g. "Junailijankuja 5B") zip: type: string description: zip code (e.g. "00520") country: type: string description: country name in english (but national language should also work) (e.g. "Finland") sessionId: type: string description: session id of the requester setting their address responses: 200: description: address set successfully 400: description: incorrect request body 404: description: session id did not refer to any logged in user of type Requester 422: description: the given address could not be resolved to exactly one geolocation via the external geocoding API
| 2.781923
| 3
|
2020/15.py
|
KarboniteKream/aoc-2020
| 1
|
6625368
|
<reponame>KarboniteKream/aoc-2020
import util
def speak(numbers, target):
    """Play the 'memory game' (AoC 2020 day 15) and return the number
    spoken on turn `target` (1-indexed), given the starting numbers."""
    # Map each starting number except the most recent one to the turn
    # (0-indexed) on which it was last spoken.
    last_seen = {value: turn for turn, value in enumerate(numbers[:-1])}
    current = numbers[-1]
    for turn in range(len(numbers), target):
        previous_turn = turn - 1
        # A number never spoken before is answered with 0; otherwise with
        # the gap since it was last spoken.
        next_number = previous_turn - last_seen[current] if current in last_seen else 0
        last_seen[current] = previous_turn
        current = next_number
    return current
def part1(numbers):
    """Number spoken on turn 2020."""
    return speak(numbers, 2020)
def part2(numbers):
    """Number spoken on turn 30,000,000."""
    return speak(numbers, 30_000_000)
# Puzzle input is a single line of comma-separated starting numbers.
lines = util.read_lines("input/15.txt")
numbers = list(map(int, lines[0].split(",")))
# Hand both parts and the parsed input to the shared runner (see util.run).
util.run(part1, part2, numbers)
|
import util
def speak(numbers, target):
    """Play the 'memory game' (AoC 2020 day 15) and return the number
    spoken on turn `target` (1-indexed), given the starting numbers."""
    # Map each starting number except the most recent one to the turn
    # (0-indexed) on which it was last spoken.
    last_seen = {value: turn for turn, value in enumerate(numbers[:-1])}
    current = numbers[-1]
    for turn in range(len(numbers), target):
        previous_turn = turn - 1
        # A number never spoken before is answered with 0; otherwise with
        # the gap since it was last spoken.
        next_number = previous_turn - last_seen[current] if current in last_seen else 0
        last_seen[current] = previous_turn
        current = next_number
    return current
def part1(numbers):
    """Number spoken on turn 2020."""
    return speak(numbers, 2020)
def part2(numbers):
    """Number spoken on turn 30,000,000."""
    return speak(numbers, 30_000_000)
# Puzzle input is a single line of comma-separated starting numbers.
lines = util.read_lines("input/15.txt")
numbers = list(map(int, lines[0].split(",")))
# Hand both parts and the parsed input to the shared runner (see util.run).
util.run(part1, part2, numbers)
|
none
| 1
| 3.406014
| 3
|
|
src/train.py
|
aadithyamd/BertSum
| 0
|
6625369
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
    """Parse a truthy/falsy command-line string into a bool (argparse type=)."""
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
    """Spawn one training process per GPU (args.world_size total) and join them all.

    Child failures are forwarded to this parent via ErrorHandler/error_queue.
    """
    init_logger()
    nb_gpu = args.world_size
    # Use the 'spawn' start method (safe to combine with CUDA in children).
    mp = torch.multiprocessing.get_context('spawn')
    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Train with multiprocessing.
    procs = []
    for i in range(nb_gpu):
        device_id = i
        procs.append(mp.Process(target=run, args=(args,
                    device_id, error_queue,), daemon=True))
        procs[i].start()
        logger.info(" Starting process pid: %d " % procs[i].pid)
        error_handler.add_child(procs[i].pid)
    for p in procs:
        p.join()
def run(args, device_id, error_queue):
    """Child-process entry point: initialize the distributed backend for one
    GPU and run training; report any failure through `error_queue`."""
    setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
    try:
        gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
        print('gpu_rank %d' %gpu_rank)
        if gpu_rank != args.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        train(args,device_id)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""
    def __init__(self, error_queue):
        """Start a daemon thread that blocks on `error_queue`.

        NOTE(review): relies on SIGUSR1, so this is Unix-only.
        """
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        signal.signal(signal.SIGUSR1, self.signal_handler)
    def add_child(self, pid):
        """Register a child pid so it can be killed if any sibling fails."""
        self.children_pids.append(pid)
    def error_listener(self):
        """Block until a child reports an error, then signal the main thread."""
        (rank, original_trace) = self.error_queue.get()
        # Re-queue the record so signal_handler (main thread) can read it too.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)
    def signal_handler(self, signalnum, stackframe):
        """Runs in the main thread on SIGUSR1: kill all children and re-raise
        the child's traceback in the parent."""
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def wait_and_validate(args, device_id):
    """Validate checkpoints, in one of two modes:

    * args.test_all: validate every saved checkpoint (oldest first), stop once
      the best one is more than 10 checkpoints old, then run the test set on
      the 3 checkpoints with the lowest validation cross-entropy.
    * otherwise: poll the model directory forever, validating and testing each
      new checkpoint as it appears.
    """
    timestep = 0
    if (args.test_all):
        cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
        cp_files.sort(key=os.path.getmtime)
        xent_lst = []
        for i, cp in enumerate(cp_files):
            # The training step is encoded in the filename: model_step_<N>.pt
            step = int(cp.split('.')[-2].split('_')[-1])
            xent = validate(args, device_id, cp, step)
            xent_lst.append((xent, cp))
            max_step = xent_lst.index(min(xent_lst))
            if (i - max_step > 10):
                # No improvement within the last 10 checkpoints -- stop early.
                break
        xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
        logger.info('PPL %s' % str(xent_lst))
        for xent, cp in xent_lst:
            step = int(cp.split('.')[-2].split('_')[-1])
            test(args, device_id, cp, step)
    else:
        while (True):
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if (not os.path.getsize(cp) > 0):
                    # Checkpoint file is still being written -- wait and retry.
                    time.sleep(60)
                    continue
                if (time_of_cp > timestep):
                    timestep = time_of_cp
                    step = int(cp.split('.')[-2].split('_')[-1])
                    validate(args, device_id, cp, step)
                    test(args, device_id, cp, step)
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if (time_of_cp > timestep):
                    # A newer checkpoint appeared while we were busy -- loop
                    # immediately instead of sleeping.
                    continue
            else:
                time.sleep(300)
def validate(args, device_id, pt, step):
    """Evaluate one checkpoint on the validation split and return its
    cross-entropy (used by wait_and_validate to rank checkpoints)."""
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if (k in model_flags):
            # Architecture flags must match the checkpoint, not the CLI.
            setattr(args, k, opt[k])
    print(args)
    config = BertConfig.from_json_file(args.bert_config_path)
    model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
    model.load_cp(checkpoint)
    model.eval()
    valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
                                  args.batch_size, device,
                                  shuffle=False, is_test=False)
    trainer = build_trainer(args, device_id, model, None)
    stats = trainer.validate(valid_iter, step)
    return stats.xent()
def test(args, device_id, pt, step):
    """Evaluate one checkpoint on the test split.

    NOTE(review): near-duplicate of validate() except for the split and
    is_test flag -- candidates for a shared helper.
    """
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if (k in model_flags):
            # Architecture flags must match the checkpoint, not the CLI.
            setattr(args, k, opt[k])
    print(args)
    config = BertConfig.from_json_file(args.bert_config_path)
    model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
    model.load_cp(checkpoint)
    model.eval()
    test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                  args.batch_size, device,
                                  shuffle=False, is_test=True)
    trainer = build_trainer(args, device_id, model, None)
    trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
    """Evaluate a non-learned baseline on the test set: LEAD (cal_lead) or
    ORACLE (cal_oracle) extractive selection.

    NOTE(review): `device` and `device_id` are not parameters or locals here;
    they resolve to module-level names assigned in the __main__ block --
    confirm this is only called after argument parsing.
    """
    test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                  args.batch_size, device,
                                  shuffle=False, is_test=True)
    trainer = build_trainer(args, device_id, None, None)
    #
    if (cal_lead):
        trainer.test(test_iter, 0, cal_lead=True)
    elif (cal_oracle):
        trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
    """Run single-process training of the extractive summarizer, optionally
    resuming from args.train_from."""
    init_logger(args.log_file)
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    logger.info('Device ID %d' % device_id)
    logger.info('Device %s' % device)
    # Seed all RNGs for reproducibility (repeated again below after CUDA
    # device selection; the duplication is preserved as-is).
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    if device_id >= 0:
        torch.cuda.set_device(device_id)
        torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    def train_iter_fct():
        # Fresh shuffled training iterator each time the trainer asks for one.
        return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
                                 shuffle=True, is_test=False)
    model = Summarizer(args, device, load_pretrained_bert=True)
    if args.train_from != '':
        # Resume: restore model weights and optimizer state, and override
        # architecture flags with the values stored in the checkpoint.
        logger.info('Loading checkpoint from %s' % args.train_from)
        checkpoint = torch.load(args.train_from,
                                map_location=lambda storage, loc: storage)
        opt = vars(checkpoint['opt'])
        for k in opt.keys():
            if (k in model_flags):
                setattr(args, k, opt[k])
        model.load_cp(checkpoint)
        optim = model_builder.build_optim(args, model, checkpoint)
    else:
        optim = model_builder.build_optim(args, model, None)
    logger.info(model)
    trainer = build_trainer(args, device_id, model, optim)
    trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
    # CLI entry point: parse hyperparameters/paths and dispatch on -mode.
    parser = argparse.ArgumentParser()
    parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline','multi_layer_classifier'])
    # Fix: 'lead' and 'oracle' are dispatched below, so they must be accepted
    # here; previously argparse rejected them before the dispatch was reached.
    parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test','lead','oracle'])
    parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
    parser.add_argument("-model_path", default='../models/')
    parser.add_argument("-result_path", default='../results/cnndm')
    parser.add_argument("-temp_dir", default='../temp')
    parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
    parser.add_argument("-batch_size", default=1000, type=int)
    parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
    parser.add_argument("-hidden_size", default=128, type=int)
    parser.add_argument("-ff_size", default=512, type=int)
    parser.add_argument("-heads", default=4, type=int)
    parser.add_argument("-inter_layers", default=2, type=int)
    parser.add_argument("-rnn_size", default=512, type=int)
    parser.add_argument("-out_layer", default=-1, type=int)
    parser.add_argument("-freeze_initial", default=0, type=int)
    parser.add_argument("-param_init", default=0, type=float)
    parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
    parser.add_argument("-dropout", default=0.1, type=float)
    parser.add_argument("-optim", default='adam', type=str)
    parser.add_argument("-lr", default=1, type=float)
    parser.add_argument("-beta1", default= 0.9, type=float)
    parser.add_argument("-beta2", default=0.999, type=float)
    parser.add_argument("-decay_method", default='', type=str)
    parser.add_argument("-warmup_steps", default=8000, type=int)
    parser.add_argument("-max_grad_norm", default=0, type=float)
    parser.add_argument("-save_checkpoint_steps", default=5, type=int)
    parser.add_argument("-accum_count", default=1, type=int)
    parser.add_argument("-world_size", default=1, type=int)
    parser.add_argument("-report_every", default=1, type=int)
    parser.add_argument("-train_steps", default=1000, type=int)
    parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
    parser.add_argument('-visible_gpus', default='-1', type=str)
    parser.add_argument('-gpu_ranks', default='0', type=str)
    parser.add_argument('-log_file', default='../logs/cnndm.log')
    parser.add_argument('-dataset', default='')
    parser.add_argument('-seed', default=666, type=int)
    parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
    parser.add_argument("-test_from", default='')
    parser.add_argument("-train_from", default='')
    parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
    parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
    args = parser.parse_args()
    args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
    os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
    init_logger(args.log_file)
    # '-1' means CPU-only; any other value selects CUDA.
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    device_id = 0 if device == "cuda" else -1
    if(args.world_size>1):
        multi_main(args)
    elif (args.mode == 'train'):
        train(args, device_id)
    elif (args.mode == 'validate'):
        wait_and_validate(args, device_id)
    elif (args.mode == 'lead'):
        baseline(args, cal_lead=True)
    elif (args.mode == 'oracle'):
        baseline(args, cal_oracle=True)
    elif (args.mode == 'test'):
        cp = args.test_from
        try:
            step = int(cp.split('.')[-2].split('_')[-1])
        except (IndexError, ValueError):
            # Fix: bare 'except:' narrowed -- a checkpoint name that does not
            # follow 'model_step_<N>.pt' falls back to step 0; anything else
            # (e.g. KeyboardInterrupt) now propagates.
            step = 0
        test(args, device_id, cp, step)
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
    """Parse a truthy/falsy command-line string into a bool (argparse type=)."""
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
    """Spawn one training process per GPU (args.world_size total) and join them all.

    Child failures are forwarded to this parent via ErrorHandler/error_queue.
    """
    init_logger()
    nb_gpu = args.world_size
    # Use the 'spawn' start method (safe to combine with CUDA in children).
    mp = torch.multiprocessing.get_context('spawn')
    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Train with multiprocessing.
    procs = []
    for i in range(nb_gpu):
        device_id = i
        procs.append(mp.Process(target=run, args=(args,
                    device_id, error_queue,), daemon=True))
        procs[i].start()
        logger.info(" Starting process pid: %d " % procs[i].pid)
        error_handler.add_child(procs[i].pid)
    for p in procs:
        p.join()
def run(args, device_id, error_queue):
    """Child-process entry point: initialize the distributed backend for one
    GPU and run training; report any failure through `error_queue`."""
    setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
    try:
        gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
        print('gpu_rank %d' %gpu_rank)
        if gpu_rank != args.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        train(args,device_id)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""
    def __init__(self, error_queue):
        """Start a daemon thread that blocks on `error_queue`.

        NOTE(review): relies on SIGUSR1, so this is Unix-only.
        """
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        signal.signal(signal.SIGUSR1, self.signal_handler)
    def add_child(self, pid):
        """Register a child pid so it can be killed if any sibling fails."""
        self.children_pids.append(pid)
    def error_listener(self):
        """Block until a child reports an error, then signal the main thread."""
        (rank, original_trace) = self.error_queue.get()
        # Re-queue the record so signal_handler (main thread) can read it too.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)
    def signal_handler(self, signalnum, stackframe):
        """Runs in the main thread on SIGUSR1: kill all children and re-raise
        the child's traceback in the parent."""
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def wait_and_validate(args, device_id):
    """Validate checkpoints, in one of two modes:

    * args.test_all: validate every saved checkpoint (oldest first), stop once
      the best one is more than 10 checkpoints old, then run the test set on
      the 3 checkpoints with the lowest validation cross-entropy.
    * otherwise: poll the model directory forever, validating and testing each
      new checkpoint as it appears.
    """
    timestep = 0
    if (args.test_all):
        cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
        cp_files.sort(key=os.path.getmtime)
        xent_lst = []
        for i, cp in enumerate(cp_files):
            # The training step is encoded in the filename: model_step_<N>.pt
            step = int(cp.split('.')[-2].split('_')[-1])
            xent = validate(args, device_id, cp, step)
            xent_lst.append((xent, cp))
            max_step = xent_lst.index(min(xent_lst))
            if (i - max_step > 10):
                # No improvement within the last 10 checkpoints -- stop early.
                break
        xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
        logger.info('PPL %s' % str(xent_lst))
        for xent, cp in xent_lst:
            step = int(cp.split('.')[-2].split('_')[-1])
            test(args, device_id, cp, step)
    else:
        while (True):
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if (not os.path.getsize(cp) > 0):
                    # Checkpoint file is still being written -- wait and retry.
                    time.sleep(60)
                    continue
                if (time_of_cp > timestep):
                    timestep = time_of_cp
                    step = int(cp.split('.')[-2].split('_')[-1])
                    validate(args, device_id, cp, step)
                    test(args, device_id, cp, step)
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
            cp_files.sort(key=os.path.getmtime)
            if (cp_files):
                cp = cp_files[-1]
                time_of_cp = os.path.getmtime(cp)
                if (time_of_cp > timestep):
                    # A newer checkpoint appeared while we were busy -- loop
                    # immediately instead of sleeping.
                    continue
            else:
                time.sleep(300)
def validate(args, device_id, pt, step):
    """Evaluate one checkpoint on the validation split and return its
    cross-entropy (used by wait_and_validate to rank checkpoints)."""
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if (k in model_flags):
            # Architecture flags must match the checkpoint, not the CLI.
            setattr(args, k, opt[k])
    print(args)
    config = BertConfig.from_json_file(args.bert_config_path)
    model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
    model.load_cp(checkpoint)
    model.eval()
    valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
                                  args.batch_size, device,
                                  shuffle=False, is_test=False)
    trainer = build_trainer(args, device_id, model, None)
    stats = trainer.validate(valid_iter, step)
    return stats.xent()
def test(args, device_id, pt, step):
    """Evaluate one checkpoint on the test split.

    NOTE(review): near-duplicate of validate() except for the split and
    is_test flag -- candidates for a shared helper.
    """
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    for k in opt.keys():
        if (k in model_flags):
            # Architecture flags must match the checkpoint, not the CLI.
            setattr(args, k, opt[k])
    print(args)
    config = BertConfig.from_json_file(args.bert_config_path)
    model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
    model.load_cp(checkpoint)
    model.eval()
    test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                  args.batch_size, device,
                                  shuffle=False, is_test=True)
    trainer = build_trainer(args, device_id, model, None)
    trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
    """Evaluate a non-learned baseline on the test set: LEAD (cal_lead) or
    ORACLE (cal_oracle) extractive selection.

    NOTE(review): `device` and `device_id` are not parameters or locals here;
    they resolve to module-level names assigned in the __main__ block --
    confirm this is only called after argument parsing.
    """
    test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                  args.batch_size, device,
                                  shuffle=False, is_test=True)
    trainer = build_trainer(args, device_id, None, None)
    #
    if (cal_lead):
        trainer.test(test_iter, 0, cal_lead=True)
    elif (cal_oracle):
        trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
    """Run single-process training of the extractive summarizer, optionally
    resuming from args.train_from."""
    init_logger(args.log_file)
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    logger.info('Device ID %d' % device_id)
    logger.info('Device %s' % device)
    # Seed all RNGs for reproducibility (repeated again below after CUDA
    # device selection; the duplication is preserved as-is).
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    if device_id >= 0:
        torch.cuda.set_device(device_id)
        torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    def train_iter_fct():
        # Fresh shuffled training iterator each time the trainer asks for one.
        return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
                                 shuffle=True, is_test=False)
    model = Summarizer(args, device, load_pretrained_bert=True)
    if args.train_from != '':
        # Resume: restore model weights and optimizer state, and override
        # architecture flags with the values stored in the checkpoint.
        logger.info('Loading checkpoint from %s' % args.train_from)
        checkpoint = torch.load(args.train_from,
                                map_location=lambda storage, loc: storage)
        opt = vars(checkpoint['opt'])
        for k in opt.keys():
            if (k in model_flags):
                setattr(args, k, opt[k])
        model.load_cp(checkpoint)
        optim = model_builder.build_optim(args, model, checkpoint)
    else:
        optim = model_builder.build_optim(args, model, None)
    logger.info(model)
    trainer = build_trainer(args, device_id, model, optim)
    trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
    # CLI entry point: parse hyperparameters/paths and dispatch on -mode.
    parser = argparse.ArgumentParser()
    parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline','multi_layer_classifier'])
    # Fix: 'lead' and 'oracle' are dispatched below, so they must be accepted
    # here; previously argparse rejected them before the dispatch was reached.
    parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test','lead','oracle'])
    parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
    parser.add_argument("-model_path", default='../models/')
    parser.add_argument("-result_path", default='../results/cnndm')
    parser.add_argument("-temp_dir", default='../temp')
    parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
    parser.add_argument("-batch_size", default=1000, type=int)
    parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
    parser.add_argument("-hidden_size", default=128, type=int)
    parser.add_argument("-ff_size", default=512, type=int)
    parser.add_argument("-heads", default=4, type=int)
    parser.add_argument("-inter_layers", default=2, type=int)
    parser.add_argument("-rnn_size", default=512, type=int)
    parser.add_argument("-out_layer", default=-1, type=int)
    parser.add_argument("-freeze_initial", default=0, type=int)
    parser.add_argument("-param_init", default=0, type=float)
    parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
    parser.add_argument("-dropout", default=0.1, type=float)
    parser.add_argument("-optim", default='adam', type=str)
    parser.add_argument("-lr", default=1, type=float)
    parser.add_argument("-beta1", default= 0.9, type=float)
    parser.add_argument("-beta2", default=0.999, type=float)
    parser.add_argument("-decay_method", default='', type=str)
    parser.add_argument("-warmup_steps", default=8000, type=int)
    parser.add_argument("-max_grad_norm", default=0, type=float)
    parser.add_argument("-save_checkpoint_steps", default=5, type=int)
    parser.add_argument("-accum_count", default=1, type=int)
    parser.add_argument("-world_size", default=1, type=int)
    parser.add_argument("-report_every", default=1, type=int)
    parser.add_argument("-train_steps", default=1000, type=int)
    parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
    parser.add_argument('-visible_gpus', default='-1', type=str)
    parser.add_argument('-gpu_ranks', default='0', type=str)
    parser.add_argument('-log_file', default='../logs/cnndm.log')
    parser.add_argument('-dataset', default='')
    parser.add_argument('-seed', default=666, type=int)
    parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
    parser.add_argument("-test_from", default='')
    parser.add_argument("-train_from", default='')
    parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
    parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
    args = parser.parse_args()
    args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
    os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
    init_logger(args.log_file)
    # '-1' means CPU-only; any other value selects CUDA.
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    device_id = 0 if device == "cuda" else -1
    if(args.world_size>1):
        multi_main(args)
    elif (args.mode == 'train'):
        train(args, device_id)
    elif (args.mode == 'validate'):
        wait_and_validate(args, device_id)
    elif (args.mode == 'lead'):
        baseline(args, cal_lead=True)
    elif (args.mode == 'oracle'):
        baseline(args, cal_oracle=True)
    elif (args.mode == 'test'):
        cp = args.test_from
        try:
            step = int(cp.split('.')[-2].split('_')[-1])
        except (IndexError, ValueError):
            # Fix: bare 'except:' narrowed -- a checkpoint name that does not
            # follow 'model_step_<N>.pt' falls back to step 0; anything else
            # (e.g. KeyboardInterrupt) now propagates.
            step = 0
        test(args, device_id, cp, step)
|
en
| 0.768783
|
#!/usr/bin/env python Main training workflow Spawns 1 process per GPU # Create a thread to listen for errors in the child processes. # Train with multiprocessing. run process # killed by parent, do nothing # propagate exception to parent process, keeping original traceback A class that listens for exceptions in children processes and propagates the tracebacks to the parent process. init error handler error handler error listener signal handler # kill children processes \n\n-- Tracebacks above this line can probably be ignored --\n\n #
| 2.196675
| 2
|
hardware/ayam/pusher.py
|
haziquehaikal/smartdb
| 0
|
6625370
|
<reponame>haziquehaikal/smartdb<gh_stars>0
from pysher.channel import Channel
from pysher.connection import Connection
import hashlib
import hmac
import logging
import json
VERSION = '0.6.0'
class Pusher(object):
    """Client for the Pusher realtime websocket service.

    Owns a single websocket :class:`Connection`, tracks subscribed
    :class:`Channel` objects by name, and signs authentication tokens for
    ``private-`` and ``presence-`` channels with the app secret.
    """
    # Default (cluster-less) endpoint; overridden per instance in __init__
    # when `cluster` or `custom_host` is given.
    host = "ws.pusherapp.com"
    # Client identifier reported to the server in the connection URL.
    client_id = "Pysher"
    # Pusher wire-protocol version advertised in the connection URL.
    protocol = 6
    def __init__(self, key, cluster="", secure=True, secret="", user_data=None, log_level=logging.INFO,
                 daemon=True, port=443, reconnect_interval=10, custom_host="", auto_sub=False,
                 http_proxy_host="", http_proxy_port=0, http_no_proxy=None, http_proxy_auth=None,
                 **thread_kwargs):
        """Initialize the Pusher instance.
        :param str or bytes key: Pusher application key.
        :param str cluster: Pusher cluster name (e.g. "eu"); selects the host.
        :param bool secure: Use wss (default port 443) when True, ws otherwise.
        :param bytes or str secret: App secret used to sign channel auth tokens.
        :param Optional[Dict] user_data: Per-user payload sent on presence channels.
        :param str log_level: Log level handed to the connection thread.
        :param bool daemon: Run the connection thread as a daemon thread.
        :param int port: Websocket port; defaulted from `secure` when falsy.
        :param int or float reconnect_interval: Seconds between reconnect attempts.
        :param str custom_host: Overrides the computed Pusher host entirely.
        :param bool auto_sub: Re-subscribe known channels automatically on reconnect.
        :param str http_proxy_host: HTTP proxy host; empty string disables proxying.
        :param int http_proxy_port: HTTP proxy port.
        :param http_no_proxy: Hosts that bypass the proxy.
        :param http_proxy_auth: Proxy credentials.
        :param Any thread_kwargs: Extra kwargs forwarded to the connection thread.
        """
        # https://pusher.com/docs/clusters
        if cluster:
            self.host = "ws-{cluster}.pusher.com".format(cluster=cluster)
        else:
            self.host = "ws.pusherapp.com"
        self.key = key
        self.secret = secret
        self.user_data = user_data or {}
        # channel name -> Channel for every active subscription
        self.channels = {}
        # NOTE(review): `raw` is never written elsewhere in this snippet --
        # presumably a buffer for raw payloads; confirm before removing.
        self.raw = ''
        self.url = self._build_url(secure, port, custom_host)
        # Only install a reconnect handler when auto re-subscription was requested.
        if auto_sub:
            reconnect_handler = self._reconnect_handler
        else:
            reconnect_handler = None
        self.connection = Connection(self._connection_handler, self.url,
                                     reconnect_handler=reconnect_handler,
                                     log_level=log_level,
                                     daemon=daemon,
                                     reconnect_interval=reconnect_interval,
                                     socket_kwargs=dict(http_proxy_host=http_proxy_host,
                                                        http_proxy_port=http_proxy_port,
                                                        http_no_proxy=http_no_proxy,
                                                        http_proxy_auth=http_proxy_auth,
                                                        ping_timeout=100),
                                     **thread_kwargs)
    @property
    def key_as_bytes(self):
        """The app key as bytes, UTF-8-encoding str keys."""
        return self.key if isinstance(self.key, bytes) else self.key.encode('UTF-8')
    @property
    def secret_as_bytes(self):
        """The app secret as bytes, UTF-8-encoding str secrets."""
        return self.secret if isinstance(self.secret, bytes) else self.secret.encode('UTF-8')
    def connect(self):
        """Connect to Pusher (starts the connection thread)."""
        self.connection.start()
    def disconnect(self, timeout=None):
        """Disconnect from Pusher and forget all channel subscriptions."""
        self.connection.disconnect(timeout)
        self.channels = {}
    def subscribe(self, channel_name, auth=None):
        """Subscribe to a channel.
        :param str channel_name: The name of the channel to subscribe to.
        :param str auth: The token to use if authenticated externally.
        :rtype: pysher.Channel
        """
        data = {'channel': channel_name}
        # Without an external token we sign one ourselves for private/presence
        # channels; public channels need no auth field at all.
        if auth is None:
            if channel_name.startswith('presence-'):
                data['auth'] = self._generate_presence_token(channel_name)
                data['channel_data'] = json.dumps(self.user_data)
            elif channel_name.startswith('private-'):
                data['auth'] = self._generate_auth_token(channel_name)
        else:
            data['auth'] = auth
        self.connection.send_event('pusher:subscribe', data)
        self.channels[channel_name] = Channel(channel_name, self.connection)
        return self.channels[channel_name]
    def unsubscribe(self, channel_name):
        """Unsubscribe from a channel (no-op if not subscribed).
        :param str channel_name: The name of the channel to unsubscribe from.
        """
        if channel_name in self.channels:
            self.connection.send_event(
                'pusher:unsubscribe', {
                    'channel': channel_name,
                }
            )
            del self.channels[channel_name]
    def channel(self, channel_name):
        """Get an existing channel object by name.
        :param str channel_name: The name of the channel you want to retrieve
        :rtype: pysher.Channel or None
        """
        return self.channels.get(channel_name)
    def _connection_handler(self, event_name, data, channel_name):
        """Handle incoming data.
        :param str event_name: Name of the event.
        :param Any data: Data received.
        :param str channel_name: Name of the channel this event and data belongs to.
        """
        # Events for channels we never subscribed to are silently dropped.
        if channel_name in self.channels:
            self.channels[channel_name]._handle_event(event_name, data)
    def mehsinidata(self):
        # NOTE(review): proxies the connection's buffered data; the name looks
        # project-specific ("my data"?) -- confirm intent with callers.
        return self.connection.get_data()
    def _reconnect_handler(self):
        """Handle a reconnect by re-subscribing every known channel."""
        for channel_name, channel in self.channels.items():
            data = {'channel': channel_name}
            # Reuse the channel's stored auth token when one exists.
            if channel.auth:
                data['auth'] = channel.auth
            self.connection.send_event('pusher:subscribe', data)
    def _generate_auth_token(self, channel_name):
        """Generate a token for authentication with the given channel.
        :param str channel_name: Name of the channel to generate a signature for.
        :rtype: str
        """
        # Token format: "<app key>:<hex HMAC-SHA256 of 'socket_id:channel'>"
        subject = "{}:{}".format(self.connection.socket_id, channel_name)
        h = hmac.new(self.secret_as_bytes, subject.encode('utf-8'), hashlib.sha256)
        auth_key = "{}:{}".format(self.key, h.hexdigest())
        return auth_key
    def _generate_presence_token(self, channel_name):
        """Generate a presence token.
        :param str channel_name: Name of the channel to generate a signature for.
        :rtype: str
        """
        # Presence channels additionally sign the JSON-encoded user data.
        subject = "{}:{}:{}".format(self.connection.socket_id, channel_name, json.dumps(self.user_data))
        h = hmac.new(self.secret_as_bytes, subject.encode('utf-8'), hashlib.sha256)
        auth_key = "{}:{}".format(self.key, h.hexdigest())
        return auth_key
    def _build_url(self, secure=True, port=None, custom_host=None):
        """Build the websocket connection URL for this client."""
        path = "/app/{}?client={}&version={}&protocol={}".format(
            self.key, self.client_id, VERSION, self.protocol
        )
        proto = "wss" if secure else "ws"
        host = custom_host or self.host
        if not port:
            port = 443 if secure else 80
        return "{}://{}:{}{}".format(proto, host, port, path)
|
from pysher.channel import Channel
from pysher.connection import Connection
import hashlib
import hmac
import logging
import json
VERSION = '0.6.0'
class Pusher(object):
host = "ws.pusherapp.com"
client_id = "Pysher"
protocol = 6
def __init__(self, key, cluster="", secure=True, secret="", user_data=None, log_level=logging.INFO,
daemon=True, port=443, reconnect_interval=10, custom_host="", auto_sub=False,
http_proxy_host="", http_proxy_port=0, http_no_proxy=None, http_proxy_auth=None,
**thread_kwargs):
"""Initialize the Pusher instance.
:param str or bytes key:
:param str cluster:
:param bool secure:
:param bytes or str secret:
:param Optional[Dict] user_data:
:param str log_level:
:param bool daemon:
:param int port:
:param int or float reconnect_interval:
:param str custom_host:
:param bool auto_sub:
:param stt http_proxy_host:
:param int http_proxy_port:
:param http_no_proxy:
:param http_proxy_auth:
:param Any thread_kwargs:
"""
# https://pusher.com/docs/clusters
if cluster:
self.host = "ws-{cluster}.pusher.com".format(cluster=cluster)
else:
self.host = "ws.pusherapp.com"
self.key = key
self.secret = secret
self.user_data = user_data or {}
self.channels = {}
self.raw = ''
self.url = self._build_url(secure, port, custom_host)
if auto_sub:
reconnect_handler = self._reconnect_handler
else:
reconnect_handler = None
self.connection = Connection(self._connection_handler, self.url,
reconnect_handler=reconnect_handler,
log_level=log_level,
daemon=daemon,
reconnect_interval=reconnect_interval,
socket_kwargs=dict(http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port,
http_no_proxy=http_no_proxy,
http_proxy_auth=http_proxy_auth,
ping_timeout=100),
**thread_kwargs)
@property
def key_as_bytes(self):
return self.key if isinstance(self.key, bytes) else self.key.encode('UTF-8')
@property
def secret_as_bytes(self):
return self.secret if isinstance(self.secret, bytes) else self.secret.encode('UTF-8')
def connect(self):
"""Connect to Pusher"""
self.connection.start()
def disconnect(self, timeout=None):
"""Disconnect from Pusher"""
self.connection.disconnect(timeout)
self.channels = {}
def subscribe(self, channel_name, auth=None):
"""Subscribe to a channel.
:param str channel_name: The name of the channel to subscribe to.
:param str auth: The token to use if authenticated externally.
:rtype: pysher.Channel
"""
data = {'channel': channel_name}
if auth is None:
if channel_name.startswith('presence-'):
data['auth'] = self._generate_presence_token(channel_name)
data['channel_data'] = json.dumps(self.user_data)
elif channel_name.startswith('private-'):
data['auth'] = self._generate_auth_token(channel_name)
else:
data['auth'] = auth
self.connection.send_event('pusher:subscribe', data)
self.channels[channel_name] = Channel(channel_name, self.connection)
return self.channels[channel_name]
def unsubscribe(self, channel_name):
"""Unsubscribe from a channel
:param str channel_name: The name of the channel to unsubscribe from.
"""
if channel_name in self.channels:
self.connection.send_event(
'pusher:unsubscribe', {
'channel': channel_name,
}
)
del self.channels[channel_name]
def channel(self, channel_name):
"""Get an existing channel object by name
:param str channel_name: The name of the channel you want to retrieve
:rtype: pysher.Channel or None
"""
return self.channels.get(channel_name)
def _connection_handler(self, event_name, data, channel_name):
"""Handle incoming data.
:param str event_name: Name of the event.
:param Any data: Data received.
:param str channel_name: Name of the channel this event and data belongs to.
"""
if channel_name in self.channels:
self.channels[channel_name]._handle_event(event_name, data)
def mehsinidata(self):
return self.connection.get_data()
def _reconnect_handler(self):
"""Handle a reconnect."""
for channel_name, channel in self.channels.items():
data = {'channel': channel_name}
if channel.auth:
data['auth'] = channel.auth
self.connection.send_event('pusher:subscribe', data)
def _generate_auth_token(self, channel_name):
"""Generate a token for authentication with the given channel.
:param str channel_name: Name of the channel to generate a signature for.
:rtype: str
"""
subject = "{}:{}".format(self.connection.socket_id, channel_name)
h = hmac.new(self.secret_as_bytes, subject.encode('utf-8'), hashlib.sha256)
auth_key = "{}:{}".format(self.key, h.hexdigest())
return auth_key
def _generate_presence_token(self, channel_name):
"""Generate a presence token.
:param str channel_name: Name of the channel to generate a signature for.
:rtype: str
"""
subject = "{}:{}:{}".format(self.connection.socket_id, channel_name, json.dumps(self.user_data))
h = hmac.new(self.secret_as_bytes, subject.encode('utf-8'), hashlib.sha256)
auth_key = "{}:{}".format(self.key, h.hexdigest())
return auth_key
def _build_url(self, secure=True, port=None, custom_host=None):
path = "/app/{}?client={}&version={}&protocol={}".format(
self.key, self.client_id, VERSION, self.protocol
)
proto = "wss" if secure else "ws"
host = custom_host or self.host
if not port:
port = 443 if secure else 80
return "{}://{}:{}{}".format(proto, host, port, path)
|
en
| 0.681441
|
Initialize the Pusher instance. :param str or bytes key: :param str cluster: :param bool secure: :param bytes or str secret: :param Optional[Dict] user_data: :param str log_level: :param bool daemon: :param int port: :param int or float reconnect_interval: :param str custom_host: :param bool auto_sub: :param stt http_proxy_host: :param int http_proxy_port: :param http_no_proxy: :param http_proxy_auth: :param Any thread_kwargs: # https://pusher.com/docs/clusters Connect to Pusher Disconnect from Pusher Subscribe to a channel. :param str channel_name: The name of the channel to subscribe to. :param str auth: The token to use if authenticated externally. :rtype: pysher.Channel Unsubscribe from a channel :param str channel_name: The name of the channel to unsubscribe from. Get an existing channel object by name :param str channel_name: The name of the channel you want to retrieve :rtype: pysher.Channel or None Handle incoming data. :param str event_name: Name of the event. :param Any data: Data received. :param str channel_name: Name of the channel this event and data belongs to. Handle a reconnect. Generate a token for authentication with the given channel. :param str channel_name: Name of the channel to generate a signature for. :rtype: str Generate a presence token. :param str channel_name: Name of the channel to generate a signature for. :rtype: str
| 2.668261
| 3
|
web_crawler/__init__.py
|
tul1/py-webcrawler
| 0
|
6625371
|
""" Top level package for the py-webcrawler """
__author__ = """ <NAME> """
__version__ = '0.1.0'
from web_crawler.web_crawler import WebCrawler
from web_crawler.web_crawler_async import WebCrawlerAsync
__all__ = ['WebCrawler', 'WebCrawlerAsync']
|
""" Top level package for the py-webcrawler """
__author__ = """ <NAME> """
__version__ = '0.1.0'
from web_crawler.web_crawler import WebCrawler
from web_crawler.web_crawler_async import WebCrawlerAsync
__all__ = ['WebCrawler', 'WebCrawlerAsync']
|
en
| 0.63205
|
Top level package for the py-webcrawler <NAME>
| 1.460335
| 1
|
buildingspy/io/__init__.py
|
wanaylor/NewBuildingsPy
| 1
|
6625372
|
'''
This module contains the classes
- *Reader* that can be used to read ``*.mat`` files that have been generated by Dymola,
- *Reporter* that can be used to report to the standard output and standard error streams, and
- *Plotter* that contains method to plot results.
'''
|
'''
This module contains the classes
- *Reader* that can be used to read ``*.mat`` files that have been generated by Dymola,
- *Reporter* that can be used to report to the standard output and standard error streams, and
- *Plotter* that contains method to plot results.
'''
|
en
| 0.915045
|
This module contains the classes - *Reader* that can be used to read ``*.mat`` files that have been generated by Dymola, - *Reporter* that can be used to report to the standard output and standard error streams, and - *Plotter* that contains method to plot results.
| 1.398479
| 1
|
valhalla/_transform.py
|
Best10-study/Kakao-Valhalla
| 10
|
6625373
|
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.base import TransformerMixin
from sklearn.utils import Parallel, delayed
from sklearn.utils.metaestimators import _BaseComposition
class FeatureConcat(_BaseComposition, TransformerMixin):
    """Concatenates results of multiple transformer objects.

    Unlike ``sklearn.pipeline.FeatureUnion``, which hstacks the results so
    the return shape can come out as ``[batch_size * feature_num, 0]``, this
    variant stacks per-transformer outputs so the intended return shape is
    ``(batch_size, feature_num)``.
    Parameters
    ----------
    transformer_list : list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.
    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    transformer_weights : dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=None,
                 transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights
        # Fail fast on malformed names or non-transformer entries.
        self._validate_transformers()
    def get_params(self, deep=True):
        """Get parameters for this estimator.
        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        return self._get_params('transformer_list', deep=deep)
    def set_params(self, **kwargs):
        """Set the parameters of this estimator.
        Valid parameter keys can be listed with ``get_params()``.
        Returns
        -------
        self
        """
        self._set_params('transformer_list', **kwargs)
        return self
    def _validate_transformers(self):
        """Check each entry has a valid name and the fit/transform API."""
        names, transformers = zip(*self.transformer_list)
        # validate names
        self._validate_names(names)
        # validate estimators (None / 'drop' are allowed placeholders)
        for t in transformers:
            if t is None or t == 'drop':
                continue
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All estimators should implement fit and "
                                "transform. '%s' (type %s) doesn't" %
                                (t, type(t)))
    def _iter(self):
        """
        Generate (name, trans, weight) tuples excluding None and
        'drop' transformers.
        """
        get_weight = (self.transformer_weights or {}).get
        return ((name, trans, get_weight(name))
                for name, trans in self.transformer_list
                if trans is not None and trans != 'drop')
    def get_feature_names(self):
        """Get feature names from all transformers.
        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        feature_names = []
        for name, trans, weight in self._iter():
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s (type %s) does not "
                                     "provide get_feature_names."
                                     % (str(name), type(trans).__name__))
            # Prefix each feature with its transformer's name for uniqueness.
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names
    def fit(self, X, y=None):
        """Fit all transformers using X.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data, used to fit transformers.
        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.
        Returns
        -------
        self : FeatureConcat
            This estimator
        """
        self.transformer_list = list(self.transformer_list)
        self._validate_transformers()
        # Fit every (non-dropped) transformer, possibly in parallel.
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for _, trans, _ in self._iter())
        self._update_transformer_list(transformers)
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers, transform the data and concatenate results.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data to be transformed.
        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.
        Returns
        -------
        X_t : array-like or sparse matrix
            Stacked results of the transformers.
        """
        self._validate_transformers()
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, X, y, weight,
                                        **fit_params)
            for name, trans, weight in self._iter())
        if not result:
            # All transformers are None
            return np.zeros((X.shape[0], 0))
        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        # NOTE(review): ndarray/sparse outputs are stacked with vstack while
        # DataFrame/Series outputs are concatenated column-wise (axis=1) --
        # verify this asymmetry is intended.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.vstack(Xs).tocsr()
        else:
            if isinstance(Xs[0], np.ndarray):
                Xs = np.vstack(Xs)
            elif isinstance(Xs[0], pd.Series) or isinstance(Xs[0], pd.DataFrame):
                Xs = pd.concat(Xs, axis=1)
        return Xs
    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.
        Parameters
        ----------
        X : iterable or array-like, depending on transformers
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix
            Stacked results of the transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, X, None, weight)
            for name, trans, weight in self._iter())
        if not Xs:
            # All transformers are None
            return np.zeros((X.shape[0], 0))
        # Same stacking policy as fit_transform; see the review note there.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.vstack(Xs).tocsr()
        else:
            if isinstance(Xs[0], np.ndarray):
                Xs = np.vstack(Xs)
            elif isinstance(Xs[0], pd.Series) or isinstance(Xs[0], pd.DataFrame):
                Xs = pd.concat(Xs, axis=1)
        return Xs
    def _update_transformer_list(self, transformers):
        # Splice fitted transformers back in, keeping None/'drop' slots as-is.
        transformers = iter(transformers)
        self.transformer_list[:] = [(name, old if old is None or old == 'drop'
                                     else next(transformers))
                                    for name, old in self.transformer_list]
# weight and fit_params are not used but it allows _fit_one_transformer,
# _transform_one and _fit_transform_one to have the same signature to
# factorize the code in ColumnTransformer
def _fit_one_transformer(transformer, X, y, weight=None, **fit_params):
    """Fit *transformer* on (X, y) and return the fitted transformer.

    *weight* and *fit_params* are accepted only for signature parity.
    """
    fitted = transformer.fit(X, y)
    return fitted
def _transform_one(transformer, X, y, weight, **fit_params):
    """Transform X with *transformer*, scaling the output by *weight* if set.

    *y* and *fit_params* are accepted only for signature parity.
    """
    output = transformer.transform(X)
    # A per-transformer weight, when present, multiplies the output.
    if weight is not None:
        output = output * weight
    return output
def _fit_transform_one(transformer, X, y, weight, **fit_params):
    """Fit *transformer* on (X, y) and transform X in one step.

    Returns a ``(result, fitted_transformer)`` pair; *result* is scaled by
    *weight* when a weight is given.
    """
    # Prefer the estimator's fused fit_transform; otherwise fit then transform.
    if hasattr(transformer, 'fit_transform'):
        output = transformer.fit_transform(X, y, **fit_params)
    else:
        output = transformer.fit(X, y, **fit_params).transform(X)
    if weight is not None:
        return output * weight, transformer
    return output, transformer
|
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.base import TransformerMixin
from sklearn.utils import Parallel, delayed
from sklearn.utils.metaestimators import _BaseComposition
class FeatureConcat(_BaseComposition, TransformerMixin):
"""Concatenates results of multiple transformer objects.
FeatureUnion는 결과를 hstack해버리기 때문에, return의 shape가 [batch_size*feature_num,0]으로 나온다.
내가 원하는 return의 shape은 (batch_size, feature_num)으로 고치고 싶다.
sklearn.
Parameters
----------
transformer_list : list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
transformer_weights : dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=None,
transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self._validate_transformers()
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params('transformer_list', deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``.
Returns
-------
self
"""
self._set_params('transformer_list', **kwargs)
return self
def _validate_transformers(self):
names, transformers = zip(*self.transformer_list)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t is None or t == 'drop':
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All estimators should implement fit and "
"transform. '%s' (type %s) doesn't" %
(t, type(t)))
def _iter(self):
"""
Generate (name, trans, weight) tuples excluding None and
'drop' transformers.
"""
get_weight = (self.transformer_weights or {}).get
return ((name, trans, get_weight(name))
for name, trans in self.transformer_list
if trans is not None and trans != 'drop')
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans, weight in self._iter():
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s (type %s) does not "
"provide get_feature_names."
% (str(name), type(trans).__name__))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
self : FeatureUnion
This estimator
"""
self.transformer_list = list(self.transformer_list)
self._validate_transformers()
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for _, trans, _ in self._iter())
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
self._validate_transformers()
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, X, y, weight,
**fit_params)
for name, trans, weight in self._iter())
if not result:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.vstack(Xs).tocsr()
else:
if isinstance(Xs[0], np.ndarray):
Xs = np.vstack(Xs)
elif isinstance(Xs[0], pd.Series) or isinstance(Xs[0], pd.DataFrame):
Xs = pd.concat(Xs, axis=1)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight)
for name, trans, weight in self._iter())
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.vstack(Xs).tocsr()
else:
if isinstance(Xs[0], np.ndarray):
Xs = np.vstack(Xs)
elif isinstance(Xs[0], pd.Series) or isinstance(Xs[0], pd.DataFrame):
Xs = pd.concat(Xs, axis=1)
return Xs
def _update_transformer_list(self, transformers):
transformers = iter(transformers)
self.transformer_list[:] = [(name, old if old is None or old == 'drop'
else next(transformers))
for name, old in self.transformer_list]
# weight and fit_params are not used but it allows _fit_one_transformer,
# _transform_one and _fit_transform_one to have the same signature to
# factorize the code in ColumnTransformer
def _fit_one_transformer(transformer, X, y, weight=None, **fit_params):
return transformer.fit(X, y)
def _transform_one(transformer, X, y, weight, **fit_params):
res = transformer.transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight
def _fit_transform_one(transformer, X, y, weight, **fit_params):
if hasattr(transformer, 'fit_transform'):
res = transformer.fit_transform(X, y, **fit_params)
else:
res = transformer.fit(X, y, **fit_params).transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res, transformer
return res * weight, transformer
|
en
| 0.621071
|
Concatenates results of multiple transformer objects. FeatureUnion는 결과를 hstack해버리기 때문에, return의 shape가 [batch_size*feature_num,0]으로 나온다. 내가 원하는 return의 shape은 (batch_size, feature_num)으로 고치고 싶다. sklearn. Parameters ---------- transformer_list : list of (string, transformer) tuples List of transformer objects to be applied to the data. The first half of each tuple is the name of the transformer. n_jobs : int or None, optional (default=None) Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. transformer_weights : dict, optional Multiplicative weights for features per transformer. Keys are transformer names, values the weights. Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. Set the parameters of this estimator. Valid parameter keys can be listed with ``get_params()``. Returns ------- self # validate names # validate estimators Generate (name, trans, weight) tuples excluding None and 'drop' transformers. Get feature names from all transformers. Returns ------- feature_names : list of strings Names of the features produced by transform. Fit all transformers using X. Parameters ---------- X : iterable or array-like, depending on transformers Input data, used to fit transformers. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- self : FeatureUnion This estimator Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. 
Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. # All transformers are None Transform X separately by each transformer, concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. # All transformers are None # weight and fit_params are not used but it allows _fit_one_transformer, # _transform_one and _fit_transform_one to have the same signature to # factorize the code in ColumnTransformer # if we have a weight for this transformer, multiply output # if we have a weight for this transformer, multiply output
| 2.558154
| 3
|
examples/milestone_add_with_submilestone.py
|
levi-rs/testrail-api-wrapper
| 2
|
6625374
|
from datetime import datetime as dt
import traw
def main():
    """Create a yearly TestRail milestone with a nested Q1 sub-milestone."""
    # The client reads its credentials from environment variables.
    client = traw.Client()
    # The project that will own both milestones.
    proj = client.project(15)
    # Build the year-long parent milestone locally first.
    yearly = client.milestone()
    yearly.name = "{0}'s Yearly Test Results".format(proj.name)
    yearly.description = "{0}'s testing results for the year".format(proj.name)
    yearly.start_on = dt(year=2018, month=1, day=1)
    yearly.due_on = dt(year=2018, month=12, day=31)
    yearly.project = proj
    # Persist it; TestRail returns the canonical Milestone object.
    parent_ms = client.add(yearly)
    assert isinstance(parent_ms, traw.models.Milestone)
    # Build the first-quarter milestone the same way.
    quarter = client.milestone()
    quarter.name = "{0}'s 1st Quarter Test Results".format(proj.name)
    quarter.description = "{0}'s testing results for the first quarter".format(proj.name)
    quarter.start_on = dt(year=2018, month=1, day=1)
    quarter.due_on = dt(year=2018, month=3, day=31)
    quarter.project = proj
    # Attaching a parent converts the milestone into a sub-milestone object.
    quarter = quarter.add_parent(parent_ms)
    # Persist it; TestRail returns the canonical SubMilestone object.
    sub_ms = client.add(quarter)
    assert isinstance(sub_ms, traw.models.SubMilestone)
    # The round-tripped sub-milestone must point back at the parent we made.
    assert sub_ms.parent.id == parent_ms.id
if __name__ == "__main__":
    main()
|
from datetime import datetime as dt
import traw
def main() -> None:
    """Create a yearly parent milestone plus a Q1 sub-milestone in TestRail.

    Drafts are built locally, attached to a project, then persisted with
    ``client.add``; the objects returned by the API replace the drafts.
    """
    client = traw.Client()  # Credentials loaded through ENV vars
    # Locate the project you will be adding milestones to
    project = client.project(15)
    # Create the parent milestone
    new_parent_ms = client.milestone()  # Creates a new, empty milestone object
    new_parent_ms.name = "{0}'s Yearly Test Results".format(project.name)
    new_parent_ms.description = "{0}'s testing results for the year".format(project.name)
    new_parent_ms.start_on = dt(year=2018, month=1, day=1)  # Start on 2018/01/01
    new_parent_ms.due_on = dt(year=2018, month=12, day=31)  # Due on 2018/12/31
    # Associate this milestone with a project
    new_parent_ms.project = project
    # Add the new milestone to TestRail, replacing it with the Milestone returned
    # from the TestRail API.
    parent_ms = client.add(new_parent_ms)
    assert isinstance(parent_ms, traw.models.Milestone)
    # Create a sub milestone for the first quarter of 2018
    new_sub_ms = client.milestone()  # Creates a new, empty milestone object
    new_sub_ms.name = "{0}'s 1st Quarter Test Results".format(project.name)
    new_sub_ms.description = "{0}'s testing results for the first quarter".format(project.name)
    new_sub_ms.start_on = dt(year=2018, month=1, day=1)  # Start on 2018/01/01
    new_sub_ms.due_on = dt(year=2018, month=3, day=31)  # Due on 2018/03/31
    # Associate this milestone with a project
    new_sub_ms.project = project
    # Add the parent milestone, transforming this into a sub_milestone object
    new_sub_ms = new_sub_ms.add_parent(parent_ms)
    # Add the new milestone to TestRail, replacing it with the Milestone returned
    # from the TestRail API.
    sub_ms = client.add(new_sub_ms)
    assert isinstance(sub_ms, traw.models.SubMilestone)
    # The sub_ms's parent should match the one we created
    assert sub_ms.parent.id == parent_ms.id
if __name__ == "__main__":
main()
|
en
| 0.8101
|
# Credentials loaded through ENV vars # Locate the project you will be adding milestones to # Create the parent milestone # Creates a new, empty milestone object # Start on 2018/01/01 # Due on 2018/12/31 # Associate this milestone with a project # Add the new milestone to TestRail, replacing it with the Milestone returned # from the TestRail API. # Create a sub milestone for the first quarter of 2018 # Creates a new, empty milestone object # Start on 2018/01/01 # Due on 2018/03/31 # Associate this milestone with a project # Add the parent milestone, transforming this into a sub_milestone object # Add the new milestone to TestRail, replacing it with the Milestone returned # from the TestRail API. # The sub_ms's parent should match the one we created
| 2.513301
| 3
|
forte/data/readers/tests/conllu_ud_reader_test.py
|
mgupta1410/forte-1
| 0
|
6625375
|
"""
Tests for conllU reader
"""
import os
import unittest
from typing import List
from ft.onto.base_ontology import Sentence, Document, Dependency
from forte.data.readers import ConllUDReader
from forte.data.data_pack import DataPack
class ConllUDReaderTest(unittest.TestCase):
    """Tests for :class:`ConllUDReader` against the bundled CoNLL-U samples."""

    def setUp(self):
        """
        Reading the data into data_pack object to be used in the tests
        """
        # Sample data lives next to this test file.
        conll_ud_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                    'data_samples/conll_ud')
        reader = ConllUDReader()
        self.data_packs: List[DataPack] = \
            [data_pack for data_pack in reader.iter(conll_ud_dir)]
        # Expected doc ids of the two sample documents, in reading order.
        self.doc_ids = ["weblog-blogspot.com_nominations_20041117172713_ENG_"
                        "20041117_172713",
                        "weblog-blogspot.com_nominations_20041117172713_ENG_"
                        "20041117_172714"]

    def test_reader_text(self):
        """Document and sentence text must match the sample files exactly."""
        # One inner list of sentences per expected document.
        expected_docs_text = [
            ["From the AP comes this story :",
             "President <NAME> Tuesday nominated two individuals to "
             "replace retiring jurists on federal courts in the "
             "Washington area ."],
            ["Bush nominated <NAME> for a 15 - year "
             "term as associate judge of the Superior Court of the "
             "District of Columbia , replacing <NAME> ."]
        ]
        self.assertEqual(len(self.data_packs), 2)
        for doc_index, expected_doc_id in enumerate(self.doc_ids):
            data_pack = self.data_packs[doc_index]
            self.assertTrue(data_pack.meta.doc_id == expected_doc_id)
            # Grab the first (and only expected) Document entry.
            doc_entry = None
            for d in data_pack.get(Document):
                doc_entry = d
                break
            expected_doc_text = expected_docs_text[doc_index]
            # Document text is the sentences joined by single spaces.
            self.assertEqual(doc_entry.text, ' '.join(expected_doc_text))
            sent_entries = data_pack.get(Sentence)
            for sent_entry, expected_sent_text in zip(
                    sent_entries, expected_doc_text):
                self.assertEqual(sent_entry.text, expected_sent_text)

    def test_reader_dependency_tree(self):
        """Every dependency link in document 1 must resolve to root 'nominated'."""
        doc_index = 1
        data_pack = self.data_packs[doc_index]
        expected_doc_id = self.doc_ids[doc_index]
        self.assertTrue(data_pack.meta.doc_id == expected_doc_id)
        self.assertEqual(
            len(data_pack.get_entries_by_type(Sentence)), 1)
        dependencies = data_pack.get_entries_by_type(Dependency)
        # Walking up from any link must reach the same root token.
        for link in dependencies:
            root_token = get_dependency_tree_root(link, data_pack)
            self.assertEqual(root_token.text, "nominated")
def get_dependency_tree_root(link, data_pack):
    """
    Return the root token of the dependency tree containing ``link``.

    Walks parent links upward from the given dependency link until a
    token flagged ``is_root`` is reached.

    Args:
        link: The intermediate dependency link to start from.
        data_pack: The data pack the dependency links live in.

    Returns:
        The root token of the dependency tree.
    """
    # TODO: make it robust enough to handle cycles for enhanced dependencies
    token = link.get_parent()
    if token.is_root:
        return token
    # ``token`` is known not to be the root here, so the original trailing
    # ``token if token.is_root else ...`` re-check was dead code; recurse
    # directly on the first link whose child is ``token``.
    parent_link = list(data_pack.get_links_by_child(token))[0]
    return get_dependency_tree_root(parent_link, data_pack)
if __name__ == "__main__":
unittest.main()
|
"""
Tests for conllU reader
"""
import os
import unittest
from typing import List
from ft.onto.base_ontology import Sentence, Document, Dependency
from forte.data.readers import ConllUDReader
from forte.data.data_pack import DataPack
class ConllUDReaderTest(unittest.TestCase):
    """Tests for :class:`ConllUDReader` using the bundled CoNLL-U sample data."""

    def setUp(self):
        """
        Reading the data into data_pack object to be used in the tests
        """
        # The sample corpus sits next to this test module.
        conll_ud_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                    'data_samples/conll_ud')
        reader = ConllUDReader()
        self.data_packs: List[DataPack] = \
            [data_pack for data_pack in reader.iter(conll_ud_dir)]
        # Doc ids of the two sample documents, in the order the reader yields them.
        self.doc_ids = ["weblog-blogspot.com_nominations_20041117172713_ENG_"
                        "20041117_172713",
                        "weblog-blogspot.com_nominations_20041117172713_ENG_"
                        "20041117_172714"]

    def test_reader_text(self):
        """Check that document and sentence text round-trip through the reader."""
        expected_docs_text = [
            ["From the AP comes this story :",
             "President <NAME> Tuesday nominated two individuals to "
             "replace retiring jurists on federal courts in the "
             "Washington area ."],
            ["Bush nominated <NAME> for a 15 - year "
             "term as associate judge of the Superior Court of the "
             "District of Columbia , replacing <NAME> ."]
        ]
        self.assertEqual(len(self.data_packs), 2)
        for doc_index, expected_doc_id in enumerate(self.doc_ids):
            data_pack = self.data_packs[doc_index]
            self.assertTrue(data_pack.meta.doc_id == expected_doc_id)
            # Only the first Document entry is needed.
            doc_entry = None
            for d in data_pack.get(Document):
                doc_entry = d
                break
            expected_doc_text = expected_docs_text[doc_index]
            # Full document text is the space-joined sentence list.
            self.assertEqual(doc_entry.text, ' '.join(expected_doc_text))
            sent_entries = data_pack.get(Sentence)
            for sent_entry, expected_sent_text in zip(
                    sent_entries, expected_doc_text):
                self.assertEqual(sent_entry.text, expected_sent_text)

    def test_reader_dependency_tree(self):
        """All dependency links of document 1 must lead to the root 'nominated'."""
        doc_index = 1
        data_pack = self.data_packs[doc_index]
        expected_doc_id = self.doc_ids[doc_index]
        self.assertTrue(data_pack.meta.doc_id == expected_doc_id)
        self.assertEqual(
            len(data_pack.get_entries_by_type(Sentence)), 1)
        dependencies = data_pack.get_entries_by_type(Dependency)
        for link in dependencies:
            root_token = get_dependency_tree_root(link, data_pack)
            self.assertEqual(root_token.text, "nominated")
def get_dependency_tree_root(link, data_pack):
    """
    Return the root token of the dependency tree containing ``link``.

    Walks parent links upward from the given dependency link until a
    token flagged ``is_root`` is reached.

    Args:
        link: The intermediate dependency link to start from.
        data_pack: The data pack the dependency links live in.

    Returns:
        The root token of the dependency tree.
    """
    # TODO: make it robust enough to handle cycles for enhanced dependencies
    token = link.get_parent()
    if token.is_root:
        return token
    # ``token`` cannot be the root on this path, so the original trailing
    # ``token if token.is_root else ...`` re-check was dead code; recurse
    # directly on the first link whose child is ``token``.
    parent_link = list(data_pack.get_links_by_child(token))[0]
    return get_dependency_tree_root(parent_link, data_pack)
if __name__ == "__main__":
unittest.main()
|
en
| 0.824402
|
Tests for conllU reader Reading the data into data_pack object to be used in the tests Returns the root token of the dependency tree. Args: link: The intermediate dependency link. data_pack: The data pack to be worked on. Returns: # TODO: make it robust enough to handle cycles for enhanced dependencies
| 2.629621
| 3
|
src/penn_chime/validators/validators.py
|
covidcaremap/chime
| 222
|
6625376
|
"""design pattern via https://youtu.be/S_ipdVNSFlo?t=2153"""
from typing import Optional
from datetime import date
from .base import Validator
EPSILON = 1.e-7
class OptionalValue(Validator):
    """Validator that accepts any value whatsoever, including ``None``."""

    def __init__(self) -> None:
        pass

    def validate(self, key, value):
        # Everything is considered valid; never raises.
        return None
class Bounded(Validator):
    """A bounded number."""
    def __init__(
            self,
            lower_bound: Optional[float] = None,
            upper_bound: Optional[float] = None) -> None:
        # At least one bound is required; use OptionalValue for "no check".
        assert lower_bound is not None or upper_bound is not None, "Do not use this object to create an unbounded validator."
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # Error-message lookup table keyed by (lower_bound, upper_bound).
        # When one bound is None, the first key collides with one of the
        # None-keys below it; Python's later-duplicate-wins dict-literal rule
        # makes the more specific one-sided message win the lookup.
        self.message = {
            (lower_bound, upper_bound): f"in ({self.lower_bound}, {self.upper_bound})",
            (None, upper_bound): f"less than {self.upper_bound}",
            (lower_bound, None): f"greater than {self.lower_bound}",
        }
    def validate(self, key, value):
        """This method implicitly validates isinstance(value, (float, int)) because it will throw a TypeError on comparison"""
        if value is None:
            raise ValueError(f"{key} is required.")
        # Only compare against a bound that was actually supplied.
        if (self.upper_bound is not None and value > self.upper_bound) \
                or (self.lower_bound is not None and value < self.lower_bound):
            raise ValueError(f"{key}: {value} needs to be {self.message[(self.lower_bound, self.upper_bound)]}.")
class OptionalBounded(Bounded):
    """A bounded number, or ``None`` when the value is simply absent."""

    def __init__(
            self,
            lower_bound: Optional[float] = None,
            upper_bound: Optional[float] = None) -> None:
        super().__init__(lower_bound=lower_bound, upper_bound=upper_bound)

    def validate(self, key, value):
        # ``None`` is acceptable here; anything else gets the Bounded check.
        if value is not None:
            super().validate(key, value)
class Rate(Validator):
    """A rate, i.e. a number lying in the closed interval [0, 1]."""

    def __init__(self) -> None:
        pass

    def validate(self, key, value):
        # A rate is mandatory; None is rejected outright.
        if value is None:
            raise ValueError(f"{key} is required.")
        out_of_range = value < 0.0 or value > 1.0
        if out_of_range:
            raise ValueError(
                f"{key}: {value} needs to be a rate (i.e. in [0,1]).")
class Date(Validator):
    """A calendar date (an instance of ``datetime.date``)."""

    def __init__(self) -> None:
        pass

    def validate(self, key, value):
        # The date is mandatory and must be a real date object.
        if value is None:
            raise ValueError(f"{key} is required.")
        if not isinstance(value, date):
            raise ValueError(f"{key}: {value} must be a date.")
class OptionalDate(Date):
    """A calendar date, or ``None`` when no date was supplied."""

    def __init__(self) -> None:
        super().__init__()

    def validate(self, key, value):
        # ``None`` passes straight through; anything else must be a date.
        if value is not None:
            super().validate(key, value)
class ValDisposition(Validator):
    """A disposition: an object carrying positive ``days`` and a valid ``rate``."""

    def __init__(self) -> None:
        pass

    def validate(self, key, value):
        if value is None:
            raise ValueError(f"{key} is required.")
        # Delegate each field to the dedicated validator, suffixing the key
        # so error messages identify which field failed.
        days_check = Bounded(lower_bound=EPSILON)
        days_check(key=key + '_days', value=value.days)
        rate_check = Rate()
        rate_check(key=key + '_rate', value=value.rate)
|
"""design pattern via https://youtu.be/S_ipdVNSFlo?t=2153"""
from typing import Optional
from datetime import date
from .base import Validator
EPSILON = 1.e-7
class OptionalValue(Validator):
    """Any value at all"""
    def __init__(self) -> None:
        pass
    def validate(self, key, value):
        # Accepts everything, including None; never raises.
        pass
class Bounded(Validator):
    """A bounded number."""
    def __init__(
            self,
            lower_bound: Optional[float] = None,
            upper_bound: Optional[float] = None) -> None:
        # At least one bound must be given; assert guards programmer error.
        assert lower_bound is not None or upper_bound is not None, "Do not use this object to create an unbounded validator."
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # Message table keyed by (lower_bound, upper_bound).  If a bound is
        # None, the first key duplicates one of the None-keys and Python's
        # later-duplicate-wins rule selects the right one-sided phrasing.
        self.message = {
            (lower_bound, upper_bound): f"in ({self.lower_bound}, {self.upper_bound})",
            (None, upper_bound): f"less than {self.upper_bound}",
            (lower_bound, None): f"greater than {self.lower_bound}",
        }
    def validate(self, key, value):
        """This method implicitly validates isinstance(value, (float, int)) because it will throw a TypeError on comparison"""
        if value is None:
            raise ValueError(f"{key} is required.")
        # Only supplied bounds participate in the comparison.
        if (self.upper_bound is not None and value > self.upper_bound) \
                or (self.lower_bound is not None and value < self.lower_bound):
            raise ValueError(f"{key}: {value} needs to be {self.message[(self.lower_bound, self.upper_bound)]}.")
class OptionalBounded(Bounded):
    """a bounded number or a None."""
    def __init__(
            self,
            lower_bound: Optional[float] = None,
            upper_bound: Optional[float] = None) -> None:
        super().__init__(lower_bound=lower_bound, upper_bound=upper_bound)
    def validate(self, key, value):
        # None is allowed; any other value must satisfy the Bounded check.
        if value is None:
            return None
        super().validate(key, value)
class Rate(Validator):
    """A rate in [0,1]."""
    def __init__(self) -> None:
        pass
    def validate(self, key, value):
        # A rate is mandatory and must lie within the closed interval [0, 1].
        if value is None:
            raise ValueError(f"{key} is required.")
        if 0.0 > value or value > 1.0:
            raise ValueError(
                f"{key}: {value} needs to be a rate (i.e. in [0,1]).")
class Date(Validator):
    """A date."""
    def __init__(self) -> None:
        pass
    def validate(self, key, value):
        # The value is mandatory and must be a datetime.date instance.
        if value is None:
            raise ValueError(f"{key} is required.")
        if not isinstance(value, (date,)):
            raise ValueError(f"{key}: {value} must be a date.")
class OptionalDate(Date):
    """A date, or ``None`` when no date was supplied."""
    def __init__(self) -> None:
        super().__init__()
    def validate(self, key, value):
        # None passes; anything else must satisfy the Date check.
        if value is None:
            return None
        super().validate(key, value)
class ValDisposition(Validator):
    """A disposition: requires positive ``days`` and a valid ``rate`` field."""
    def __init__(self) -> None:
        pass
    def validate(self, key, value):
        if value is None:
            raise ValueError(f"{key} is required.")
        # Delegate per-field checks; key suffixes identify the failing field.
        Bounded(lower_bound=EPSILON)(key=key + '_days', value=value.days)
        Rate()(key=key + '_rate', value=value.rate)
|
en
| 0.685148
|
design pattern via https://youtu.be/S_ipdVNSFlo?t=2153 Any value at all A bounded number. This method implicitly validates isinstance(value, (float, int)) because it will throw a TypeError on comparison a bounded number or a None. A rate in [0,1]. A date.
| 3.545602
| 4
|
morphomath1.0.py
|
KiliBio/MorphoMath
| 0
|
6625377
|
#!/usr/bin/python
'''
Code written in Python 3.5 by KiliBio
'''
'''
Missions accomplished:
General design ✓
Point-specific mode ✓
Ratio-specific mode
Angle-specific mode
Area-specific mode
all to log_file ✓
'''
import tkinter as tk
import tkinter.messagebox
import csv
from tkinter import ttk
from PIL import ImageTk, Image, ImageGrab
from math import sqrt, floor
from datetime import datetime
import time
title = "MorphoMathv1.0"
the_font = ("Courier 10")
gen_padx = 5
gen_pady = 2
class MorphoMath(tk.Tk):
    # the MorphoMath class inherits from the Tk class in the tk module
    def __init__(self, *arg, **kwargs):  # the init function accepts arguments and keyword arguments, will always run
        '''__init__ function to initiate the automatic function to create the widgets'''
        tk.Tk.__init__(self, *arg, **kwargs)
        # sets the size of the main window automatically to the size of the screen
        self.master_width, self.master_height = self.winfo_screenwidth(), self.winfo_screenheight()
        print(self.master_width, self.master_height)
        self.geometry('%dx%d+0+0' % (self.master_width-18, self.master_height-85))
        # icon in the upper-left corner
        # NOTE(review): hard-coded absolute Windows path — confirm the icon
        # file exists on the target machine.
        tk.Tk.iconbitmap(self,
                         default="C:/morphoMathicon2.ico")
        # title of the window
        tk.Tk.wm_title(self, title)
        ttk.Style().configure('button_design.TButton', foreground='grey5',
                              background='RoyalBlue2', font='Courier 11 bold')
        self.mainframe = tk.Frame(self)
        #self.mainframe["bg"] = "khaki2"
        self.mainframe.pack(side="top", fill="both", expand=True)
        self.mainframe.grid_rowconfigure(0, weight=1)
        self.mainframe.grid_columnconfigure(0, weight=1)  # weight --> prioritizes things
        self["bg"] = "khaki2"
        self.allframes = {}  # the allframes dictionary stores all frames and make them acessable to switch between windows
        for frame in (WarningPage, StartPage,
                      Point_spec_mode, Distance_spec_mode, Angles_spec_mode, Area_spec_mode):
            '''this for loop consists of all windows in the application'''
            self.specific_frame = frame(self.mainframe, self)
            self.allframes[frame] = self.specific_frame
            self.specific_frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(WarningPage)
        # Menu bar; most commands are placeholders (command=None) for now.
        self.menu = tk.Menu(self)
        self.config(menu=self.menu)
        self.FileMenu = tk.Menu(self.menu)
        self.menu.add_cascade(label="File", menu=self.FileMenu)
        self.FileMenu.add_command(label="Point-Mode", command=None)
        # there is no controller here which could be accessed!
        self.FileMenu.add_separator()
        self.FileMenu.add_command(label="Export...", command=None)
        self.FileMenu.add_command(label="Import...", command=None)
        self.FileMenu.add_separator()
        self.FileMenu.add_command(label="Quit", command=None)
        self.EditMenu = tk.Menu(self.menu)
        self.menu.add_cascade(label="Edit", menu=self.EditMenu)
        self.EditMenu.add_command(label="Copy", command=None)
        self.EditMenu.add_command(label="Paste", command=None)
        self.EditMenu.add_command(label="Find", command=None)
        self.HelpMenu = tk.Menu(self.menu)
        self.menu.add_cascade(label="Help", menu=self.HelpMenu)
        self.HelpMenu.add_command(label="Help...", command=None)

    def show_frame(self, cont):
        '''Raise the frame registered for page class ``cont`` to the front.'''
        self.specific_frame = self.allframes[cont]
        self.specific_frame.tkraise()

    def get_page(self, page_class):
        '''Return the instantiated frame object stored for ``page_class``.'''
        return self.allframes[page_class]
class WarningPage(tk.Frame):  # Start Page
    '''Startup disclaimer page: "Agree" proceeds to StartPage, "Disagree" quits.'''

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.warning_page_frame = tk.Frame(self)
        self.warning_page_frame.config(height=300, width=600)
        self.warning_page_frame.pack(anchor="center")
        self.betweenframe = tk.Frame(self.warning_page_frame)
        self.betweenframe.config(height=200)
        self.betweenframe.pack(anchor="center")
        self.textframe = tk.Frame(self.warning_page_frame)
        self.textframe.config(height=200)
        self.textframe.pack(fill="both", anchor="center")
        self.warninglabel = tk.Label(self.textframe)
        self.warninglabel["text"] = """WARNING: Use %s at your own risks!\n
There is no promise of exact calculations or any kind of warranty!\n
Check the source-code to make sure that the calculations are correct or in a way you want them to.""" % (title)
        self.warninglabel["font"] = "Courier 10"
        self.warninglabel["bg"] = "tomato"
        self.warninglabel.pack()
        self.buttonframe = tk.Frame(self.warning_page_frame)
        self.buttonframe.pack(anchor="center")
        # "Agree" switches to the start page via the controller.
        self.agree_button = ttk.Button(self.buttonframe, command=lambda: controller.show_frame(StartPage))
        self.agree_button["style"] = 'button_design.TButton'
        self.agree_button["text"] = "Agree"
        self.agree_button["cursor"] = "circle"
        self.agree_button.pack(side="left", pady=50, padx=50)
        # "Disagree" exits the application (builtin ``quit``).
        self.disagree_button = ttk.Button(self.buttonframe, command=quit)
        self.disagree_button["style"] = 'button_design.TButton'
        self.disagree_button["text"] = "Disagree"
        self.disagree_button["cursor"] = "circle"
        self.disagree_button.pack(side="right", pady=50, padx=50)
        self.textframe2 = tk.Frame(self.warning_page_frame)
        self.textframe2.config(height=200)
        self.textframe2.pack(fill="both", anchor="center")
        # NOTE(review): ``self.warninglabel`` is re-assigned here; the first
        # label stays packed, only the attribute reference is replaced.
        self.warninglabel = tk.Label(self.textframe2)
        self.warninglabel["text"] = """From the author of this program it is strongly recommended,\n
to use this program and the results with an absolute critical evaluation."""
        self.warninglabel["font"] = "Courier 10"
        self.warninglabel.pack()
class StartPage(tk.Frame):  # Start Page
    '''Mode-selection page with one button and one description label per mode.'''

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.startpagelabelframe = tk.Frame(self)
        self.startpagelabelframe.config(height=50)
        self.startpagelabelframe.pack(side="top")
        self.startpagelabelframe.grid_rowconfigure(0, weight=1)
        self.startpagelabelframe.grid_columnconfigure(0, weight=1)
        self.title_label = tk.Label(self.startpagelabelframe)
        self.title_label["text"] = title
        self.title_label["font"] = "Courier 14 bold"
        self.title_label.pack(fill="both", anchor="center")
        # Row of the four mode-selection buttons.
        self.startpagebuttonframe = tk.Frame(self)
        self.startpagebuttonframe.config(height=50)
        self.startpagebuttonframe.pack(side="top", fill="x", anchor="center")
        self.point_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Point_spec_mode))
        self.point_spec_button["text"] = "Point Mode"
        self.point_spec_button["style"] = 'button_design.TButton'
        self.point_spec_button["cursor"] = "circle"
        self.point_spec_button.pack(side="left", anchor="center", padx=100, pady=50)
        self.distance_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Distance_spec_mode))
        self.distance_spec_button["text"] = "Distance Mode"
        self.distance_spec_button["style"] = 'button_design.TButton'
        self.distance_spec_button["cursor"] = "circle"
        self.distance_spec_button.pack(side="left", anchor="center", padx=100, pady=50)
        self.angles_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Angles_spec_mode))
        self.angles_spec_button["text"] = "Angle Mode"
        self.angles_spec_button["style"] = 'button_design.TButton'
        self.angles_spec_button["cursor"] = "circle"
        self.angles_spec_button.pack(side="right", anchor="center", padx=100, pady=50)
        self.area_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Area_spec_mode))
        self.area_spec_button["text"] = "Area Mode"
        self.area_spec_button["style"] = 'button_design.TButton'
        self.area_spec_button["cursor"] = "circle"
        self.area_spec_button.pack(side="right", anchor="center", padx=100, pady=50)
        # NOTE(review): ``self.startpagelabelframe`` is re-assigned here — the
        # frame created above stays packed, only the attribute is rebound.
        self.startpagelabelframe = tk.Frame(self)
        self.startpagelabelframe.config(height=50)
        self.startpagelabelframe.pack(side="top", fill="x", anchor="center")
        self.point_spec_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spec_label["text"] = "The Point Mode allows you to mark \n" \
                                        "specific morphological landmarks on \n" \
                                        "the plant.\n" \
                                        "The distances between all the points \n" \
                                        "and ratios between the all those \n" \
                                        "distances are than calculated and \n" \
                                        "stored in a separate csv-file."
        self.point_spec_label.pack(side="left", anchor="center", padx=10, pady=20)
        self.point_spex_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spex_label["text"] = "The Distance Mode allows you \n" \
                                        "to mark morphological \n" \
                                        "significant distances, \n" \
                                        "of which the ratio are directly \n" \
                                        "calculated and stored in an \n" \
                                        "additional separate csv-file."
        self.point_spex_label.pack(side="left", anchor="center", padx=10, pady=20)
        self.point_spet_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spet_label["text"] = "YOU NEED TO RELATE THIS \n" \
                                        "DATA TO SOMETHING TO \n" \
                                        "TO BECOME FROM VALUE!!! \n" \
                                        "The Area Mode allows \n" \
                                        "you to store one or \n" \
                                        "more specific areas in \n" \
                                        "a structure with morphological \n" \
                                        "relevant landmark data and \n" \
                                        "stores those values in a \n" \
                                        "separate csv.file."
        self.point_spet_label.pack(side="right", anchor="center", padx=10, pady=20)
        # NOTE(review): ``self.point_spex_label`` is re-used for the angle-mode
        # label, shadowing the distance-mode label reference above.
        self.point_spex_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spex_label["text"] = "YOU NEED TO RELATE THIS \n" \
                                        "DATA TO SOMETHING TO \n" \
                                        "TO BECOME FROM VALUE!!! \n" \
                                        "The angle mode allows \n" \
                                        "you to store one or more\n" \
                                        "specific angles in a structure \n" \
                                        "with morphological relevant landmark \n" \
                                        "data and stores those values in \n" \
                                        "a separate csv.file"
        self.point_spex_label.pack(side="right", anchor="center", padx=10, pady=20)
class Point_spec_mode(tk.Frame):
'''the Point-specific mode allow the user to specify an arbitrary amount of points and get a file of ratios'''
    def __init__(self, parent, controller):
        '''Build the header, the image-path entry row and the upload controls.'''
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.point_speclabelframe = tk.Frame(self)
        self.point_speclabelframe.config(height=40)
        self.point_speclabelframe.pack(side="top")
        self.point_speclabelframe.grid_rowconfigure(0, weight=1)
        self.point_speclabelframe.grid_columnconfigure(0, weight=1)
        self.main_label = tk.Label(self.point_speclabelframe)
        self.main_label["text"] = "Point specific mode"
        self.main_label["font"] = "Courier 11 bold"
        self.main_label.pack(fill="both", anchor="center")
        self.point_specpath_input_frame = tk.Frame(self)
        self.point_specpath_input_frame.config(height=50)
        self.point_specpath_input_frame.pack(side="top", fill="x", anchor="center")
        ### entry-field to enter the image path
        # NOTE(review): this ``global pic_path`` has no visible effect — the
        # widget is stored on ``self.pic_path``; confirm the global is unused.
        global pic_path
        self.pic_path = tk.Entry(self.point_specpath_input_frame)
        self.pic_path["bg"] = "mint cream"
        self.pic_path["fg"] = "grey25"
        self.pic_path["bd"] = 2
        self.pic_path["cursor"] = "xterm"
        self.pic_path["width"] = 80
        self.pic_path.delete(0, "end")
        self.pic_path.insert(0, "C:/input")
        self.pic_path.pack(side="left", fill="x", expand=True, anchor="center", padx=50, pady=10)
        self.pic_path.bind("<Return>", self.upload_by_enter)
        ### in the default-status the uploaded image will be resized, because the image should be taken by powerful
        ### cameras; the box should be checked if smaller structures are considered
        self.resize_decision = True
        self.resize_checkbox = ttk.Checkbutton(self.point_specpath_input_frame,
                                               command=self.image_resize_question)
        self.resize_checkbox["text"] = "Not Resize image"
        self.resize_checkbox.pack(side="left", anchor="center")
        ### upload button, which will create the canvas with the image specified in the image path entry field
        self.uploadbutton = ttk.Button(self.point_specpath_input_frame,
                                       command=self.image_to_canvas)
        self.uploadbutton["style"] = 'button_design.TButton'
        self.uploadbutton["cursor"] = "circle"
        self.uploadbutton["text"] = "✅"
        self.uploadbutton.pack(side="right", anchor="center", padx=50, pady=10)
    def upload_by_enter(self, event):
        '''Keyboard shortcut: <Return> in the path entry triggers the same
        upload as the button; the Tk ``event`` argument is ignored.'''
        self.image_to_canvas()
def image_resize_question(self):
'''this function will activated through checking the box and the image will not be resized'''
self.resize_decision = False
return self.resize_decision
    def image_to_canvas(self):
        '''Load the image named in the path entry and build the sidebar.

        Verifies the file exists by opening and closing it, resets all
        point-bookkeeping lists, then creates the scrollable right-hand
        sidebar and delegates widget creation to :meth:`populate`.
        Shows a message box on a missing file.
        '''
        try:
            '''if the image exists the image will be opened, if not an IOError will be returned'''
            f = open(self.pic_path.get())
            f.close()
            self.image_path = self.pic_path.get()
            ### initialing lists, which are used by the program to store the values of the points
            self.all_points = []
            self.x_cor_points = []
            self.y_cor_points = []
            self.point_names = []
            self.names_with_points = []
            self.distance_between_points = []
            self.pointname = "No Pointname given"
            ### options and entry fields on the right side
            ### everything has been packed into a canvas to be able to scroll around it with growing size, due to points
            self.right_side_canvas = tk.Canvas(self, borderwidth=0, background="#ffffff")
            self.right_side_canvas.config(width=self.winfo_screenwidth() - 1150,
                                          height=self.winfo_screenheight() - 135)
            self.point_specimagespec_frame = tk.Frame(self.right_side_canvas, background="#ffffff")
            self.verticalbar_right = tk.Scrollbar(self, orient="vertical", command=self.right_side_canvas.yview)
            self.right_side_canvas.configure(yscrollcommand=self.verticalbar_right.set)
            self.verticalbar_right.pack(side="right", fill="y")
            self.right_side_canvas.pack(side="right", fill="x", expand=True)
            self.right_side_canvas.create_window((4, 4), window=self.point_specimagespec_frame,
                                                 anchor="n", tags="self.frame")
            ### with every pressed "Enter" the Frame will be updated and made available for the scrollbar
            self.point_specimagespec_frame.bind("<Configure>", self.onFrameConfigure)
            self.populate()
            # self.right_side_canvas.config(scrollregion=(self.right_side_canvas.winfo_x(),
            #                                             self.right_side_canvas.winfo_y(),
            #                                             self.right_side_canvas.winfo_width(),
            #                                             self.right_side_canvas.winfo_height()))
        except IOError:
            tkinter.messagebox.showinfo("Error Message", "Image couldn't be found in image path.")
    def onFrameConfigure(self, event):
        '''Reset the scroll region to encompass the inner frame'''
        # Fired on every <Configure> of the sidebar frame so the canvas
        # scrollbar always covers the full content.
        self.right_side_canvas.configure(scrollregion=self.right_side_canvas.bbox("all"))
    def populate(self):
        ''' this function lets the sidebar grow with newly added labels'''
        ### head-label: tells the user that this is the point specifier
        self.spec_label = tk.Label(self.point_specimagespec_frame)
        self.spec_label["text"] = "Point specifier"
        self.spec_label["font"] = "Courier 8 bold"
        self.spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        self.spec_label1 = tk.Label(self.point_specimagespec_frame)
        self.spec_label1["text"] = "1.Define point|2.Hit Enter|3.Mark"
        self.spec_label1["font"] = "Courier 8"
        self.spec_label1.pack(side="top", anchor="center", padx=5, pady=5)
        ### entry-field to enter the names of the points
        self.point_spec = tk.Entry(self.point_specimagespec_frame)
        self.point_spec["bg"] = "mint cream"
        self.point_spec["fg"] = "grey25"
        self.point_spec["bd"] = 2
        self.point_spec["cursor"] = "xterm"
        self.point_spec["width"] = 40
        self.point_spec.pack(side="top", anchor="center", padx=5, pady=5)
        self.point_spec.bind("<Return>", self.point_name_append)
        self.print_to_frame = tk.Frame(self.point_specimagespec_frame)
        self.print_to_frame.config(width=80, height=80)
        self.print_to_frame.pack(side="top", fill="x", anchor="center")
        self.between_label = tk.Label(self.print_to_frame)
        self.between_label.config(width=40, height=1)
        self.between_label["font"] = "Courier 8 bold"
        self.between_label["text"] = " "
        self.between_label.pack(side="bottom", anchor="center", pady=1)
        ### label to for the file specifier, which names the output file
        self.file_spec_label = tk.Label(self.point_specimagespec_frame)
        self.file_spec_label["text"] = "Name the Output File"
        self.file_spec_label["font"] = "Courier 8"
        self.file_spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        ### Entry where the output file name is specified
        self.save_to_file_name = tk.Entry(self.point_specimagespec_frame)
        self.save_to_file_name["bg"] = "mint cream"
        self.save_to_file_name["fg"] = "grey25"
        self.save_to_file_name["bd"] = 2
        self.save_to_file_name["cursor"] = "xterm"
        self.save_to_file_name["width"] = 40
        self.save_to_file_name.pack(side="top", anchor="center", padx=5, pady=5)
        self.save_to_file_name.bind("<Return>", self.all_done_by_enter)
        self.all_done_button = ttk.Button(self.point_specimagespec_frame,
                                          command=self.all_done_func)
        self.all_done_button["style"] = 'button_design.TButton'
        self.all_done_button["cursor"] = "circle"
        self.all_done_button["text"] = "✅ ALL DONE ✅"
        self.all_done_button.pack(side="bottom", anchor="center", padx=5, pady=5)
        ### frame for the imagecanvas and the image all_points
        self.point_specimagecanvas_frame = tk.Frame(self)
        self.point_specimagecanvas_frame.config(width=1200)
        self.point_specimagecanvas_frame.pack(side="top", fill="x", anchor="center")
        # Loads/optionally shrinks the image into self.imagex before display.
        self.image_resize(self.image_path)
        ### creates the Canvas where the image and scrollbars are stored
        self.photocanvas = tk.Canvas(self.point_specimagecanvas_frame, width=1150, height=1000)
        self.photocanvas.create_image(self.imagex.width() / 2, self.imagex.height() / 2,
                                      anchor="center", image=self.imagex, tags="bg_img")
        self.photocanvas.xview_moveto(0)
        self.photocanvas.yview_moveto(0)
        self.photocanvas["cursor"] = "crosshair"
        self.photocanvas["highlightthickness"] = 5
        self.horizontalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.HORIZONTAL)
        self.horizontalbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.verticalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.VERTICAL)
        self.verticalbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.horizontalbar.config(command=self.photocanvas.xview)
        self.verticalbar.config(command=self.photocanvas.yview)
        self.photocanvas.config(width=950, height=1000)
        self.photocanvas.config(scrollregion=(2, 2, self.imagex.width(), self.imagex.height()))
        self.photocanvas.config(xscrollcommand=self.horizontalbar.set, yscrollcommand=self.verticalbar.set)
        self.photocanvas.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        ### binds left-mouse-button to the point and the right-mouse button to the graph method
        # NOTE(review): ``self.point`` and ``self.graph`` are defined elsewhere
        # in this class — confirm both exist before shipping.
        self.photocanvas.bind("<Button-1>", self.point)
        self.photocanvas.bind("<Button-3>", self.graph)
def image_resize(self, image_path):
    '''Load the image at *image_path* into ``self.imagex``, shrinking wide
    images by an integer factor so they fit the canvas.

    Records ``self.originalwidth/originalheight`` (pre-resize),
    ``self.size_factor`` (1 when no resize happens) and
    ``self.manipulatedwidth/manipulatedheight`` (as displayed).
    '''
    # Load once; both the resize and the no-resize paths need the image
    # and its original dimensions (the original duplicated this load).
    self.imagex = ImageTk.PhotoImage(file=image_path)
    self.originalwidth, self.originalheight = self.imagex.width(), self.imagex.height()
    self.size_factor = 1
    ### shrink only when resizing is enabled (checkbox unchecked) and the
    ### image is wider than 1000 px
    if self.resize_decision and self.imagex.width() > 1000:
        ### the size_factor is an individual measure, larger pictures will
        ### have a larger size_factor
        self.size_factor = floor(self.imagex.width() / 1000)
        # NOTE(review): reopens self.image_path rather than the *image_path*
        # parameter — identical in current callers, but confirm intent.
        image = Image.open(self.image_path)
        # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow
        # releases (use Image.LANCZOS there) — kept for compatibility.
        image = image.resize(
            (self.imagex.width() // self.size_factor, self.imagex.height() // self.size_factor)
            , Image.ANTIALIAS)
        self.imagex = ImageTk.PhotoImage(image)
    self.manipulatedwidth, self.manipulatedheight = self.imagex.width(), self.imagex.height()
def point_name_append(self, event):
    '''Read a point name from the entry field, remember it, and echo it
    (with its running number) as a green label in the side bar.

    Returns the updated name list and the name just entered.
    '''
    # fetch the freshly typed name and record it
    name = self.point_spec.get()
    self.pointname = name
    self.point_names.append(name)
    # green side-bar label echoing "(n) name"
    label = tk.Label(self.print_to_frame)
    label.config(width=40, height=1)
    label.configure(font="Courier 8 bold", bg="SeaGreen1", fg="black")
    label.pack(side="top", anchor="center", pady=1)
    label.configure(text="(%d) %s" % (len(self.point_names), name))
    self.value_label = label
    # clear the entry so the next name can be typed immediately
    self.point_spec.delete(0, 'end')
    return self.point_names, self.pointname
def point(self, event):
    '''Left-click handler: draws a small oval at the clicked spot, records its
    canvas coordinates, and labels it on the canvas with the most recently
    entered point name. Returns the flat coordinate list.'''
    # translate window coordinates into canvas coordinates (accounts for scrolling)
    self.x, self.y = self.photocanvas.canvasx(event.x), self.photocanvas.canvasy(event.y)
    self.x_cor, self.y_cor = int(self.x), int(self.y)
    # small marker oval centred on the click
    self.last_point = self.photocanvas.create_oval(self.x_cor - 3, self.y_cor - 3,
                                                   self.x_cor + 3, self.y_cor + 3, fill="tomato")
    ### appending the x-coordinate to lists
    self.all_points.append(self.x_cor)
    self.x_cor_points.append(self.x_cor)
    ### appending the y-coordinate to lists
    self.all_points.append(self.y_cor)
    self.y_cor_points.append(self.y_cor)
    # blue side-bar label echoing "(n) X: .. | Y: .." (n = clicks so far)
    self.coor_label = tk.Label(self.print_to_frame)
    self.coor_label.config(width=40, height=1)
    self.coor_label["font"] = "Courier 8 bold"
    self.coor_label["bg"] = "CadetBlue1"
    self.coor_label["fg"] = "black"
    self.coor_label.pack(side="top", anchor="center", pady=1)
    self.coor_label.configure(
        text="(%d) X: %d | Y: %d" % ((len(self.all_points) / 2), self.x_cor, self.y_cor))
    try:
        ### if there are points in the all-points list, the point names will be displayed onto the screen
        self.label_on_canvas = tk.Label(self.photocanvas)
        self.label_on_canvas["font"] = "Courier 6"
        self.label_on_canvas["bg"] = "White"
        self.label_on_canvas["fg"] = "black"
        self.label_on_canvas["text"] = ""
        self.label_on_canvas.pack()
        self.label_on_canvas.configure(text="(%d) %s" % ((len(self.all_points) / 2), self.pointname))
        # self.label_on_canvas.configure(text="%s" % (self.point_names[(len(self.all_points) // 2) - 1]))
        ### if/else to arrange the coordinate value names away from the center
        ### which could possibly cover other important parts of the image
        if self.x_cor < (self.imagex.width() // 2):  # on left side
            if self.y_cor < (self.imagex.height() // 2):  # on top
                self.photocanvas.create_window(self.x_cor - 35, self.y_cor - 35,
                                               window=self.label_on_canvas)
            else:  # on bottom
                self.photocanvas.create_window(self.x_cor - 35, self.y_cor + 35,
                                               window=self.label_on_canvas)
        else:  # on right side
            if self.y_cor < (self.imagex.height() // 2):  # on top
                self.photocanvas.create_window(self.x_cor + 35, self.y_cor - 35,
                                               window=self.label_on_canvas)
            else:  # on bottom
                self.photocanvas.create_window(self.x_cor + 35, self.y_cor + 35,
                                               window=self.label_on_canvas)
    except IndexError:
        ### if no points have been named/identified an IndexError will occur
        # NOTE(review): with the indexing line above commented out,
        # self.pointname is always set, so this path looks unreachable
        # (it belonged to the commented-out point_names lookup) — confirm.
        tkinter.messagebox.showinfo("Error Message", "Marked points on Image hasn't been defined yet")
        self.label_on_canvas.configure(text="undefined point")
        self.photocanvas.create_window(self.x_cor - 35, self.y_cor - 35, window=self.label_on_canvas)
        # undo the click: remove the oval, the side-bar label and the stored coordinates
        self.photocanvas.delete(self.last_point)
        self.coor_label.destroy()
        del self.all_points[-2:]
        # NOTE(review): x_cor_points/y_cor_points get ONE entry per click,
        # so deleting [-2:] here also discards the PREVIOUS point's
        # coordinate — looks like an off-by-one; confirm intended behavior.
        del self.x_cor_points[-2:]
        del self.y_cor_points[-2:]
    return self.all_points
def graph(self, event):
    '''Right-click handler: draws a polyline through every marked point.

    Purely visual — the line helps the user see the distances and feeds
    into no calculation. (A dead ``global theline`` statement was removed:
    the name was declared but never assigned.)
    '''
    self.photocanvas.create_line(self.all_points, tags="theline", width=4, fill="tomato")
def all_done_by_enter(self, event):
    '''Keyboard shortcut: hitting Enter in the output-file-name entry behaves
    exactly like pressing the "ALL DONE" button.'''
    self.all_done_func()
def all_done_func(self):
    '''Handler for the "ALL DONE" button / Enter key.

    Validates the user input, saves a snapshot of the visible canvas, writes
    all coordinates, distances and ratios to a CSV file, and quits the
    application. On validation failure an error box is shown and the
    application keeps running.
    '''
    ### report how long the user spent marking and specifying the points
    enter_time = time.time()
    print("Enter Time: --- %d minutes %0.3f seconds ---" %
          (((enter_time - start_time) // 60), (enter_time - start_time) % 60))
    self.num_of_calcs(len(self.point_names))
    if self.all_points == [] or self.point_names == []:
        ### either no point has been marked on the photo or no point was named
        tkinter.messagebox.showinfo("Error Message", "No points have been marked on the image \n"
                                                     "or none of the points have been named!")
    elif (len(self.all_points) / 2) != len(self.point_names):
        ### each marked point contributes an x AND a y to all_points,
        ### so the counts must match exactly
        tkinter.messagebox.showinfo("Error Message",
                                    "Number of marked points and Number of Named points doesn't match!")
    else:
        ### everything is ok --> visible part of the image-canvas will be printed and saved to the output folder
        self.snapsaveCanvas()
        # NOTE(review): 'C:/output' is concatenated without a path separator,
        # so the file lands in C:/ as "output<name>.csv" — confirm intent.
        with open('C:/output'
                  + self.save_to_file_name.get() + '.csv', "w") as self.csv_file:
            self.file_to_write = csv.writer(self.csv_file, lineterminator='\n')
            self.file_to_write.writerow(["--------MAIN INFORMATION ABOUT THE FILE--------"])
            self.file_to_write.writerow(["FILE NAME", self.save_to_file_name.get()])
            self.file_to_write.writerow(["PATH IMAGE", self.pic_path.get()])
            self.file_to_write.writerow(["DATE CREATED", str(datetime.now())])
            self.file_to_write.writerow(
                ["ORIGINAL IMAGE DIMENSION", self.originalwidth, self.originalheight])
            self.file_to_write.writerow(["COMPRESSED IMAGE DIMENSION", self.manipulatedwidth,
                                         self.manipulatedheight, "Factor %s" % (self.size_factor)])
            ### calls the points and calculations function
            self.all_points_and_calculations_to_file()
        # (the with-statement closes the file; the former explicit close() was redundant)
        calc_time = time.time()
        print("Calc Time: --- %d minutes %0.3f seconds ---" %
              (((calc_time - enter_time) // 60), (calc_time - enter_time) % 60))
        ### terminates the program
        self.quit()
def num_of_calcs(self, x):
    '''Print (and return) how many values will be computed for *x* points.

    For n points there are C(n, 2) = n*(n-1)/2 pairwise distances, and for
    d distances there are d*(d-1)/2 distance pairs to form ratios from.
    The former accumulation loops were sums of range(n), i.e. triangular
    numbers, replaced here by their closed forms.

    Returns a ``(points, distances, ratios)`` tuple; existing callers only
    want the console output and may ignore it.
    '''
    number = x
    print('Number of points:', number)
    firstresult = number * (number - 1) // 2     # == sum(range(number))
    print('Number of distances:', firstresult)
    secondresult = firstresult * (firstresult - 1) // 2  # == sum(range(firstresult))
    print('Number of ratios:', secondresult)
    return number, firstresult, secondresult
def all_points_and_calculations_to_file(self):
    '''Write three CSV sections: every named point with its coordinates, the
    distance between every pair of points, and the ratio between every pair
    of distances (written in both directions).

    NOTE: consumes ``self.names_with_points`` and
    ``self.distance_between_points`` destructively via pop(0) — both lists
    are empty afterwards.'''
    self.file_to_write.writerow([""])
    self.file_to_write.writerow(["--------COORDINATES OF ALL POINTS--------"])
    self.file_to_write.writerow([""])
    self.file_to_write.writerow(["NAME OF THE POINT", "X-COORDINATE", "Y-COORDINATE"])
    for i in range(len(self.point_names)):
        ### appends in point name list: NAME, X, Y (flat triples) and prints
        ### those three values to the output file
        self.names_with_points.append(self.point_names[i])
        self.names_with_points.append(self.x_cor_points[i])
        self.names_with_points.append(self.y_cor_points[i])
        self.file_to_write.writerow([self.point_names[i], self.x_cor_points[i], self.y_cor_points[i]])
    self.file_to_write.writerow([""])
    self.file_to_write.writerow(["--------DISTANCES BETWEEN POINTS--------"])
    self.file_to_write.writerow([""])
    self.file_to_write.writerow(["1ST POINT->2ND POINT", "DISTANCE"])
    while self.names_with_points != []:
        ### pops off one (name, x, y) triple from the front of the list
        key1 = self.names_with_points.pop(0)
        x1 = self.names_with_points.pop(0)
        y1 = self.names_with_points.pop(0)
        for i in range(0, len(self.names_with_points), 3):
            ### pair key1 with every remaining point (each unordered pair once)
            key2 = self.names_with_points[i]
            x2 = self.names_with_points[i + 1]
            y2 = self.names_with_points[i + 2]
            ### distance result in a X-Y plane are simply calculated through the Pythagorean theorem
            result = sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
            ### the distances are then stored flat as NAME, VALUE pairs
            self.distance_between_points.append('%s->%s' % (key1, key2))
            self.distance_between_points.append(result)
            self.file_to_write.writerow(['%s->%s' % (key1, key2), "%0.4f" % result])
    self.file_to_write.writerow([""])
    self.file_to_write.writerow(["--------RATIO BETWEEN EACH DISTANCES--------"])
    self.file_to_write.writerow([""])
    self.file_to_write.writerow(["1ST DIST/2ND DIST", "RATIO"])
    while self.distance_between_points != []:
        ### the ratios are calculated the same way:
        ### the first distance is popped off with its value
        distance1 = self.distance_between_points.pop(0)
        num1 = self.distance_between_points.pop(0)
        number1 = float(num1)
        for i in range(0, len(self.distance_between_points), 2):
            ### distance1 is then compared to all other remaining distances
            distance2 = self.distance_between_points[i]
            num2 = self.distance_between_points[i + 1]
            number2 = float(num2)
            # each ratio is written in both directions (a/b and b/a)
            normal_result = number1 / number2
            reversed_result = number2 / number1
            self.file_to_write.writerow(["%s/%s" % (distance1, distance2), "%0.4f" % (normal_result)])
            self.file_to_write.writerow(["%s/%s" % (distance2, distance1), "%0.4f" % (reversed_result)])
    self.file_to_write.writerow([""])
def snapsaveCanvas(self):
    '''Capture the visible part of the image canvas as a screenshot and save
    it next to the CSV output as "<name>_image_out.jpg".'''
    # screen coordinates of the canvas, as a (x, y, x1, y1) box
    bbox = self.canvas_info()
    out_name = 'C:/output' + self.save_to_file_name.get() + '_image_out' + '.jpg'
    # NOTE(review): save() returns None, so self.grabcanvas ends up None —
    # kept as-is to preserve behavior; confirm the attribute is unused.
    self.grabcanvas = ImageGrab.grab(bbox=bbox).save(out_name)
def canvas_info(self):
    '''Return the on-screen bounding box of the photo canvas as a
    (left, top, right, bottom) tuple, for use with ImageGrab.'''
    canvas = self.photocanvas
    left = canvas.winfo_rootx() + canvas.winfo_x()
    top = canvas.winfo_rooty() + canvas.winfo_y()
    return (left, top, left + canvas.winfo_width(), top + canvas.winfo_height())
class Distance_spec_mode(tk.Frame):
    '''Distance specific mode: the user names a distance, then marks its two
    end points on the image; the program writes every named distance and all
    pairwise ratios between those distances to a CSV output file.'''

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        ### header frame holding the mode title
        self.point_speclabelframe = tk.Frame(self)
        self.point_speclabelframe.config(height=40)
        self.point_speclabelframe.pack(side="top")
        self.point_speclabelframe.grid_rowconfigure(0, weight=1)
        self.point_speclabelframe.grid_columnconfigure(0, weight=1)
        self.main_label = tk.Label(self.point_speclabelframe)
        self.main_label["text"] = "Distance specific mode"
        self.main_label["font"] = "Courier 11 bold"
        self.main_label.pack(fill="both", anchor="center")
        self.point_specpath_input_frame = tk.Frame(self)
        self.point_specpath_input_frame.config(height=50)
        self.point_specpath_input_frame.pack(side="top", fill="x", anchor="center")
        ### entry-field to enter the image path
        # (a dead "global pic_path" statement was removed here: the
        # module-level name was declared but never assigned)
        self.pic_path = tk.Entry(self.point_specpath_input_frame)
        self.pic_path["bg"] = "mint cream"
        self.pic_path["fg"] = "grey25"
        self.pic_path["bd"] = 2
        self.pic_path["cursor"] = "xterm"
        self.pic_path["width"] = 80
        self.pic_path.delete(0, "end")
        self.pic_path.insert(0, "C:/output")
        self.pic_path.pack(side="left", fill="x", expand=True, anchor="center", padx=50, pady=10)
        self.pic_path.bind("<Return>", self.upload_by_enter)
        ### in the default-status the uploaded image will be resized, because the image should be taken by powerful
        ### cameras; the box should be checked if smaller structures are considered
        self.resize_decision = True
        self.resize_checkbox = ttk.Checkbutton(self.point_specpath_input_frame, command=self.image_resize_question)
        self.resize_checkbox["text"] = "Not Resize image"
        self.resize_checkbox.pack(side="left", anchor="center")
        ### upload button, which will create the canvas with the image specified in the image path entry field
        self.uploadbutton = ttk.Button(self.point_specpath_input_frame,
                                       command=self.image_to_canvas)
        self.uploadbutton["style"] = 'button_design.TButton'
        self.uploadbutton["cursor"] = "circle"
        self.uploadbutton["text"] = "✅"
        self.uploadbutton.pack(side="right", anchor="center", padx=50, pady=10)

    def upload_by_enter(self, event):
        '''Keyboard shortcut: pressing Enter in the path entry triggers the
        same upload as the ✅ button.'''
        self.image_to_canvas()

    def image_resize_question(self):
        '''Checkbox callback: once the box is checked the image will NOT be
        resized on load.'''
        self.resize_decision = False
        return self.resize_decision

    def image_to_canvas(self):
        '''Load the image named in the path entry and build the working UI
        (scrollable side bar plus image canvas); shows an error box when the
        file cannot be found.'''
        try:
            # probe the path first: a missing file raises IOError before any UI is built
            f = open(self.pic_path.get())
            f.close()
            self.image_path = self.pic_path.get()
            ### initializing the lists used to store the marked points and names
            self.all_points = []
            self.x_cor_points = []
            self.y_cor_points = []
            self.distance_names = []
            self.name_and_distance = []
            self.pointname = "No Pointname given"
            ### options and entry fields on the right side
            ### everything has been packed into a canvas to be able to scroll around it with growing size, due to points
            self.right_side_canvas = tk.Canvas(self, borderwidth=0, background="#ffffff")
            self.right_side_canvas.config(width=self.winfo_screenwidth()-1150, height=self.winfo_screenheight()-135)
            self.point_specimagespec_frame = tk.Frame(self.right_side_canvas, background="#ffffff")
            self.verticalbar_right = tk.Scrollbar(self, orient="vertical", command=self.right_side_canvas.yview)
            self.right_side_canvas.configure(yscrollcommand=self.verticalbar_right.set)
            self.verticalbar_right.pack(side="right", fill="y")
            self.right_side_canvas.pack(side="right", fill="x", expand=True)
            self.right_side_canvas.create_window((4, 4), window=self.point_specimagespec_frame,
                                                 anchor="n", tags="self.frame")
            ### with every pressed "Enter" the Frame will be updated and made available for the scrollbar
            self.point_specimagespec_frame.bind("<Configure>", self.onFrameConfigure)
            self.populate()
        except IOError:
            tkinter.messagebox.showinfo("Error Message", "Image couldn't be found in image path.")

    def onFrameConfigure(self, event):
        '''Reset the scroll region to encompass the inner frame.'''
        self.right_side_canvas.configure(scrollregion=self.right_side_canvas.bbox("all"))

    def populate(self):
        '''Build the side bar (name entry, output-file entry, ALL DONE button)
        and the scrollable image canvas; the side bar grows as labels are
        appended for each named distance / marked point.'''
        ### head-label: tells the user that this is the distance specifier
        self.spec_label = tk.Label(self.point_specimagespec_frame)
        self.spec_label["text"] = "Distance specifier"
        self.spec_label["font"] = "Courier 8 bold"
        self.spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        # short usage instructions
        self.spec_label1 = tk.Label(self.point_specimagespec_frame)
        self.spec_label1["text"] = "1.Define distance name|\n" \
                                   "2.Hit Enter|3.Mark both points"
        self.spec_label1["font"] = "Courier 8"
        self.spec_label1.pack(side="top", anchor="center", padx=5, pady=5)
        ### entry-field to enter the names of the distances
        self.point_spec = tk.Entry(self.point_specimagespec_frame)
        self.point_spec["bg"] = "mint cream"
        self.point_spec["fg"] = "grey25"
        self.point_spec["bd"] = 2
        self.point_spec["cursor"] = "xterm"
        self.point_spec["width"] = 40
        self.point_spec.pack(side="top", anchor="center", padx=5, pady=5)
        self.point_spec.bind("<Return>", self.point_name_append)
        # frame the name/coordinate echo labels are printed into
        self.print_to_frame = tk.Frame(self.point_specimagespec_frame)
        self.print_to_frame.config(width=80, height=80)
        self.print_to_frame.pack(side="top", fill="x", anchor="center")
        self.between_label = tk.Label(self.print_to_frame)
        self.between_label.config(width=40, height=1)
        self.between_label["font"] = "Courier 8 bold"
        self.between_label["text"] = " "
        self.between_label.pack(side="bottom", anchor="center", pady=1)
        ### label for the file specifier, which names the output file
        self.file_spec_label = tk.Label(self.point_specimagespec_frame)
        self.file_spec_label["text"] = "Name the Output File"
        self.file_spec_label["font"] = "Courier 8"
        self.file_spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        ### Entry where the output file name is specified
        self.save_to_file_name = tk.Entry(self.point_specimagespec_frame)
        self.save_to_file_name["bg"] = "mint cream"
        self.save_to_file_name["fg"] = "grey25"
        self.save_to_file_name["bd"] = 2
        self.save_to_file_name["cursor"] = "xterm"
        self.save_to_file_name["width"] = 40
        self.save_to_file_name.pack(side="top", anchor="center", padx=5, pady=5)
        self.save_to_file_name.bind("<Return>", self.all_done_by_enter)
        self.all_done_button = ttk.Button(self.point_specimagespec_frame,
                                          command=self.all_done_func)
        self.all_done_button["style"] = 'button_design.TButton'
        self.all_done_button["text"] = "✅ ALL DONE ✅"
        self.all_done_button["cursor"] = "circle"
        self.all_done_button.pack(side="bottom", anchor="center", padx=5, pady=5)
        ### frame for the image canvas and its scrollbars
        self.point_specimagecanvas_frame = tk.Frame(self)
        self.point_specimagecanvas_frame.config(width=1200)
        self.point_specimagecanvas_frame.pack(side="top", fill="x", anchor="center")
        self.image_resize(self.image_path)
        ### creates the Canvas where the image and scrollbars are stored
        self.photocanvas = tk.Canvas(self.point_specimagecanvas_frame, width=1150, height=1000)
        self.photocanvas.create_image(self.imagex.width() / 2, self.imagex.height() / 2,
                                      anchor="center", image=self.imagex, tags="bg_img")
        self.photocanvas.xview_moveto(0)
        self.photocanvas.yview_moveto(0)
        self.photocanvas["cursor"] = "crosshair"
        self.photocanvas["highlightthickness"] = 5
        self.horizontalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.HORIZONTAL)
        self.horizontalbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.verticalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.VERTICAL)
        self.verticalbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.horizontalbar.config(command=self.photocanvas.xview)
        self.verticalbar.config(command=self.photocanvas.yview)
        self.photocanvas.config(width=950, height=1000)
        self.photocanvas.config(scrollregion=(2, 2, self.imagex.width(), self.imagex.height()))
        self.photocanvas.config(xscrollcommand=self.horizontalbar.set, yscrollcommand=self.verticalbar.set)
        self.photocanvas.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        ### only the left mouse button is bound: each distance takes exactly
        ### two clicks, the connecting line is drawn automatically
        self.photocanvas.bind("<Button-1>", self.point)

    def image_resize(self, image_path):
        '''Load the image at *image_path* into ``self.imagex``, shrinking wide
        images by an integer factor so they fit the canvas; records original
        and displayed dimensions plus the applied size_factor.'''
        # Load once; both paths need the image and its original dimensions.
        self.imagex = ImageTk.PhotoImage(file=image_path)
        self.originalwidth, self.originalheight = self.imagex.width(), self.imagex.height()
        self.size_factor = 1
        ### shrink only when resizing is enabled and the image is wider than 1000 px
        if self.resize_decision and self.imagex.width() > 1000:
            ### the size_factor is an individual measure, larger pictures will have a larger size_factor
            self.size_factor = floor(self.imagex.width() / 1000)
            # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow
            # releases (use Image.LANCZOS there) — kept for compatibility.
            image = Image.open(self.image_path)
            image = image.resize((self.imagex.width() // self.size_factor, self.imagex.height() // self.size_factor)
                                 , Image.ANTIALIAS)
            self.imagex = ImageTk.PhotoImage(image)
        self.manipulatedwidth, self.manipulatedheight = self.imagex.width(), self.imagex.height()

    def point_name_append(self, event):
        '''Read a distance name from the entry field, store it in
        ``self.distance_names`` and echo it in the side bar.'''
        ### receives the distance name from the entry area and appends it to the list
        self.pointname = self.point_spec.get()
        self.distance_names.append(self.pointname)
        self.value_label = tk.Label(self.print_to_frame)
        self.value_label.config(width=40, height=1)
        self.value_label["font"] = "Courier 8 bold"
        self.value_label["bg"] = "SeaGreen1"
        self.value_label["fg"] = "black"
        self.value_label.pack(side="top", anchor="center", pady=1)
        ### echoes "(n) name" so the user can track the running number
        self.value_label.configure(text="(%d) %s" % (len(self.distance_names), self.pointname))
        self.point_spec.delete(0, 'end')
        return self.distance_names, self.pointname

    def point(self, event):
        '''Left-click handler: marks the clicked spot with an oval; every
        second click completes a distance, draws the connecting line and
        labels it with the most recently entered distance name.'''
        self.x, self.y = self.photocanvas.canvasx(event.x), self.photocanvas.canvasy(event.y)
        self.x_cor, self.y_cor = int(self.x), int(self.y)
        self.last_point = self.photocanvas.create_oval(self.x_cor - 3, self.y_cor - 3,
                                                       self.x_cor + 3, self.y_cor + 3, fill="tomato")
        ### appending the x-coordinate to lists
        self.all_points.append(self.x_cor)
        self.x_cor_points.append(self.x_cor)
        ### appending the y-coordinate to lists
        self.all_points.append(self.y_cor)
        self.y_cor_points.append(self.y_cor)
        if (len(self.x_cor_points) % 2 == 0):
            ### a pair of clicks is complete: connect its two end points
            # (a dead "global theline" statement was removed here)
            self.photocanvas.create_line(self.all_points[-4], self.all_points[-3], self.all_points[-2],
                                         self.all_points[-1], tags="theline", width=2, fill="tomato")
            # side-bar label echoing both end points of the finished distance
            self.coor_label = tk.Label(self.print_to_frame)
            self.coor_label.config(width=40, height=1)
            self.coor_label["font"] = "Courier 8 bold"
            self.coor_label["bg"] = "CadetBlue1"
            self.coor_label["fg"] = "black"
            self.coor_label.pack(side="top", anchor="center", pady=1)
            self.coor_label.configure(text="(%d)X1:%d|Y1:%d-->X2:%d|Y2:%d" % ((len(self.all_points) / 4),
                                                                              self.all_points[-4], self.all_points[-3],
                                                                              self.all_points[-2], self.all_points[-1]))
            try:
                ### label the completed distance on the canvas with its name
                self.label_on_canvas = tk.Label(self.photocanvas)
                self.label_on_canvas["font"] = "Courier 6"
                self.label_on_canvas["bg"] = "White"
                self.label_on_canvas["fg"] = "black"
                self.label_on_canvas["text"] = ""
                self.label_on_canvas.pack()
                self.label_on_canvas.configure(text="(%d) %s" % ((len(self.all_points)/4), self.pointname))
                ### if/else to arrange the labels away from the center,
                ### which could otherwise cover important parts of the image
                if self.x_cor < (self.imagex.width() // 2):  # on left side
                    if self.y_cor < (self.imagex.height() // 2):  # on top
                        self.photocanvas.create_window(self.x_cor - 25, self.y_cor - 25, window=self.label_on_canvas)
                    else:  # on bottom
                        self.photocanvas.create_window(self.x_cor - 25, self.y_cor + 25, window=self.label_on_canvas)
                else:  # on right side
                    if self.y_cor < (self.imagex.height() // 2):  # on top
                        self.photocanvas.create_window(self.x_cor + 25, self.y_cor - 25, window=self.label_on_canvas)
                    else:  # on bottom
                        self.photocanvas.create_window(self.x_cor + 25, self.y_cor + 25, window=self.label_on_canvas)
            except IndexError:
                ### if no distance has been named an IndexError will occur
                # NOTE(review): self.pointname is always initialized, so this
                # path looks unreachable; kept to preserve behavior — confirm.
                tkinter.messagebox.showinfo("Error Message", "Marked points on Image hasn't been defined yet")
                self.label_on_canvas.configure(text="undefined point")
                self.photocanvas.create_window(self.x_cor - 25, self.y_cor - 25, window=self.label_on_canvas)
                # undo the click: remove the oval, the side-bar label and the coordinates
                self.photocanvas.delete(self.last_point)
                self.coor_label.destroy()
                del self.all_points[-2:]
                # NOTE(review): all_points loses one point here but the x/y
                # lists lose both points of the pair — confirm intended.
                del self.x_cor_points[-2:]
                del self.y_cor_points[-2:]
        return self.all_points

    def all_done_by_enter(self, event):
        '''Keyboard shortcut: hitting Enter in the output-file-name entry
        behaves exactly like pressing the "ALL DONE" button.'''
        self.all_done_func()

    def all_done_func(self):
        '''Handler for the "ALL DONE" button / Enter key: validates the input,
        saves a snapshot of the canvas, writes distances and ratios to a CSV
        file and quits; on validation failure only an error box is shown.'''
        ### report how long the user spent marking and naming the distances
        enter_time = time.time()
        print("Enter Time: --- %d minutes %0.3f seconds ---" %
              (((enter_time - start_time) // 60), (enter_time - start_time) % 60))
        self.num_of_calcs(len(self.distance_names))
        if self.all_points == [] or self.distance_names == []:
            ### either no point has been marked or no distance has been named
            tkinter.messagebox.showinfo("Error Message", "No points have been marked on the image \n"
                                                         "or none of the points have been named!")
        elif (len(self.all_points)/4) != len(self.distance_names):
            ### each named distance contributes 4 values (two x/y points)
            tkinter.messagebox.showinfo("Error Message",
                                        "Number of marked points and Number of Named points doesn't match!")
        else:
            ### everything is ok --> save the visible canvas and write the CSV
            self.snapsaveCanvas()
            # NOTE(review): 'C:/output' is concatenated without a path
            # separator, so the file lands in C:/ as "output<name>.csv".
            with open('C:/output'
                      + self.save_to_file_name.get() + '.csv', "w") as self.csv_file:
                self.file_to_write = csv.writer(self.csv_file, lineterminator='\n')
                self.file_to_write.writerow(["--------MAIN INFORMATION ABOUT THE FILE--------"])
                self.file_to_write.writerow(["FILE NAME", self.save_to_file_name.get()])
                self.file_to_write.writerow(["PATH IMAGE", self.pic_path.get()])
                self.file_to_write.writerow(["DATE CREATED", str(datetime.now())])
                self.file_to_write.writerow(["ORIGINAL IMAGE DIMENSION", self.originalwidth, self.originalheight])
                self.file_to_write.writerow(["COMPRESSED IMAGE DIMENSION", self.manipulatedwidth,
                                             self.manipulatedheight, "Factor %s" % (self.size_factor)])
                ### calls the points and calculations function
                self.all_points_and_calculations_to_file()
            # (the with-statement closes the file; the former explicit close() was redundant)
            calc_time = time.time()
            print("Calc Time: --- %d minutes %0.3f seconds ---" %
                  (((calc_time - enter_time) // 60), (calc_time - enter_time) % 60))
            ### terminates the program
            self.quit()

    def num_of_calcs(self, x):
        '''Print (and return) the theoretical calculation counts for *x*
        marked distances, i.e. 2*x clicked points: C(n,2) pairwise distances
        among all points and C(d,2) ratio pairs among those. The former
        accumulation loops were triangular-number sums, replaced by their
        closed forms. Returns (points, distances, ratios); callers may
        ignore the return value.'''
        number = x*2
        print('Number of points:', number)
        firstresult = number * (number - 1) // 2     # == sum(range(number))
        print('Number of distances:', firstresult)
        secondresult = firstresult * (firstresult - 1) // 2  # == sum(range(firstresult))
        print('Number of ratios:', secondresult)
        return number, firstresult, secondresult

    def all_points_and_calculations_to_file(self):
        '''Write two CSV sections: each named distance with both end points
        and its length, then the ratio between every pair of distances (in
        both directions). Consumes the x/y coordinate lists and
        ``self.name_and_distance`` destructively via pop(0).'''
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["--------COORDINATES OF ALL POINTS--------"])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["NAME OF THE POINT", "X1-COORDINATE", "Y1-COORDINATE",
                                     "X2-COORDINATE", "Y2-COORDINATE", "DISTANCE"])
        for i in range(len(self.distance_names)):
            ### pops the two end points of distance i off the coordinate lists
            self.name_and_distance.append(self.distance_names[i])
            self.x_zero = self.x_cor_points.pop(0)
            self.y_zero = self.y_cor_points.pop(0)
            self.x_one = self.x_cor_points.pop(0)
            self.y_one = self.y_cor_points.pop(0)
            ### Euclidean length via the Pythagorean theorem
            result = sqrt((self.x_zero - self.x_one) ** 2 + (self.y_zero - self.y_one) ** 2)
            self.name_and_distance.append(result)
            self.file_to_write.writerow([self.distance_names[i], self.x_zero, self.y_zero,
                                         self.x_one, self.y_one, result])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["--------RATIO BETWEEN EACH DISTANCES--------"])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["1ST DIST/2ND DIST", "RATIO"])
        while self.name_and_distance != []:
            ### pops one (name, value) pair off the front of the list
            distance_name_zero = self.name_and_distance.pop(0)
            num1 = self.name_and_distance.pop(0)
            number1 = float(num1)
            for i in range(0, len(self.name_and_distance), 2):
                ### distance_name_zero is then compared to all remaining distances
                distance_name_one = self.name_and_distance[i]
                num2 = self.name_and_distance[i + 1]
                number2 = float(num2)
                # each ratio is written in both directions (a/b and b/a)
                normal_result = number1 / number2
                reversed_result = number2 / number1
                self.file_to_write.writerow(["%s/%s" % (distance_name_zero, distance_name_one), "%0.4f" % (normal_result)])
                self.file_to_write.writerow(["%s/%s" % (distance_name_one, distance_name_zero), "%0.4f" % (reversed_result)])
        self.file_to_write.writerow([""])

    def snapsaveCanvas(self):
        '''Capture the visible part of the image canvas and save it next to
        the CSV output as "<name>_image_out.jpg".'''
        canvas = self.canvas_info()  # window coordinates of the canvas
        self.grabcanvas = ImageGrab.grab(bbox=canvas).save('C:/output'
                                                           + self.save_to_file_name.get() + '_image_out' + '.jpg')

    def canvas_info(self):
        '''Return the on-screen bounding box (x, y, x1, y1) of the photo
        canvas, for use with ImageGrab.'''
        x = self.photocanvas.winfo_rootx()+self.photocanvas.winfo_x()
        y = self.photocanvas.winfo_rooty()+self.photocanvas.winfo_y()
        x1 = x+self.photocanvas.winfo_width()
        y1 = y+self.photocanvas.winfo_height()
        box = (x, y, x1, y1)
        return box
class Angles_spec_mode(tk.Frame):
    '''Placeholder page for the angle-specific mode: shows the application
    title, the mode heading and navigation buttons to the other modes.'''

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # application title across the top
        self.title_label = tk.Label(self, text=title, font="Courier 14 bold")
        self.title_label.grid(columnspan=4, row=0, pady=gen_pady, padx=gen_padx)
        # heading naming this mode
        self.main_label = tk.Label(self, text="Angles specific mode", font=the_font)
        self.main_label.grid(columnspan=4, row=1, pady=gen_pady, padx=gen_padx)
        # navigation row: one button per reachable page
        self.startpage_button = ttk.Button(
            self, text="Startpage",
            command=lambda: controller.show_frame(StartPage))
        self.startpage_button.grid(column=0, row=2)
        self.point_spec_button = ttk.Button(
            self, text="Point_spec_mode_file_get",
            command=lambda: controller.show_frame(Point_spec_mode))
        self.point_spec_button.grid(column=1, row=2)
        self.distance_spec_button = ttk.Button(
            self, text="Distance_spec_mode",
            command=lambda: controller.show_frame(Distance_spec_mode))
        self.distance_spec_button.grid(column=2, row=2)
        self.area_spec_button = ttk.Button(
            self, text="Area_spec_mode",
            command=lambda: controller.show_frame(Area_spec_mode))
        self.area_spec_button.grid(column=3, row=2)
class Area_spec_mode(tk.Frame):
    '''Placeholder page for the area-specific mode: shows the application
    title, the mode heading and navigation buttons to the other modes.'''

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # application title across the top
        self.title_label = tk.Label(self, text=title, font="Courier 14 bold")
        self.title_label.grid(columnspan=4, row=0, pady=gen_pady, padx=gen_padx)
        # heading naming this mode
        self.main_label = tk.Label(self, text="Area specific mode", font=the_font)
        self.main_label.grid(columnspan=4, row=1, pady=gen_pady, padx=gen_padx)
        # navigation row: one button per reachable page
        self.startpage_button = ttk.Button(
            self, text="Startpage",
            command=lambda: controller.show_frame(StartPage))
        self.startpage_button.grid(column=0, row=2)
        self.point_spec_button = ttk.Button(
            self, text="Point_spec_mode_file_get",
            command=lambda: controller.show_frame(Point_spec_mode))
        self.point_spec_button.grid(column=1, row=2)
        self.distance_spec_button = ttk.Button(
            self, text="Distance_spec_mode",
            command=lambda: controller.show_frame(Distance_spec_mode))
        self.distance_spec_button.grid(column=2, row=2)
        self.angles_spec_button = ttk.Button(
            self, text="Angles_spec_mode",
            command=lambda: controller.show_frame(Angles_spec_mode))
        self.angles_spec_button.grid(column=3, row=2)
if __name__ == "__main__":
    # start_time is read as a module global by the mode pages (e.g. the
    # "All done" handler) to report how long the user spent marking points.
    start_time = time.time()
    application = MorphoMath()
    application.mainloop()  # blocks here until the GUI quits
    end_time = time.time()
    print("Successful program execution")
    print("Total Time: --- %d minutes %0.3f seconds ---" %
          (((end_time - start_time) // 60), (end_time - start_time) % 60))
|
#!/usr/bin/python
'''
Code written in Python 3.5 by KiliBio
'''
'''
Missions accomplished:
General design ✓
Point-specific mode ✓
Ratio-specific mode
Angle-specific mode
Area-specific mode
all to log_file ✓
'''
import tkinter as tk
import tkinter.messagebox
import csv
from tkinter import ttk
from PIL import ImageTk, Image, ImageGrab
from math import sqrt, floor
from datetime import datetime
import time
# Application-wide UI constants shared by every page class below.
title = "MorphoMathv1.0"   # window title / headline shown on every page
the_font = ("Courier 10")  # default font for secondary labels
gen_padx = 5               # default horizontal grid padding (px)
gen_pady = 2               # default vertical grid padding (px)
class MorphoMath(tk.Tk):
    """Main application window.

    Builds the menu bar and every mode page, and acts as the "controller"
    object passed to each page so they can switch between one another via
    show_frame().
    """

    def __init__(self, *arg, **kwargs):  # the init function accepts arguments and keyword arguments, will always run
        '''__init__ function to initiate the automatic function to create the widgets'''
        tk.Tk.__init__(self, *arg, **kwargs)
        # sets the size of the main window automatically to the size of the screen
        self.master_width, self.master_height = self.winfo_screenwidth(), self.winfo_screenheight()
        print(self.master_width, self.master_height)
        self.geometry('%dx%d+0+0' % (self.master_width-18, self.master_height-85))
        # icon in the upper-left corner
        # NOTE(review): hard-coded path -- startup fails if the icon file is missing.
        tk.Tk.iconbitmap(self,
                         default="C:/morphoMathicon2.ico")
        # title of the window
        tk.Tk.wm_title(self, title)
        ttk.Style().configure('button_design.TButton', foreground='grey5',
                              background='RoyalBlue2', font='Courier 11 bold')
        self.mainframe = tk.Frame(self)
        #self.mainframe["bg"] = "khaki2"
        self.mainframe.pack(side="top", fill="both", expand=True)
        self.mainframe.grid_rowconfigure(0, weight=1)
        self.mainframe.grid_columnconfigure(0, weight=1)  # weight --> prioritizes things
        self["bg"] = "khaki2"
        # allframes maps each page class to its instance so show_frame can raise it.
        self.allframes = {}
        for frame in (WarningPage, StartPage,
                      Point_spec_mode, Distance_spec_mode, Angles_spec_mode, Area_spec_mode):
            '''this for loop consists of all windows in the application'''
            self.specific_frame = frame(self.mainframe, self)
            self.allframes[frame] = self.specific_frame
            self.specific_frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(WarningPage)
        self.menu = tk.Menu(self)
        self.config(menu=self.menu)
        self.FileMenu = tk.Menu(self.menu)
        self.menu.add_cascade(label="File", menu=self.FileMenu)
        # self IS the controller here, so menu entries can switch pages directly
        # (previously these commands were all dead, i.e. command=None).
        self.FileMenu.add_command(label="Point-Mode",
                                  command=lambda: self.show_frame(Point_spec_mode))
        self.FileMenu.add_separator()
        self.FileMenu.add_command(label="Export...", command=None)  # TODO: not implemented
        self.FileMenu.add_command(label="Import...", command=None)  # TODO: not implemented
        self.FileMenu.add_separator()
        self.FileMenu.add_command(label="Quit", command=self.quit)  # cleanly stop mainloop
        self.EditMenu = tk.Menu(self.menu)
        self.menu.add_cascade(label="Edit", menu=self.EditMenu)
        self.EditMenu.add_command(label="Copy", command=None)   # TODO: not implemented
        self.EditMenu.add_command(label="Paste", command=None)  # TODO: not implemented
        self.EditMenu.add_command(label="Find", command=None)   # TODO: not implemented
        self.HelpMenu = tk.Menu(self.menu)
        self.menu.add_cascade(label="Help", menu=self.HelpMenu)
        self.HelpMenu.add_command(label="Help...", command=None)  # TODO: not implemented

    def show_frame(self, cont):
        """Raise the page instance registered for the page class *cont*."""
        self.specific_frame = self.allframes[cont]
        self.specific_frame.tkraise()

    def get_page(self, page_class):
        """Return the page instance registered for *page_class*."""
        return self.allframes[page_class]
class WarningPage(tk.Frame):
    """Disclaimer page shown first: the user must agree before the start page
    is revealed; disagreeing terminates the program."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # Outer container that centres all disclaimer content.
        self.warning_page_frame = tk.Frame(self, height=300, width=600)
        self.warning_page_frame.pack(anchor="center")
        # Spacer above the warning text.
        self.betweenframe = tk.Frame(self.warning_page_frame, height=200)
        self.betweenframe.pack(anchor="center")
        # Red warning banner.
        self.textframe = tk.Frame(self.warning_page_frame, height=200)
        self.textframe.pack(fill="both", anchor="center")
        self.warninglabel = tk.Label(self.textframe, font="Courier 10", bg="tomato")
        self.warninglabel["text"] = """WARNING: Use %s at your own risks!\n
        There is no promise of exact calculations or any kind of warranty!\n
        Check the source-code to make sure that the calculations are correct or in a way you want them to.""" % (title)
        self.warninglabel.pack()
        # Agree / Disagree buttons: agree shows the start page, disagree quits.
        self.buttonframe = tk.Frame(self.warning_page_frame)
        self.buttonframe.pack(anchor="center")
        self.agree_button = ttk.Button(
            self.buttonframe, style='button_design.TButton',
            text="Agree", cursor="circle",
            command=lambda: controller.show_frame(StartPage))
        self.agree_button.pack(side="left", pady=50, padx=50)
        self.disagree_button = ttk.Button(
            self.buttonframe, style='button_design.TButton',
            text="Disagree", cursor="circle",
            command=quit)
        self.disagree_button.pack(side="right", pady=50, padx=50)
        # Additional note below the buttons.  NOTE: the original re-used the
        # warninglabel attribute for this second label; that is kept here.
        self.textframe2 = tk.Frame(self.warning_page_frame, height=200)
        self.textframe2.pack(fill="both", anchor="center")
        self.warninglabel = tk.Label(self.textframe2, font="Courier 10")
        self.warninglabel["text"] = """From the author of this program it is strongly recommended,\n
        to use this program and the results with an absolute critical evaluation."""
        self.warninglabel.pack()
class StartPage(tk.Frame):  # Start Page
    """Landing page: the application headline, one navigation button per
    measurement mode, and a short description label for each mode."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # Headline bar at the top of the page.
        self.startpagelabelframe = tk.Frame(self)
        self.startpagelabelframe.config(height=50)
        self.startpagelabelframe.pack(side="top")
        self.startpagelabelframe.grid_rowconfigure(0, weight=1)
        self.startpagelabelframe.grid_columnconfigure(0, weight=1)
        self.title_label = tk.Label(self.startpagelabelframe)
        self.title_label["text"] = title
        self.title_label["font"] = "Courier 14 bold"
        self.title_label.pack(fill="both", anchor="center")
        # Row of four mode buttons (point / distance / angle / area).
        self.startpagebuttonframe = tk.Frame(self)
        self.startpagebuttonframe.config(height=50)
        self.startpagebuttonframe.pack(side="top", fill="x", anchor="center")
        self.point_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Point_spec_mode))
        self.point_spec_button["text"] = "Point Mode"
        self.point_spec_button["style"] = 'button_design.TButton'
        self.point_spec_button["cursor"] = "circle"
        self.point_spec_button.pack(side="left", anchor="center", padx=100, pady=50)
        self.distance_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Distance_spec_mode))
        self.distance_spec_button["text"] = "Distance Mode"
        self.distance_spec_button["style"] = 'button_design.TButton'
        self.distance_spec_button["cursor"] = "circle"
        self.distance_spec_button.pack(side="left", anchor="center", padx=100, pady=50)
        self.angles_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Angles_spec_mode))
        self.angles_spec_button["text"] = "Angle Mode"
        self.angles_spec_button["style"] = 'button_design.TButton'
        self.angles_spec_button["cursor"] = "circle"
        self.angles_spec_button.pack(side="right", anchor="center", padx=100, pady=50)
        self.area_spec_button = ttk.Button(self.startpagebuttonframe, command=lambda: controller.show_frame(Area_spec_mode))
        self.area_spec_button["text"] = "Area Mode"
        self.area_spec_button["style"] = 'button_design.TButton'
        self.area_spec_button["cursor"] = "circle"
        self.area_spec_button.pack(side="right", anchor="center", padx=100, pady=50)
        # Row of description labels below the buttons.
        # NOTE(review): this re-assigns self.startpagelabelframe (and later
        # self.point_spex_label), shadowing the earlier widgets of the same
        # attribute name -- the widgets still exist, only the references change.
        self.startpagelabelframe = tk.Frame(self)
        self.startpagelabelframe.config(height=50)
        self.startpagelabelframe.pack(side="top", fill="x", anchor="center")
        self.point_spec_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spec_label["text"] = "The Point Mode allows you to mark \n" \
                                        "specific morphological landmarks on \n" \
                                        "the plant.\n" \
                                        "The distances between all the points \n" \
                                        "and ratios between the all those \n" \
                                        "distances are than calculated and \n" \
                                        "stored in a separate csv-file."
        self.point_spec_label.pack(side="left", anchor="center", padx=10, pady=20)
        self.point_spex_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spex_label["text"] = "The Distance Mode allows you \n" \
                                        "to mark morphological \n" \
                                        "significant distances, \n" \
                                        "of which the ratio are directly \n" \
                                        "calculated and stored in an \n" \
                                        "additional separate csv-file."
        self.point_spex_label.pack(side="left", anchor="center", padx=10, pady=20)
        self.point_spet_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spet_label["text"] = "YOU NEED TO RELATE THIS \n" \
                                        "DATA TO SOMETHING TO \n" \
                                        "TO BECOME FROM VALUE!!! \n" \
                                        "The Area Mode allows \n" \
                                        "you to store one or \n" \
                                        "more specific areas in \n" \
                                        "a structure with morphological \n" \
                                        "relevant landmark data and \n" \
                                        "stores those values in a \n" \
                                        "separate csv.file."
        self.point_spet_label.pack(side="right", anchor="center", padx=10, pady=20)
        self.point_spex_label = tk.Label(self.startpagelabelframe, width=40, height=200)
        self.point_spex_label["text"] = "YOU NEED TO RELATE THIS \n" \
                                        "DATA TO SOMETHING TO \n" \
                                        "TO BECOME FROM VALUE!!! \n" \
                                        "The angle mode allows \n" \
                                        "you to store one or more\n" \
                                        "specific angles in a structure \n" \
                                        "with morphological relevant landmark \n" \
                                        "data and stores those values in \n" \
                                        "a separate csv.file"
        self.point_spex_label.pack(side="right", anchor="center", padx=10, pady=20)
class Point_spec_mode(tk.Frame):
'''the Point-specific mode allow the user to specify an arbitrary amount of points and get a file of ratios'''
    def __init__(self, parent, controller):
        """Build the header, image-path entry, resize checkbox and upload button.

        The image canvas itself is created later by image_to_canvas()/populate()
        once the user has supplied a valid image path.
        """
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.point_speclabelframe = tk.Frame(self)
        self.point_speclabelframe.config(height=40)
        self.point_speclabelframe.pack(side="top")
        self.point_speclabelframe.grid_rowconfigure(0, weight=1)
        self.point_speclabelframe.grid_columnconfigure(0, weight=1)
        self.main_label = tk.Label(self.point_speclabelframe)
        self.main_label["text"] = "Point specific mode"
        self.main_label["font"] = "Courier 11 bold"
        self.main_label.pack(fill="both", anchor="center")
        self.point_specpath_input_frame = tk.Frame(self)
        self.point_specpath_input_frame.config(height=50)
        self.point_specpath_input_frame.pack(side="top", fill="x", anchor="center")
        ### entry-field to enter the image path
        # NOTE(review): the global pic_path declared here is never assigned in
        # this method (only self.pic_path is) -- the declaration appears unused.
        global pic_path
        self.pic_path = tk.Entry(self.point_specpath_input_frame)
        self.pic_path["bg"] = "mint cream"
        self.pic_path["fg"] = "grey25"
        self.pic_path["bd"] = 2
        self.pic_path["cursor"] = "xterm"
        self.pic_path["width"] = 80
        self.pic_path.delete(0, "end")
        self.pic_path.insert(0, "C:/input")  # default image path shown to the user
        self.pic_path.pack(side="left", fill="x", expand=True, anchor="center", padx=50, pady=10)
        self.pic_path.bind("<Return>", self.upload_by_enter)
        ### in the default-status the uploaded image will be resized, because the image should be taken by powerful
        ### cameras; the box should be checked if smaller structures are considered
        self.resize_decision = True
        self.resize_checkbox = ttk.Checkbutton(self.point_specpath_input_frame,
                                               command=self.image_resize_question)
        self.resize_checkbox["text"] = "Not Resize image"
        self.resize_checkbox.pack(side="left", anchor="center")
        ### upload button, which will create the canvas with the image specified in the image path entry field
        self.uploadbutton = ttk.Button(self.point_specpath_input_frame,
                                       command=self.image_to_canvas)
        self.uploadbutton["style"] = 'button_design.TButton'
        self.uploadbutton["cursor"] = "circle"
        self.uploadbutton["text"] = "✅"
        self.uploadbutton.pack(side="right", anchor="center", padx=50, pady=10)
def upload_by_enter(self, event):
'''this function sorely connects the "Press Enter" function the upload function'''
self.image_to_canvas()
def image_resize_question(self):
'''this function will activated through checking the box and the image will not be resized'''
self.resize_decision = False
return self.resize_decision
    def image_to_canvas(self):
        '''this function loads the image: validates the entered path, resets all
        point bookkeeping lists, and builds the scrollable sidebar plus (via
        populate()) the image canvas'''
        try:
            '''if the image exists the image will be opened, if not an IOError will be returned'''
            # Existence check by opening/closing the file (LBYL via exception).
            f = open(self.pic_path.get())
            f.close()
            self.image_path = self.pic_path.get()
            ### initialing lists, which are used by the program to store the values of the points
            self.all_points = []              # flat [x0, y0, x1, y1, ...] list of marked pixels
            self.x_cor_points = []            # x coordinates only
            self.y_cor_points = []            # y coordinates only
            self.point_names = []             # user-supplied point names
            self.names_with_points = []       # interleaved [name, x, y, ...] built on export
            self.distance_between_points = []  # interleaved [label, value, ...] built on export
            self.pointname = "No Pointname given"
            ### options and entry fields on the right side
            ### everything has been packed into a canvas to be able to scroll around it with growing size, due to points
            self.right_side_canvas = tk.Canvas(self, borderwidth=0, background="#ffffff")
            self.right_side_canvas.config(width=self.winfo_screenwidth() - 1150,
                                          height=self.winfo_screenheight() - 135)
            self.point_specimagespec_frame = tk.Frame(self.right_side_canvas, background="#ffffff")
            self.verticalbar_right = tk.Scrollbar(self, orient="vertical", command=self.right_side_canvas.yview)
            self.right_side_canvas.configure(yscrollcommand=self.verticalbar_right.set)
            self.verticalbar_right.pack(side="right", fill="y")
            self.right_side_canvas.pack(side="right", fill="x", expand=True)
            self.right_side_canvas.create_window((4, 4), window=self.point_specimagespec_frame,
                                                 anchor="n", tags="self.frame")
            ### with every pressed "Enter" the Frame will be updated and made available for the scrollbar
            self.point_specimagespec_frame.bind("<Configure>", self.onFrameConfigure)
            self.populate()
            # self.right_side_canvas.config(scrollregion=(self.right_side_canvas.winfo_x(),
            #                                             self.right_side_canvas.winfo_y(),
            #                                             self.right_side_canvas.winfo_width(),
            #                                             self.right_side_canvas.winfo_height()))
        except IOError:
            tkinter.messagebox.showinfo("Error Message", "Image couldn't be found in image path.")
def onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.right_side_canvas.configure(scrollregion=self.right_side_canvas.bbox("all"))
    def populate(self):
        ''' this function lets the sidebar grow with newly added labels:
        it builds the point-name entry, the output-file-name entry, the
        "ALL DONE" button and the scrollable image canvas with mouse bindings'''
        ### head-label: tells the user that this is the point specifier
        self.spec_label = tk.Label(self.point_specimagespec_frame)
        self.spec_label["text"] = "Point specifier"
        self.spec_label["font"] = "Courier 8 bold"
        self.spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        self.spec_label1 = tk.Label(self.point_specimagespec_frame)
        self.spec_label1["text"] = "1.Define point|2.Hit Enter|3.Mark"
        self.spec_label1["font"] = "Courier 8"
        self.spec_label1.pack(side="top", anchor="center", padx=5, pady=5)
        ### entry-field to enter the names of the points
        self.point_spec = tk.Entry(self.point_specimagespec_frame)
        self.point_spec["bg"] = "mint cream"
        self.point_spec["fg"] = "grey25"
        self.point_spec["bd"] = 2
        self.point_spec["cursor"] = "xterm"
        self.point_spec["width"] = 40
        self.point_spec.pack(side="top", anchor="center", padx=5, pady=5)
        self.point_spec.bind("<Return>", self.point_name_append)
        # Frame that collects one label per named point / marked coordinate.
        self.print_to_frame = tk.Frame(self.point_specimagespec_frame)
        self.print_to_frame.config(width=80, height=80)
        self.print_to_frame.pack(side="top", fill="x", anchor="center")
        self.between_label = tk.Label(self.print_to_frame)
        self.between_label.config(width=40, height=1)
        self.between_label["font"] = "Courier 8 bold"
        self.between_label["text"] = " "
        self.between_label.pack(side="bottom", anchor="center", pady=1)
        ### label to for the file specifier, which names the output file
        self.file_spec_label = tk.Label(self.point_specimagespec_frame)
        self.file_spec_label["text"] = "Name the Output File"
        self.file_spec_label["font"] = "Courier 8"
        self.file_spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        ### Entry where the output file name is specified
        self.save_to_file_name = tk.Entry(self.point_specimagespec_frame)
        self.save_to_file_name["bg"] = "mint cream"
        self.save_to_file_name["fg"] = "grey25"
        self.save_to_file_name["bd"] = 2
        self.save_to_file_name["cursor"] = "xterm"
        self.save_to_file_name["width"] = 40
        self.save_to_file_name.pack(side="top", anchor="center", padx=5, pady=5)
        self.save_to_file_name.bind("<Return>", self.all_done_by_enter)
        self.all_done_button = ttk.Button(self.point_specimagespec_frame,
                                          command=self.all_done_func)
        self.all_done_button["style"] = 'button_design.TButton'
        self.all_done_button["cursor"] = "circle"
        self.all_done_button["text"] = "✅ ALL DONE ✅"
        self.all_done_button.pack(side="bottom", anchor="center", padx=5, pady=5)
        ### frame for the imagecanvas and the image all_points
        self.point_specimagecanvas_frame = tk.Frame(self)
        self.point_specimagecanvas_frame.config(width=1200)
        self.point_specimagecanvas_frame.pack(side="top", fill="x", anchor="center")
        self.image_resize(self.image_path)  # loads self.imagex (possibly downscaled)
        ### creates the Canvas where the image and scrollbars are stored
        self.photocanvas = tk.Canvas(self.point_specimagecanvas_frame, width=1150, height=1000)
        self.photocanvas.create_image(self.imagex.width() / 2, self.imagex.height() / 2,
                                      anchor="center", image=self.imagex, tags="bg_img")
        self.photocanvas.xview_moveto(0)
        self.photocanvas.yview_moveto(0)
        self.photocanvas["cursor"] = "crosshair"
        self.photocanvas["highlightthickness"] = 5
        self.horizontalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.HORIZONTAL)
        self.horizontalbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.verticalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.VERTICAL)
        self.verticalbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.horizontalbar.config(command=self.photocanvas.xview)
        self.verticalbar.config(command=self.photocanvas.yview)
        self.photocanvas.config(width=950, height=1000)
        self.photocanvas.config(scrollregion=(2, 2, self.imagex.width(), self.imagex.height()))
        self.photocanvas.config(xscrollcommand=self.horizontalbar.set, yscrollcommand=self.verticalbar.set)
        self.photocanvas.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        ### binds left-mouse-button to the point and the right-mouse button to the graph method
        self.photocanvas.bind("<Button-1>", self.point)
        self.photocanvas.bind("<Button-3>", self.graph)
    def image_resize(self, image_path):
        ''' gets the image_path and stores it in the imagex variable;
        also records original/compressed dimensions and the integer
        size_factor used for the downscaling '''
        if self.resize_decision == True:
            ### if the resizing_box remains empty the image will be resized according to this rules
            self.imagex = ImageTk.PhotoImage(file=image_path)
            ### this gets the dimensions of the inputted image
            self.originalwidth, self.originalheight = self.imagex.width(), self.imagex.height()
            ### resizing the image
            if self.imagex.width() > 1000:
                ### the size_factor is an individual measure, larger pictures will have a larger size_factor
                self.size_factor = floor(self.imagex.width() / 1000)
                ### and here is the size_factor applied
                # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
                # Pillow releases (use Image.LANCZOS) -- confirm the Pillow
                # version this is pinned to.
                image = Image.open(self.image_path)
                image = image.resize(
                    (self.imagex.width() // self.size_factor, self.imagex.height() // self.size_factor)
                    , Image.ANTIALIAS)
                self.imagex = ImageTk.PhotoImage(image)
            else:
                self.size_factor = 1
                pass
        else:
            ### if the resizing_box is checked the image remains in the same size and and size_factor will be 1
            self.imagex = ImageTk.PhotoImage(file=image_path)
            self.originalwidth, self.originalheight = self.imagex.width(), self.imagex.height()
            self.size_factor = 1
        # Dimensions actually shown on the canvas (after any downscaling).
        self.manipulatedwidth, self.manipulatedheight = self.imagex.width(), self.imagex.height()
    def point_name_append(self, event):
        '''recieves the name of the points as specified by the user,
        appends it to the list and prints it to the window'''
        ### recieves the point name from the entry area and appends it to the point_name list
        self.pointname = self.point_spec.get()
        self.point_names.append(self.pointname)
        # Green confirmation label in the sidebar for the newly named point.
        self.value_label = tk.Label(self.print_to_frame)
        self.value_label.config(width=40, height=1)
        self.value_label["font"] = "Courier 8 bold"
        self.value_label["bg"] = "SeaGreen1"
        self.value_label["fg"] = "black"
        self.value_label.pack(side="top", anchor="center", pady=1)
        ### this adds the number of the point and the actual number to the point-list
        self.value_label.configure(text="(%d) %s" % (len(self.point_names), self.pointname))
        self.point_spec.delete(0, 'end')  # clear the entry for the next name
        return self.point_names, self.pointname
    def point(self, event):
        '''this function creates a point(oval) around the clicked point,
        records its canvas coordinates and places a name label next to it'''
        self.x, self.y = self.photocanvas.canvasx(event.x), self.photocanvas.canvasy(event.y)
        self.x_cor, self.y_cor = int(self.x), int(self.y)
        self.last_point = self.photocanvas.create_oval(self.x_cor - 3, self.y_cor - 3,
                                                       self.x_cor + 3, self.y_cor + 3, fill="tomato")
        ### appending the x-coordinate to lists
        self.all_points.append(self.x_cor)
        self.x_cor_points.append(self.x_cor)
        ### appending the y-coordinate to lists
        self.all_points.append(self.y_cor)
        self.y_cor_points.append(self.y_cor)
        # Blue coordinate label in the sidebar for this click.
        self.coor_label = tk.Label(self.print_to_frame)
        self.coor_label.config(width=40, height=1)
        self.coor_label["font"] = "Courier 8 bold"
        self.coor_label["bg"] = "CadetBlue1"
        self.coor_label["fg"] = "black"
        self.coor_label.pack(side="top", anchor="center", pady=1)
        self.coor_label.configure(
            text="(%d) X: %d | Y: %d" % ((len(self.all_points) / 2), self.x_cor, self.y_cor))
        try:
            ### if there are points in the all-points list, the point names will be displayed onto the screen
            # NOTE(review): with the active code path (self.pointname, no list
            # indexing) the IndexError branch below can only trigger if the
            # commented-out indexing variant is re-enabled -- confirm.
            self.label_on_canvas = tk.Label(self.photocanvas)
            self.label_on_canvas["font"] = "Courier 6"
            self.label_on_canvas["bg"] = "White"
            self.label_on_canvas["fg"] = "black"
            self.label_on_canvas["text"] = ""
            self.label_on_canvas.pack()
            self.label_on_canvas.configure(text="(%d) %s" % ((len(self.all_points) / 2), self.pointname))
            # self.label_on_canvas.configure(text="%s" % (self.point_names[(len(self.all_points) // 2) - 1]))
            ### if/else to arrange the coordinate value names away from the center
            ### which could possibly cover other important parts of the image
            if self.x_cor < (self.imagex.width() // 2):  # on left side
                if self.y_cor < (self.imagex.height() // 2):  # on top
                    self.photocanvas.create_window(self.x_cor - 35, self.y_cor - 35,
                                                   window=self.label_on_canvas)
                else:  # on bottom
                    self.photocanvas.create_window(self.x_cor - 35, self.y_cor + 35,
                                                   window=self.label_on_canvas)
            else:  # on right side
                if self.y_cor < (self.imagex.height() // 2):  # on top
                    self.photocanvas.create_window(self.x_cor + 35, self.y_cor - 35,
                                                   window=self.label_on_canvas)
                else:  # on bottom
                    self.photocanvas.create_window(self.x_cor + 35, self.y_cor + 35,
                                                   window=self.label_on_canvas)
        except IndexError:
            ### if no points have been named/identified an IndexError will occur
            # Roll back the click: remove oval, label and the stored coordinates.
            tkinter.messagebox.showinfo("Error Message", "Marked points on Image hasn't been defined yet")
            self.label_on_canvas.configure(text="undefined point")
            self.photocanvas.create_window(self.x_cor - 35, self.y_cor - 35, window=self.label_on_canvas)
            self.photocanvas.delete(self.last_point)
            self.coor_label.destroy()
            del self.all_points[-2:]
            del self.x_cor_points[-2:]
            del self.y_cor_points[-2:]
        return self.all_points
def graph(self, event):
'''this function creates a line between the points just to physically see the distance
without effecting anything in the calculations'''
global theline
self.photocanvas.create_line(self.all_points, tags="theline", width=4, fill="tomato")
def all_done_by_enter(self, event):
'''this function will be called by hitting Enter in the output file name entry area'''
self.all_done_func()
    def all_done_func(self):
        '''Handler: is called, when the "All done" Button is pressed or the Enter Button is hit;
        initiates calculations and print to file function'''
        ### returns the user the time the program has been used to mark and specify the points
        # Reads the module global start_time set in the __main__ block.
        enter_time = time.time()
        print("Enter Time: --- %d minutes %0.3f seconds ---" %
              (((enter_time - start_time) // 60), (enter_time - start_time) % 60))
        self.num_of_calcs(len(self.point_names))
        if self.all_points == [] or self.point_names == []:
            ### either no point has been marked on the foto or no points has been given a name
            tkinter.messagebox.showinfo("Error Message", "No points have been marked on the image \n"
                                                         "or none of the points have been named!")
        elif (len(self.all_points) / 2) != len(self.point_names):
            ### the number of marked points and the number of named points doesn't match
            tkinter.messagebox.showinfo("Error Message",
                                        "Number of marked points and Number of Named points doesn't match!")
        else:
            ### everything is ok --> visible part of the image-canvas will be printed and saved to the output folder
            self.snapsaveCanvas()
            # NOTE(review): 'C:/output' + name looks like it may be missing a
            # '/', so the CSV lands in C:\ with an 'output' prefix -- confirm.
            with open('C:/output'
                      + self.save_to_file_name.get() + '.csv', "w") as self.csv_file:
                self.file_to_write = csv.writer(self.csv_file, lineterminator='\n')
                self.file_to_write.writerow(["--------MAIN INFORMATION ABOUT THE FILE--------"])
                self.file_to_write.writerow(["FILE NAME", self.save_to_file_name.get()])
                self.file_to_write.writerow(["PATH IMAGE", self.pic_path.get()])
                self.file_to_write.writerow(["DATE CREATED", str(datetime.now())])
                self.file_to_write.writerow(
                    ["ORIGINAL IMAGE DIMENSION", self.originalwidth, self.originalheight])
                self.file_to_write.writerow(["COMPRESSED IMAGE DIMENSION", self.manipulatedwidth,
                                             self.manipulatedheight, "Factor %s" % (self.size_factor)])
                ### calls the points and calculations function
                self.all_points_and_calculations_to_file()
                # Redundant: the with-statement closes the file on exit anyway.
                self.csv_file.close()
            calc_time = time.time()
            print("Calc Time: --- %d minutes %0.3f seconds ---" %
                  (((calc_time - enter_time) // 60), (calc_time - enter_time) % 60))
            ### terminates the program
            self.quit()
def num_of_calcs(self, x):
'''control function, which returns the numbers calculations the program performs'''
number = x
print('Number of points:', number)
firstresult = 0
secondresult = 0
for i in range(0, number):
firstresult += i
print('Number of distances:', firstresult)
for j in range(0, firstresult):
secondresult += j
print('Number of ratios:', secondresult)
    def all_points_and_calculations_to_file(self):
        '''this function stores all points, all calculation about the distances and ratios in the image;
        NOTE: it destructively consumes self.names_with_points and
        self.distance_between_points (both are empty afterwards)'''
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["--------COORDINATES OF ALL POINTS--------"])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["NAME OF THE POINT", "X-COORDINATE", "Y-COORDINATE"])
        for i in range(len(self.point_names)):
            ### appends in point name list: NAME, X, Y and prints those three values to the output file
            self.names_with_points.append(self.point_names[i])
            self.names_with_points.append(self.x_cor_points[i])
            self.names_with_points.append(self.y_cor_points[i])
            self.file_to_write.writerow([self.point_names[i], self.x_cor_points[i], self.y_cor_points[i]])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["--------DISTANCES BETWEEN POINTS--------"])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["1ST POINT->2ND POINT", "DISTANCE"])
        while self.names_with_points != []:
            ### pops of the keys with their respective points from the first position
            key1 = self.names_with_points.pop(0)
            x1 = self.names_with_points.pop(0)
            y1 = self.names_with_points.pop(0)
            for i in range(0, len(self.names_with_points), 3):
                ### to calculate the distances all other distances between key1 are calculated
                key2 = self.names_with_points[i]
                x2 = self.names_with_points[i + 1]
                y2 = self.names_with_points[i + 2]
                ### distance result in a X-Y plane are simply calculated through the Pythagorean theorem
                result = sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
                ### the distances are than stored in a list in a similar fashion DISTANCE : NUMBER VALUE
                self.distance_between_points.append('%s->%s' % (key1, key2))
                self.distance_between_points.append(result)
                self.file_to_write.writerow(['%s->%s' % (key1, key2), "%0.4f" % result])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["--------RATIO BETWEEN EACH DISTANCES--------"])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["1ST DIST/2ND DIST", "RATIO"])
        while self.distance_between_points != []:
            ### the ratios are again calculated similarly
            ### the first distance is poped of with its value
            distance1 = self.distance_between_points.pop(0)
            num1 = self.distance_between_points.pop(0)
            number1 = float(num1)
            for i in range(0, len(self.distance_between_points), 2):
                ### distance1 is then compared to all other distances
                distance2 = self.distance_between_points[i]
                num2 = self.distance_between_points[i + 1]
                number2 = float(num2)
                # Each pair is written in both directions (a/b and b/a).
                normal_result = number1 / number2
                reversed_result = number2 / number1
                self.file_to_write.writerow(["%s/%s" % (distance1, distance2), "%0.4f" % (normal_result)])
                self.file_to_write.writerow(["%s/%s" % (distance2, distance1), "%0.4f" % (reversed_result)])
        self.file_to_write.writerow([""])
def snapsaveCanvas(self):
''' this function is called, when all inputs by the user are done and takes a screenshot of the image canvas'''
canvas = self.canvas_info() # Get Window Coordinates of Canvas
self.grabcanvas = ImageGrab.grab(bbox=canvas).save(
'C:/output'
+ self.save_to_file_name.get() + '_image_out' + '.jpg')
def canvas_info(self):
'''part of the snapsaveCanvas function, gets the information about the visible canvas'''
x = self.photocanvas.winfo_rootx() + self.photocanvas.winfo_x()
y = self.photocanvas.winfo_rooty() + self.photocanvas.winfo_y()
x1 = x + self.photocanvas.winfo_width()
y1 = y + self.photocanvas.winfo_height()
box = (x, y, x1, y1)
return box
class Distance_spec_mode(tk.Frame):
'''the Distance specific mode lets the user specify ratios that he/she thinks are significant to compare'''
    def __init__(self, parent, controller):
        """Build the header, image-path entry, resize checkbox and upload button
        for the distance-specific mode (mirrors Point_spec_mode.__init__)."""
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.point_speclabelframe = tk.Frame(self)
        self.point_speclabelframe.config(height=40)
        self.point_speclabelframe.pack(side="top")
        self.point_speclabelframe.grid_rowconfigure(0, weight=1)
        self.point_speclabelframe.grid_columnconfigure(0, weight=1)
        self.main_label = tk.Label(self.point_speclabelframe)
        self.main_label["text"] = "Distance specific mode"
        self.main_label["font"] = "Courier 11 bold"
        self.main_label.pack(fill="both", anchor="center")
        self.point_specpath_input_frame = tk.Frame(self)
        self.point_specpath_input_frame.config(height=50)
        self.point_specpath_input_frame.pack(side="top", fill="x", anchor="center")
        ### entry-field to enter the image path
        # NOTE(review): the global pic_path declared here is never assigned in
        # this method (only self.pic_path is) -- the declaration appears unused.
        global pic_path
        self.pic_path = tk.Entry(self.point_specpath_input_frame)
        self.pic_path["bg"] = "mint cream"
        self.pic_path["fg"] = "grey25"
        self.pic_path["bd"] = 2
        self.pic_path["cursor"] = "xterm"
        self.pic_path["width"] = 80
        self.pic_path.delete(0, "end")
        # NOTE(review): the default path is "C:/output" here, while the point
        # mode uses "C:/input" -- confirm whether that difference is intended.
        self.pic_path.insert(0, "C:/output")
        self.pic_path.pack(side="left", fill="x", expand=True, anchor="center", padx=50, pady=10)
        self.pic_path.bind("<Return>", self.upload_by_enter)
        ### in the default-status the uploaded image will be resized, because the image should be taken by powerful
        ### cameras; the box should be checked if smaller structures are considered
        self.resize_decision = True
        self.resize_checkbox = ttk.Checkbutton(self.point_specpath_input_frame, command=self.image_resize_question)
        self.resize_checkbox["text"] = "Not Resize image"
        self.resize_checkbox.pack(side="left", anchor="center")
        ### upload button, which will create the canvas with the image specified in the image path entry field
        self.uploadbutton = ttk.Button(self.point_specpath_input_frame,
                                       command=self.image_to_canvas)
        self.uploadbutton["style"] = 'button_design.TButton'
        self.uploadbutton["cursor"] = "circle"
        self.uploadbutton["text"] = "✅"
        self.uploadbutton.pack(side="right", anchor="center", padx=50, pady=10)
    def upload_by_enter(self, event):
        '''Keyboard shortcut: pressing Enter in the path entry triggers the
        same upload as clicking the upload button.

        Args:
            event: the tkinter <Return> event (unused).
        '''
        self.image_to_canvas()
def image_resize_question(self):
'''this function will activated through checking the box and the image will not be resized'''
self.resize_decision = False
return self.resize_decision
    def image_to_canvas(self):
        '''Load the image named in the path entry and build the working UI.

        Validates that the file exists, resets the per-session point lists,
        creates the scrollable sidebar canvas on the right, and delegates the
        rest of the widget construction to populate().  Shows an error dialog
        when the path cannot be opened.
        '''
        try:
            '''if the image exists the image will be opened, if not an IOError will be returned'''
            # existence probe: open() raises IOError/OSError on a bad path,
            # which jumps straight to the handler below
            f = open(self.pic_path.get())
            f.close()
            self.image_path = self.pic_path.get()
            ### initialing lists, which are used by the program to store the values of the points
            self.all_points = []         # flat list: x1, y1, x2, y2, ... for every click
            self.x_cor_points = []       # x coordinates only, in click order
            self.y_cor_points = []       # y coordinates only, in click order
            self.distance_names = []     # user-supplied distance names
            self.name_and_distance = []  # alternating name, computed distance
            self.pointname = "No Pointname given"
            ### options and entry fields on the right side
            ### everything has been packed into a canvas to be able to scroll around it with growing size, due to points
            self.right_side_canvas = tk.Canvas(self, borderwidth=0, background="#ffffff")
            self.right_side_canvas.config(width=self.winfo_screenwidth()-1150, height=self.winfo_screenheight()-135)
            self.point_specimagespec_frame = tk.Frame(self.right_side_canvas, background="#ffffff")
            self.verticalbar_right = tk.Scrollbar(self, orient="vertical", command=self.right_side_canvas.yview)
            self.right_side_canvas.configure(yscrollcommand=self.verticalbar_right.set)
            self.verticalbar_right.pack(side="right", fill="y")
            self.right_side_canvas.pack(side="right", fill="x", expand=True)
            self.right_side_canvas.create_window((4, 4), window=self.point_specimagespec_frame,
                                                 anchor="n", tags="self.frame")
            ### with every pressed "Enter" the Frame will be updated and made available for the scrollbar
            self.point_specimagespec_frame.bind("<Configure>", self.onFrameConfigure)
            self.populate()
            #self.right_side_canvas.config(scrollregion=(self.right_side_canvas.winfo_x(),
            #                                          self.right_side_canvas.winfo_y(),
            #                                          self.right_side_canvas.winfo_width(),
            #                                          self.right_side_canvas.winfo_height()))
        except IOError:
            tkinter.messagebox.showinfo("Error Message", "Image couldn't be found in image path.")
    def onFrameConfigure(self, event):
        '''Reset the scroll region to encompass the inner frame.

        Bound to <Configure> of the sidebar frame so the scrollbar range
        grows as labels are added.
        '''
        self.right_side_canvas.configure(scrollregion=self.right_side_canvas.bbox("all"))
    def populate(self):
        '''Build the sidebar widgets and the main image canvas.

        Called once per upload from image_to_canvas().  Creates the
        instruction labels, the distance-name entry, the output-file-name
        entry, the "ALL DONE" button, loads/resizes the image via
        image_resize(), and wires the scrollable photo canvas with its
        left-click handler.
        '''
        ### head-label: tells the user that this is the point specifier
        self.spec_label = tk.Label(self.point_specimagespec_frame)
        self.spec_label["text"] = "Distance specifier"
        self.spec_label["font"] = "Courier 8 bold"
        self.spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        self.spec_label1 = tk.Label(self.point_specimagespec_frame)
        self.spec_label1["text"] = "1.Define distance name|\n" \
                                   "2.Hit Enter|3.Mark both points"
        self.spec_label1["font"] = "Courier 8"
        self.spec_label1.pack(side="top", anchor="center", padx=5, pady=5)
        ### entry-field to enter the names of the points
        self.point_spec = tk.Entry(self.point_specimagespec_frame)
        self.point_spec["bg"] = "mint cream"
        self.point_spec["fg"] = "grey25"
        self.point_spec["bd"] = 2
        self.point_spec["cursor"] = "xterm"
        self.point_spec["width"] = 40
        self.point_spec.pack(side="top", anchor="center", padx=5, pady=5)
        self.point_spec.bind("<Return>", self.point_name_append)
        # frame that receives the growing list of name/coordinate labels
        self.print_to_frame = tk.Frame(self.point_specimagespec_frame)
        self.print_to_frame.config(width=80, height=80)
        self.print_to_frame.pack(side="top", fill="x", anchor="center")
        # blank spacer label at the bottom of the log area
        self.between_label = tk.Label(self.print_to_frame)
        self.between_label.config(width=40, height=1)
        self.between_label["font"] = "Courier 8 bold"
        self.between_label["text"] = " "
        self.between_label.pack(side="bottom", anchor="center", pady=1)
        ### label to for the file specifier, which names the output file
        self.file_spec_label = tk.Label(self.point_specimagespec_frame)
        self.file_spec_label["text"] = "Name the Output File"
        self.file_spec_label["font"] = "Courier 8"
        self.file_spec_label.pack(side="top", anchor="center", padx=5, pady=5)
        ### Entry where the output file name is specified
        self.save_to_file_name = tk.Entry(self.point_specimagespec_frame)
        self.save_to_file_name["bg"] = "mint cream"
        self.save_to_file_name["fg"] = "grey25"
        self.save_to_file_name["bd"] = 2
        self.save_to_file_name["cursor"] = "xterm"
        self.save_to_file_name["width"] = 40
        self.save_to_file_name.pack(side="top", anchor="center", padx=5, pady=5)
        self.save_to_file_name.bind("<Return>", self.all_done_by_enter)
        self.all_done_button = ttk.Button(self.point_specimagespec_frame,
                                          command=self.all_done_func)
        self.all_done_button["style"] = 'button_design.TButton'
        self.all_done_button["text"] = "✅ ALL DONE ✅"
        self.all_done_button["cursor"] = "circle"
        self.all_done_button.pack(side="bottom", anchor="center", padx=5, pady=5)
        ### frame for the imagecanvas and the image all_points
        self.point_specimagecanvas_frame = tk.Frame(self)
        self.point_specimagecanvas_frame.config(width=1200)
        self.point_specimagecanvas_frame.pack(side="top", fill="x", anchor="center")
        # loads self.imagex (possibly shrunk) plus the size bookkeeping attrs
        self.image_resize(self.image_path)
        ### creates the Canvas where the image and scrollbars are stored
        self.photocanvas = tk.Canvas(self.point_specimagecanvas_frame, width=1150, height=1000)
        self.photocanvas.create_image(self.imagex.width() / 2, self.imagex.height() / 2,
                                      anchor="center", image=self.imagex, tags="bg_img")
        self.photocanvas.xview_moveto(0)
        self.photocanvas.yview_moveto(0)
        self.photocanvas["cursor"] = "crosshair"
        self.photocanvas["highlightthickness"] = 5
        self.horizontalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.HORIZONTAL)
        self.horizontalbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.verticalbar = tk.Scrollbar(self.point_specimagecanvas_frame, orient=tk.VERTICAL)
        self.verticalbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.horizontalbar.config(command=self.photocanvas.xview)
        self.verticalbar.config(command=self.photocanvas.yview)
        self.photocanvas.config(width=950, height=1000)
        self.photocanvas.config(scrollregion=(2, 2, self.imagex.width(), self.imagex.height()))
        self.photocanvas.config(xscrollcommand=self.horizontalbar.set, yscrollcommand=self.verticalbar.set)
        self.photocanvas.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        ### binds left-mouse-button to the point and the right-mouse button to the graph method
        self.photocanvas.bind("<Button-1>", self.point)
def image_resize(self, image_path):
''' gets the image_path and stores it in the imagex variable '''
if self.resize_decision == True:
### if the resizing_box remains empty the image will be resized according to this rules
self.imagex = ImageTk.PhotoImage(file=image_path)
### this gets the dimensions of the inputted image
self.originalwidth, self.originalheight = self.imagex.width(), self.imagex.height()
### resizing the image
if self.imagex.width() > 1000:
### the size_factor is an individual measure, larger pictures will have a larger size_factor
self.size_factor = floor(self.imagex.width() / 1000)
### and here is the size_factor applied
image = Image.open(self.image_path)
image = image.resize((self.imagex.width() // self.size_factor, self.imagex.height() // self.size_factor)
, Image.ANTIALIAS)
self.imagex = ImageTk.PhotoImage(image)
else:
self.size_factor = 1
pass
else:
### if the resizing_box is checked the image remains in the same size and and size_factor will be 1
self.imagex = ImageTk.PhotoImage(file=image_path)
self.originalwidth, self.originalheight = self.imagex.width(), self.imagex.height()
self.size_factor = 1
self.manipulatedwidth, self.manipulatedheight = self.imagex.width(), self.imagex.height()
    def point_name_append(self, event):
        '''Store the distance name typed in the entry field.

        Bound to <Return> of the name entry: appends the text to
        ``self.distance_names``, echoes "(index) name" as a green label in
        the sidebar, and clears the entry for the next name.

        Returns:
            tuple: (self.distance_names, self.pointname).
        '''
        ### recieves the point name from the entry area and appends it to the point_name list
        self.pointname = self.point_spec.get()
        self.distance_names.append(self.pointname)
        self.value_label = tk.Label(self.print_to_frame)
        self.value_label.config(width=40, height=1)
        self.value_label["font"] = "Courier 8 bold"
        self.value_label["bg"] = "SeaGreen1"
        self.value_label["fg"] = "black"
        self.value_label.pack(side="top", anchor="center", pady=1)
        ### this adds the number of the point and the actual number to the point-list
        self.value_label.configure(text="(%d) %s" % (len(self.distance_names), self.pointname))
        self.point_spec.delete(0, 'end')
        return self.distance_names, self.pointname
    def point(self, event):
        '''Left-click handler on the image canvas.

        Draws a small oval at the clicked canvas position and records the
        coordinates.  Once a pair of clicks is complete it draws the
        connecting line, logs the pair in the sidebar, and labels the line
        with the current distance name.  On failure the half-recorded pair
        is rolled back.
        '''
        # translate window coordinates to canvas coordinates (scroll-aware)
        self.x, self.y = self.photocanvas.canvasx(event.x), self.photocanvas.canvasy(event.y)
        self.x_cor, self.y_cor = int(self.x), int(self.y)
        self.last_point = self.photocanvas.create_oval(self.x_cor - 3, self.y_cor - 3,
                                                       self.x_cor + 3, self.y_cor + 3, fill="tomato")
        ### appending the x-coordinate to lists
        self.all_points.append(self.x_cor)
        self.x_cor_points.append(self.x_cor)
        ### appending the y-coordinate to lists
        self.all_points.append(self.y_cor)
        self.y_cor_points.append(self.y_cor)
        if (len(self.x_cor_points) % 2 == 0):
            ### if they are two or multiple of 2 x-coordinates the list a line will be created between those points
            # NOTE(review): this `global` looks like a leftover -- `theline`
            # is never assigned here; the line is addressed via its canvas tag.
            global theline
            self.photocanvas.create_line(self.all_points[-4], self.all_points[-3], self.all_points[-2],
                                         self.all_points[-1], tags="theline", width=2, fill="tomato")
            self.coor_label = tk.Label(self.print_to_frame)
            self.coor_label.config(width=40, height=1)
            self.coor_label["font"] = "Courier 8 bold"
            self.coor_label["bg"] = "CadetBlue1"
            self.coor_label["fg"] = "black"
            self.coor_label.pack(side="top", anchor="center", pady=1)
            self.coor_label.configure(text="(%d)X1:%d|Y1:%d-->X2:%d|Y2:%d" % ((len(self.all_points) / 4),
                                                                              self.all_points[-4], self.all_points[-3],
                                                                              self.all_points[-2],self.all_points[-1]))
            try:
                ### if there are points in the all-points list, the point names will be displayed onto the screen
                self.label_on_canvas = tk.Label(self.photocanvas)
                self.label_on_canvas["font"] = "Courier 6"
                self.label_on_canvas["bg"] = "White"
                self.label_on_canvas["fg"] = "black"
                self.label_on_canvas["text"] = ""
                self.label_on_canvas.pack()
                self.label_on_canvas.configure(text="(%d) %s" % ((len(self.all_points)/4), self.pointname))
                ### if/else to arrange the coordinate value names away from the center
                ### which could possibly cover other important parts of the image
                if self.x_cor < (self.imagex.width() // 2):  # on left side
                    if self.y_cor < (self.imagex.height() // 2):  # on top
                        self.photocanvas.create_window(self.x_cor - 25, self.y_cor - 25, window=self.label_on_canvas)
                    else:  # on bottom
                        self.photocanvas.create_window(self.x_cor - 25, self.y_cor + 25, window=self.label_on_canvas)
                else:  # on right side
                    if self.y_cor < (self.imagex.height() // 2):  # on top
                        self.photocanvas.create_window(self.x_cor + 25, self.y_cor - 25, window=self.label_on_canvas)
                    else:  # on bottom
                        self.photocanvas.create_window(self.x_cor + 25, self.y_cor + 25, window=self.label_on_canvas)
            except IndexError:
                ### if no points have been named/identified an IndexError will occur
                # NOTE(review): self.pointname is pre-set in image_to_canvas,
                # so it is unclear what raises IndexError here; this branch
                # may be dead code inherited from the point-specific mode.
                tkinter.messagebox.showinfo("Error Message", "Marked points on Image hasn't been defined yet")
                self.label_on_canvas.configure(text="undefined point")
                self.photocanvas.create_window(self.x_cor - 25, self.y_cor - 25, window=self.label_on_canvas)
                # roll back the half-recorded pair
                self.photocanvas.delete(self.last_point)
                self.coor_label.destroy()
                del self.all_points[-2:]
                del self.x_cor_points[-2:]
                del self.y_cor_points[-2:]
        return self.all_points
    def all_done_by_enter(self, event):
        '''Keyboard shortcut: pressing Enter in the output-file-name entry
        triggers the same finalization as the "ALL DONE" button.

        Args:
            event: the tkinter <Return> event (unused).
        '''
        self.all_done_func()
    def all_done_func(self):
        '''Handler: is called, when the "All done" Button is pressed or the Enter Button is hit;
        initiates calculations and print to file function'''
        ### returns the user the time the program has been used to mark and specify the points
        # start_time is a module-level name assigned in the __main__ block
        enter_time = time.time()
        print("Enter Time: --- %d minutes %0.3f seconds ---" %
              (((enter_time - start_time) // 60), (enter_time - start_time) % 60))
        self.num_of_calcs(len(self.distance_names))
        if self.all_points == [] or self.distance_names == []:
            ### either no point has been marked on the foto or no points has been given a name
            tkinter.messagebox.showinfo("Error Message", "No points have been marked on the image \n"
                                                         "or none of the points have been named!")
        elif (len(self.all_points)/4) != len(self.distance_names):
            ### the number of marked points and the number of named points doesn't match
            # every distance needs two clicks = four entries in all_points
            tkinter.messagebox.showinfo("Error Message",
                                        "Number of marked points and Number of Named points doesn't match!")
        else:
            ### everything is ok --> visible part of the image-canvas will be printed and saved to the output folder
            self.snapsaveCanvas()
            # NOTE(review): 'C:/output' + name concatenates without a path
            # separator, so the CSV lands in C:/ with an "output" filename
            # prefix -- confirm whether 'C:/output/' was intended.
            with open('C:/output'
                      + self.save_to_file_name.get() + '.csv', "w") as self.csv_file:
                self.file_to_write = csv.writer(self.csv_file, lineterminator = '\n')
                self.file_to_write.writerow(["--------MAIN INFORMATION ABOUT THE FILE--------"])
                self.file_to_write.writerow(["FILE NAME", self.save_to_file_name.get()])
                self.file_to_write.writerow(["PATH IMAGE", self.pic_path.get()])
                self.file_to_write.writerow(["DATE CREATED", str(datetime.now())])
                self.file_to_write.writerow(["ORIGINAL IMAGE DIMENSION", self.originalwidth, self.originalheight])
                self.file_to_write.writerow(["COMPRESSED IMAGE DIMENSION", self.manipulatedwidth,
                                             self.manipulatedheight, "Factor %s" % (self.size_factor)])
                ### calls the points and calculations function
                self.all_points_and_calculations_to_file()
                self.csv_file.close()  # redundant: the with-statement also closes the file
            calc_time = time.time()
            print("Calc Time: --- %d minutes %0.3f seconds ---" %
                  (((calc_time - enter_time) // 60), (calc_time - enter_time) % 60))
            ### terminates the program
            self.quit()
def num_of_calcs(self, x):
'''control function, which returns the numbers calculations the program performs'''
number = x*2
print('Number of points:', number)
firstresult = 0
secondresult = 0
for i in range(0, number):
firstresult += i
print('Number of distances:', firstresult)
for j in range(0, firstresult):
secondresult += j
print('Number of ratios:', secondresult)
    def all_points_and_calculations_to_file(self):
        '''Write every point pair, its Euclidean distance, and all pairwise
        distance ratios to the open CSV writer (``self.file_to_write``).

        Consumes ``self.x_cor_points`` / ``self.y_cor_points`` (two entries
        per distance) and drains ``self.name_and_distance`` while emitting
        the ratio table.
        '''
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["--------COORDINATES OF ALL POINTS--------"])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["NAME OF THE POINT", "X1-COORDINATE", "Y1-COORDINATE",
                                     "X2-COORDINATE", "Y2-COORDINATE", "DISTANCE"])
        for i in range(len(self.distance_names)):
            ### appends in point name list: NAME, X, Y and prints those three values to the output file
            self.name_and_distance.append(self.distance_names[i])
            # consume the two endpoints of distance i from the front of the queues
            self.x_zero = self.x_cor_points.pop(0)
            self.y_zero = self.y_cor_points.pop(0)
            self.x_one = self.x_cor_points.pop(0)
            self.y_one = self.y_cor_points.pop(0)
            # plain Euclidean distance between the two clicked points
            result = sqrt((self.x_zero - self.x_one) ** 2 + (self.y_zero - self.y_one) ** 2)
            self.name_and_distance.append(result)
            self.file_to_write.writerow([self.distance_names[i], self.x_zero, self.y_zero,
                                         self.x_one, self.y_one, result])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["--------RATIO BETWEEN EACH DISTANCES--------"])
        self.file_to_write.writerow([""])
        self.file_to_write.writerow(["1ST DIST/2ND DIST", "RATIO"])
        while self.name_and_distance != []:
            ### the ratios are again calculated similarly
            ### the first distance is poped of with its value
            distance_name_zero = self.name_and_distance.pop(0)
            num1 = self.name_and_distance.pop(0)
            number1 = float(num1)
            for i in range(0, len(self.name_and_distance), 2):
                ### distance_name_zero is then compared to all other distances
                distance_name_one = self.name_and_distance[i]
                num2 = self.name_and_distance[i + 1]
                number2 = float(num2)
                normal_result = number1 / number2
                reversed_result = number2 / number1
                self.file_to_write.writerow(["%s/%s" % (distance_name_zero, distance_name_one), "%0.4f" % (normal_result)])
                self.file_to_write.writerow(["%s/%s" % (distance_name_one, distance_name_zero), "%0.4f" % (reversed_result)])
        self.file_to_write.writerow([""])
    def snapsaveCanvas(self):
        '''Save a screenshot of the visible image canvas as a JPEG.

        Uses the on-screen bounding box from canvas_info(), so only the
        currently visible part of the canvas is captured.

        NOTE(review): 'C:/output' is concatenated to the file name without a
        path separator, so the image is written to C:/ with an "output"
        filename prefix -- confirm whether 'C:/output/' was intended.
        '''
        canvas = self.canvas_info() # Get Window Coordinates of Canvas
        self.grabcanvas = ImageGrab.grab(bbox=canvas).save('C:/output'
                                                           + self.save_to_file_name.get() + '_image_out' + '.jpg')
def canvas_info(self):
'''part of the snapsaveCanvas function, gets the information about the visible canvas'''
x = self.photocanvas.winfo_rootx()+self.photocanvas.winfo_x()
y = self.photocanvas.winfo_rooty()+self.photocanvas.winfo_y()
x1 = x+self.photocanvas.winfo_width()
y1 = y+self.photocanvas.winfo_height()
box = (x, y, x1, y1)
return box
class Angles_spec_mode(tk.Frame):
    """Placeholder page for the (not yet implemented) angle-measuring mode.

    Shows the application title, the mode name, and navigation buttons to
    every other page of the application.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # application title spanning all four button columns
        self.title_label = tk.Label(self, text=title, font="Courier 14 bold")
        self.title_label.grid(columnspan=4, row=0, pady=gen_pady, padx=gen_padx)
        # name of this mode
        self.main_label = tk.Label(self, text="Angles specific mode", font=the_font)
        self.main_label.grid(columnspan=4, row=1, pady=gen_pady, padx=gen_padx)
        # navigation buttons, one per column
        self.startpage_button = ttk.Button(
            self, text="Startpage",
            command=lambda: controller.show_frame(StartPage))
        self.startpage_button.grid(column=0, row=2)
        self.point_spec_button = ttk.Button(
            self, text="Point_spec_mode_file_get",
            command=lambda: controller.show_frame(Point_spec_mode))
        self.point_spec_button.grid(column=1, row=2)
        self.distance_spec_button = ttk.Button(
            self, text="Distance_spec_mode",
            command=lambda: controller.show_frame(Distance_spec_mode))
        self.distance_spec_button.grid(column=2, row=2)
        self.area_spec_button = ttk.Button(
            self, text="Area_spec_mode",
            command=lambda: controller.show_frame(Area_spec_mode))
        self.area_spec_button.grid(column=3, row=2)
class Area_spec_mode(tk.Frame):
    """Placeholder page for the (not yet implemented) area-measuring mode.

    Shows the application title, the mode name, and navigation buttons to
    every other page of the application.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # application title spanning all four button columns
        self.title_label = tk.Label(self, text=title, font="Courier 14 bold")
        self.title_label.grid(columnspan=4, row=0, pady=gen_pady, padx=gen_padx)
        # name of this mode
        self.main_label = tk.Label(self, text="Area specific mode", font=the_font)
        self.main_label.grid(columnspan=4, row=1, pady=gen_pady, padx=gen_padx)
        # navigation buttons, one per column
        self.startpage_button = ttk.Button(
            self, text="Startpage",
            command=lambda: controller.show_frame(StartPage))
        self.startpage_button.grid(column=0, row=2)
        self.point_spec_button = ttk.Button(
            self, text="Point_spec_mode_file_get",
            command=lambda: controller.show_frame(Point_spec_mode))
        self.point_spec_button.grid(column=1, row=2)
        self.distance_spec_button = ttk.Button(
            self, text="Distance_spec_mode",
            command=lambda: controller.show_frame(Distance_spec_mode))
        self.distance_spec_button.grid(column=2, row=2)
        self.angles_spec_button = ttk.Button(
            self, text="Angles_spec_mode",
            command=lambda: controller.show_frame(Angles_spec_mode))
        self.angles_spec_button.grid(column=3, row=2)
if __name__ == "__main__":
    # start_time is read by the GUI handlers to report elapsed time,
    # so it must remain a module-level name
    start_time = time.time()
    application = MorphoMath()
    application.mainloop()
    end_time = time.time()
    print("Successful program execution")
    minutes, seconds = divmod(end_time - start_time, 60)
    print("Total Time: --- %d minutes %0.3f seconds ---" % (minutes, seconds))
|
en
| 0.839035
|
#!/usr/bin/python Code written in Python 3.5 by KiliBio Missions accomplished:
General design ✓
Point-specific mode ✓
Ratio-specific mode
Angle-specific mode
Area-specific mode
all to log_file ✓ # the MorphoMath class inherits from the Tk class in the tk module # the init function accepts arguments and keyword arguments, will always run __init__ function to initiate the automatic function to create the widgets # sets the size of the main window automatically to the size of the screen # icon in the upper-left corner # title of the window #self.mainframe["bg"] = "khaki2" # weight --> prioritizes things # the allframes dictionary stores all frames and make them acessable to switch between windows this for loop consists of all windows in the application # there is no controller here which could be accessed! # Start Page WARNING: Use %s at your own risks!\n
There is no promise of exact calculations or any kind of warranty!\n
Check the source-code to make sure that the calculations are correct or in a way you want them to. From the author of this program it is strongly recommended,\n
to use this program and the results with an absolute critical evaluation. # Start Page the Point-specific mode allow the user to specify an arbitrary amount of points and get a file of ratios ### entry-field to enter the image path ### in the default-status the uploaded image will be resized, because the image should be taken by powerful ### cameras; the box should be checked if smaller structures are considered ### upload button, which will create the canvas with the image specified in the image path entry field this function sorely connects the "Press Enter" function the upload function this function will activated through checking the box and the image will not be resized this function loads the image if the image exists the image will be opened, if not an IOError will be returned ### initialing lists, which are used by the program to store the values of the points ### options and entry fields on the right side ### everything has been packed into a canvas to be able to scroll around it with growing size, due to points ### with every pressed "Enter" the Frame will be updated and made available for the scrollbar # self.right_side_canvas.config(scrollregion=(self.right_side_canvas.winfo_x(), # self.right_side_canvas.winfo_y(), # self.right_side_canvas.winfo_width(), # self.right_side_canvas.winfo_height())) Reset the scroll region to encompass the inner frame this function lets the sidebar grow with newly added labels ### head-label: tells the user that this is the point specifier ### entry-field to enter the names of the points ### label to for the file specifier, which names the output file ### Entry where the output file name is specified ### frame for the imagecanvas and the image all_points ### creates the Canvas where the image and scrollbars are stored ### binds left-mouse-button to the point and the right-mouse button to the graph method gets the image_path and stores it in the imagex variable ### if the resizing_box remains empty the image will be resized 
according to this rules ### this gets the dimensions of the inputted image ### resizing the image ### the size_factor is an individual measure, larger pictures will have a larger size_factor ### and here is the size_factor applied ### if the resizing_box is checked the image remains in the same size and and size_factor will be 1 recieves the name of the points as specified by the user,
appends it to the list and prints it to the window ### recieves the point name from the entry area and appends it to the point_name list ### this adds the number of the point and the actual number to the point-list this function creates a point(oval) around the clicked point ### appending the x-coordinate to lists ### appending the y-coordinate to lists ### if there are points in the all-points list, the point names will be displayed onto the screen # self.label_on_canvas.configure(text="%s" % (self.point_names[(len(self.all_points) // 2) - 1])) ### if/else to arrange the coordinate value names away from the center ### which could possibly cover other important parts of the image # on left side # on top # on bottom # on right side # on top # on bottom ### if no points have been named/identified an IndexError will occur this function creates a line between the points just to physically see the distance
without effecting anything in the calculations this function will be called by hitting Enter in the output file name entry area Handler: is called, when the "All done" Button is pressed or the Enter Button is hit;
initiates calculations and print to file function ### returns the user the time the program has been used to mark and specify the points ### either no point has been marked on the foto or no points has been given a name ### the number of marked points and the number of named points doesn't match ### everything is ok --> visible part of the image-canvas will be printed and saved to the output folder ### calls the points and calculations function ### terminates the program control function, which returns the numbers calculations the program performs this function stores all points, all calculation about the distances and ratios in the image ### appends in point name list: NAME, X, Y and prints those three values to the output file ### pops of the keys with their respective points from the first position ### to calculate the distances all other distances between key1 are calculated ### distance result in a X-Y plane are simply calculated through the Pythagorean theorem ### the distances are than stored in a list in a similar fashion DISTANCE : NUMBER VALUE ### the ratios are again calculated similarly ### the first distance is poped of with its value ### distance1 is then compared to all other distances this function is called, when all inputs by the user are done and takes a screenshot of the image canvas # Get Window Coordinates of Canvas part of the snapsaveCanvas function, gets the information about the visible canvas the Distance specific mode lets the user specify ratios that he/she thinks are significant to compare ### entry-field to enter the image path ### in the default-status the uploaded image will be resized, because the image should be taken by powerful ### cameras; the box should be checked if smaller structures are considered ### upload button, which will create the canvas with the image specified in the image path entry field this function sorely connects the "Press Enter" function the upload function this function will activated through checking the 
box and the image will not be resized this function loads the image if the image exists the image will be opened, if not an IOError will be returned ### initialing lists, which are used by the program to store the values of the points ### options and entry fields on the right side ### everything has been packed into a canvas to be able to scroll around it with growing size, due to points ### with every pressed "Enter" the Frame will be updated and made available for the scrollbar #self.right_side_canvas.config(scrollregion=(self.right_side_canvas.winfo_x(), # self.right_side_canvas.winfo_y(), # self.right_side_canvas.winfo_width(), # self.right_side_canvas.winfo_height())) Reset the scroll region to encompass the inner frame this function lets the sidebar grow with newly added labels ### head-label: tells the user that this is the point specifier ### entry-field to enter the names of the points ### label to for the file specifier, which names the output file ### Entry where the output file name is specified ### frame for the imagecanvas and the image all_points ### creates the Canvas where the image and scrollbars are stored ### binds left-mouse-button to the point and the right-mouse button to the graph method gets the image_path and stores it in the imagex variable ### if the resizing_box remains empty the image will be resized according to this rules ### this gets the dimensions of the inputted image ### resizing the image ### the size_factor is an individual measure, larger pictures will have a larger size_factor ### and here is the size_factor applied ### if the resizing_box is checked the image remains in the same size and and size_factor will be 1 recieves the name of the points as specified by the user,
appends it to the list and prints it to the window ### recieves the point name from the entry area and appends it to the point_name list ### this adds the number of the point and the actual number to the point-list this function creates a point(oval) around the clicked point ### appending the x-coordinate to lists ### appending the y-coordinate to lists ### if they are two or multiple of 2 x-coordinates the list a line will be created between those points ### if there are points in the all-points list, the point names will be displayed onto the screen ### if/else to arrange the coordinate value names away from the center ### which could possibly cover other important parts of the image # on left side # on top # on bottom # on right side # on top # on bottom ### if no points have been named/identified an IndexError will occur this function will be called by hitting Enter in the output file name entry area Handler: is called, when the "All done" Button is pressed or the Enter Button is hit;
initiates calculations and print to file function ### returns the user the time the program has been used to mark and specify the points ### either no point has been marked on the foto or no points has been given a name ### the number of marked points and the number of named points doesn't match ### everything is ok --> visible part of the image-canvas will be printed and saved to the output folder ### calls the points and calculations function ### terminates the program control function, which returns the numbers calculations the program performs this function stores all points, all calculation about the distances and ratios in the image ### appends in point name list: NAME, X, Y and prints those three values to the output file ### the ratios are again calculated similarly ### the first distance is poped of with its value ### distance_name_zero is then compared to all other distances this function is called, when all inputs by the user are done and takes a screenshot of the image canvas # Get Window Coordinates of Canvas part of the snapsaveCanvas function, gets the information about the visible canvas
| 2.858919
| 3
|
venv/lib/python2.7/site-packages/telegram/bot.py
|
kennethcyh/Secret-Santa-Bot
| 0
|
6625378
|
<filename>venv/lib/python2.7/site-packages/telegram/bot.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=E0611,E0213,E1102,C0103,E1101,W0613,R0913,R0904
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Bot."""
import functools
try:
import ujson as json
except ImportError:
import json
import logging
import warnings
from datetime import datetime
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from future.utils import string_types
from telegram import (User, Message, Update, Chat, ChatMember, UserProfilePhotos, File,
ReplyMarkup, TelegramObject, WebhookInfo, GameHighScore, StickerSet,
PhotoSize, Audio, Document, Sticker, Video, Animation, Voice, VideoNote,
Location, Venue, Contact, InputFile)
from telegram.error import InvalidToken, TelegramError
from telegram.utils.helpers import to_timestamp
from telegram.utils.request import Request
logging.getLogger(__name__).addHandler(logging.NullHandler())
def info(func):
    """Decorator: ensure ``self.bot`` is populated before *func* runs.

    Lazily calls ``self.get_me()`` the first time a decorated method is
    invoked on a Bot whose ``bot`` attribute is still falsy, then forwards
    the call unchanged.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        # fetch bot information once, on first use
        if not self.bot:
            self.get_me()
        return func(self, *args, **kwargs)
    return wrapper
def log(func):
    """Decorator: debug-log entry, result and exit of *func*."""
    logger = logging.getLogger(func.__module__)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        logger.debug('Entering: %s', func.__name__)
        outcome = func(self, *args, **kwargs)
        logger.debug(outcome)
        logger.debug('Exiting: %s', func.__name__)
        return outcome
    return wrapper
def message(func):
@functools.wraps(func)
def decorator(self, *args, **kwargs):
url, data = func(self, *args, **kwargs)
if kwargs.get('reply_to_message_id'):
data['reply_to_message_id'] = kwargs.get('reply_to_message_id')
if kwargs.get('disable_notification'):
data['disable_notification'] = kwargs.get('disable_notification')
if kwargs.get('reply_markup'):
reply_markup = kwargs.get('reply_markup')
if isinstance(reply_markup, ReplyMarkup):
data['reply_markup'] = reply_markup.to_json()
else:
data['reply_markup'] = reply_markup
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
if result is True:
return result
return Message.de_json(result, self)
return decorator
class Bot(TelegramObject):
"""This object represents a Telegram Bot.
Args:
token (:obj:`str`): Bot's unique authentication.
base_url (:obj:`str`, optional): Telegram Bot API service URL.
base_file_url (:obj:`str`, optional): Telegram Bot API file URL.
request (:obj:`telegram.utils.request.Request`, optional): Pre initialized
:obj:`telegram.utils.request.Request`.
private_key (:obj:`bytes`, optional): Private key for decryption of telegram passport data.
private_key_password (:obj:`bytes`, optional): Password for above private key.
"""
def __init__(self, token, base_url=None, base_file_url=None, request=None, private_key=None,
private_key_password=None):
self.token = self._validate_token(token)
if base_url is None:
base_url = 'https://api.telegram.org/bot'
if base_file_url is None:
base_file_url = 'https://api.telegram.org/file/bot'
self.base_url = str(base_url) + str(self.token)
self.base_file_url = str(base_file_url) + str(self.token)
self.bot = None
self._request = request or Request()
self.logger = logging.getLogger(__name__)
if private_key:
self.private_key = serialization.load_pem_private_key(private_key,
password=<PASSWORD>_key_password,
backend=default_backend())
    @property
    def request(self):
        # Read-only accessor for the underlying HTTP request helper
        # (:obj:`telegram.utils.request.Request`) used for all API calls.
        return self._request
@staticmethod
def _validate_token(token):
"""A very basic validation on token."""
if any(x.isspace() for x in token):
raise InvalidToken()
left, sep, _right = token.partition(':')
if (not sep) or (not left.isdigit()) or (len(left) < 3):
raise InvalidToken()
return token
    @property
    @info
    def id(self):
        """:obj:`int`: Unique identifier for this bot (fetched lazily via :attr:`get_me`)."""
        return self.bot.id
    @property
    @info
    def first_name(self):
        """:obj:`str`: Bot's first name (fetched lazily via :attr:`get_me`)."""
        return self.bot.first_name
    @property
    @info
    def last_name(self):
        """:obj:`str`: Optional. Bot's last name (fetched lazily via :attr:`get_me`)."""
        return self.bot.last_name
    @property
    @info
    def username(self):
        """:obj:`str`: Bot's username (fetched lazily via :attr:`get_me`)."""
        return self.bot.username
@property
def name(self):
""":obj:`str`: Bot's @username."""
return '@{0}'.format(self.username)
@log
def get_me(self, timeout=None, **kwargs):
"""A simple method for testing your bot's auth token. Requires no parameters.
Args:
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
Returns:
:class:`telegram.User`: A :class:`telegram.User` instance representing that bot if the
credentials are valid, :obj:`None` otherwise.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getMe'.format(self.base_url)
result = self._request.get(url, timeout=timeout)
self.bot = User.de_json(result, self)
return self.bot
@log
@message
def send_message(self,
chat_id,
text,
parse_mode=None,
disable_web_page_preview=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to send text messages.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
text (:obj:`str`): Text of the message to be sent. Max 4096 characters. Also found as
:attr:`telegram.constants.MAX_MESSAGE_LENGTH`.
parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold,
italic, fixed-width text or inline URLs in your bot's message. See the constants in
:class:`telegram.ParseMode` for the available modes.
disable_web_page_preview (:obj:`bool`, optional): Disables link previews for links in
this message.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options.
A JSON-serialized object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'text': text}
if parse_mode:
data['parse_mode'] = parse_mode
if disable_web_page_preview:
data['disable_web_page_preview'] = disable_web_page_preview
return url, data
@log
def delete_message(self, chat_id, message_id, timeout=None, **kwargs):
"""
Use this method to delete a message. A message can only be deleted if it was sent less
than 48 hours ago. Any such recently sent outgoing message may be deleted. Additionally,
if the bot is an administrator in a group chat, it can delete any message. If the bot is
an administrator in a supergroup, it can delete messages from any other user and service
messages about people joining or leaving the group (other types of service messages may
only be removed by the group creator). In channels, bots can only remove their own
messages.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
message_id (:obj:`int`): Identifier of the message to delete.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/deleteMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'message_id': message_id}
result = self._request.post(url, data, timeout=timeout)
return result
@log
@message
def forward_message(self,
chat_id,
from_chat_id,
message_id,
disable_notification=False,
timeout=None,
**kwargs):
"""Use this method to forward messages of any kind.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
from_chat_id (:obj:`int` | :obj:`str`): Unique identifier for the chat where the
original message was sent (or channel username in the format @channelusername).
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
message_id (:obj:`int`): Message identifier in the chat specified in from_chat_id.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/forwardMessage'.format(self.base_url)
data = {}
if chat_id:
data['chat_id'] = chat_id
if from_chat_id:
data['from_chat_id'] = from_chat_id
if message_id:
data['message_id'] = message_id
return url, data
@log
@message
def send_photo(self,
chat_id,
photo,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
parse_mode=None,
**kwargs):
"""Use this method to send photos.
Note:
The photo argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
photo (:obj:`str` | `filelike object` | :class:`telegram.PhotoSize`): Photo to send.
Pass a file_id as String to send a photo that exists on the Telegram servers
(recommended), pass an HTTP URL as a String for Telegram to get a photo from the
Internet, or upload a new photo using multipart/form-data. Lastly you can pass
an existing :class:`telegram.PhotoSize` object to send.
caption (:obj:`str`, optional): Photo caption (may also be used when resending photos
by file_id), 0-200 characters.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendPhoto'.format(self.base_url)
if isinstance(photo, PhotoSize):
photo = photo.file_id
elif InputFile.is_file(photo):
photo = InputFile(photo)
data = {'chat_id': chat_id, 'photo': photo}
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
return url, data
@log
@message
def send_audio(self,
chat_id,
audio,
duration=None,
performer=None,
title=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
parse_mode=None,
thumb=None,
**kwargs):
"""
Use this method to send audio files, if you want Telegram clients to display them in the
music player. Your audio must be in the .mp3 format. On success, the sent Message is
returned. Bots can currently send audio files of up to 50 MB in size, this limit may be
changed in the future.
For sending voice messages, use the sendVoice method instead.
Note:
The audio argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
audio (:obj:`str` | `filelike object` | :class:`telegram.Audio`): Audio file to send.
Pass a file_id as String to send an audio file that exists on the Telegram servers
(recommended), pass an HTTP URL as a String for Telegram to get an audio file from
the Internet, or upload a new one using multipart/form-data. Lastly you can pass
an existing :class:`telegram.Audio` object to send.
caption (:obj:`str`, optional): Audio caption, 0-200 characters.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
duration (:obj:`int`, optional): Duration of sent audio in seconds.
performer (:obj:`str`, optional): Performer.
title (:obj:`str`, optional): Track name.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
thumb (`filelike object`, optional): Thumbnail of the
file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail's width and height should not exceed 90. Ignored if the file is not
is passed as a string or file_id.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendAudio'.format(self.base_url)
if isinstance(audio, Audio):
audio = audio.file_id
elif InputFile.is_file(audio):
audio = InputFile(audio)
data = {'chat_id': chat_id, 'audio': audio}
if duration:
data['duration'] = duration
if performer:
data['performer'] = performer
if title:
data['title'] = title
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
if thumb:
if InputFile.is_file(thumb):
thumb = InputFile(thumb, attach=True)
data['thumb'] = thumb
return url, data
@log
@message
def send_document(self,
chat_id,
document,
filename=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
parse_mode=None,
thumb=None,
**kwargs):
"""Use this method to send general files.
Note:
The document argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
document (:obj:`str` | `filelike object` | :class:`telegram.Document`): File to send.
Pass a file_id as String to send a file that exists on the Telegram servers
(recommended), pass an HTTP URL as a String for Telegram to get a file from the
Internet, or upload a new one using multipart/form-data. Lastly you can pass
an existing :class:`telegram.Document` object to send.
filename (:obj:`str`, optional): File name that shows in telegram message (it is useful
when you send file generated by temp module, for example). Undocumented.
caption (:obj:`str`, optional): Document caption (may also be used when resending
documents by file_id), 0-200 characters.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
thumb (`filelike object`, optional): Thumbnail of the
file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail's width and height should not exceed 90. Ignored if the file is not
is passed as a string or file_id.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendDocument'.format(self.base_url)
if isinstance(document, Document):
document = document.file_id
elif InputFile.is_file(document):
document = InputFile(document, filename=filename)
data = {'chat_id': chat_id, 'document': document}
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
if thumb:
if InputFile.is_file(thumb):
thumb = InputFile(thumb, attach=True)
data['thumb'] = thumb
return url, data
@log
@message
def send_sticker(self,
chat_id,
sticker,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
**kwargs):
"""Use this method to send .webp stickers.
Note:
The sticker argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
sticker (:obj:`str` | `filelike object` :class:`telegram.Sticker`): Sticker to send.
Pass a file_id as String to send a file that exists on the Telegram servers
(recommended), pass an HTTP URL as a String for Telegram to get a .webp file from
the Internet, or upload a new one using multipart/form-data. Lastly you can pass
an existing :class:`telegram.Sticker` object to send.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendSticker'.format(self.base_url)
if isinstance(sticker, Sticker):
sticker = sticker.file_id
elif InputFile.is_file(sticker):
sticker = InputFile(sticker)
data = {'chat_id': chat_id, 'sticker': sticker}
return url, data
@log
@message
def send_video(self,
chat_id,
video,
duration=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
width=None,
height=None,
parse_mode=None,
supports_streaming=None,
thumb=None,
**kwargs):
"""
Use this method to send video files, Telegram clients support mp4 videos
(other formats may be sent as Document).
Note:
The video argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
video (:obj:`str` | `filelike object` | :class:`telegram.Video`): Video file to send.
Pass a file_id as String to send an video file that exists on the Telegram servers
(recommended), pass an HTTP URL as a String for Telegram to get an video file from
the Internet, or upload a new one using multipart/form-data. Lastly you can pass
an existing :class:`telegram.Video` object to send.
duration (:obj:`int`, optional): Duration of sent video in seconds.
width (:obj:`int`, optional): Video width.
height (:obj:`int`, optional): Video height.
caption (:obj:`str`, optional): Video caption (may also be used when resending videos
by file_id), 0-200 characters.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
supports_streaming (:obj:`bool`, optional): Pass True, if the uploaded video is
suitable for streaming.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
thumb (`filelike object`, optional): Thumbnail of the
file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail's width and height should not exceed 90. Ignored if the file is not
is passed as a string or file_id.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVideo'.format(self.base_url)
if isinstance(video, Video):
video = video.file_id
elif InputFile.is_file(video):
video = InputFile(video)
data = {'chat_id': chat_id, 'video': video}
if duration:
data['duration'] = duration
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
if supports_streaming:
data['supports_streaming'] = supports_streaming
if width:
data['width'] = width
if height:
data['height'] = height
if thumb:
if InputFile.is_file(thumb):
thumb = InputFile(thumb, attach=True)
data['thumb'] = thumb
return url, data
@log
@message
def send_video_note(self,
chat_id,
video_note,
duration=None,
length=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
thumb=None,
**kwargs):
"""Use this method to send video messages.
Note:
The video_note argument can be either a file_id or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
video_note (:obj:`str` | `filelike object` | :class:`telegram.VideoNote`): Video note
to send. Pass a file_id as String to send a video note that exists on the Telegram
servers (recommended) or upload a new video using multipart/form-data. Or you can
pass an existing :class:`telegram.VideoNote` object to send. Sending video notes by
a URL is currently unsupported.
duration (:obj:`int`, optional): Duration of sent video in seconds.
length (:obj:`int`, optional): Video width and height
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
thumb (`filelike object`, optional): Thumbnail of the
file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail's width and height should not exceed 90. Ignored if the file is not
is passed as a string or file_id.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVideoNote'.format(self.base_url)
if isinstance(video_note, VideoNote):
video_note = video_note.file_id
elif InputFile.is_file(video_note):
video_note = InputFile(video_note)
data = {'chat_id': chat_id, 'video_note': video_note}
if duration is not None:
data['duration'] = duration
if length is not None:
data['length'] = length
if thumb:
if InputFile.is_file(thumb):
thumb = InputFile(thumb, attach=True)
data['thumb'] = thumb
return url, data
@log
@message
def send_animation(self,
chat_id,
animation,
duration=None,
width=None,
height=None,
thumb=None,
caption=None,
parse_mode=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
**kwargs):
"""
Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
animation (:obj:`str` | `filelike object` | :class:`telegram.Animation`): Animation to
send. Pass a file_id as String to send an animation that exists on the Telegram
servers (recommended), pass an HTTP URL as a String for Telegram to get an
animation from the Internet, or upload a new animation using multipart/form-data.
Lastly you can pass an existing :class:`telegram.Animation` object to send.
duration (:obj:`int`, optional): Duration of sent animation in seconds.
width (:obj:`int`, optional): Animation width.
height (:obj:`int`, optional): Animation height.
thumb (`filelike object`, optional): Thumbnail of the
file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
A thumbnail's width and height should not exceed 90. Ignored if the file is not
is passed as a string or file_id.
caption (:obj:`str`, optional): Animation caption (may also be used when resending
animations by file_id), 0-200 characters.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendAnimation'.format(self.base_url)
if isinstance(animation, Animation):
animation = animation.file_id
elif InputFile.is_file(animation):
animation = InputFile(animation)
data = {'chat_id': chat_id, 'animation': animation}
if duration:
data['duration'] = duration
if width:
data['width'] = width
if height:
data['height'] = height
if thumb:
if InputFile.is_file(thumb):
thumb = InputFile(thumb, attach=True)
data['thumb'] = thumb
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
return url, data
@log
@message
def send_voice(self,
chat_id,
voice,
duration=None,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
parse_mode=None,
**kwargs):
"""
Use this method to send audio files, if you want Telegram clients to display the file
as a playable voice message. For this to work, your audio must be in an .ogg file
encoded with OPUS (other formats may be sent as Audio or Document).
Note:
The voice argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
voice (:obj:`str` | `filelike object` | :class:`telegram.Voice`): Voice file to send.
Pass a file_id as String to send an voice file that exists on the Telegram servers
(recommended), pass an HTTP URL as a String for Telegram to get an voice file from
the Internet, or upload a new one using multipart/form-data. Lastly you can pass
an existing :class:`telegram.Voice` object to send.
caption (:obj:`str`, optional): Voice message caption, 0-200 characters.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
duration (:obj:`int`, optional): Duration of the voice message in seconds.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVoice'.format(self.base_url)
if isinstance(voice, Voice):
voice = voice.file_id
elif InputFile.is_file(voice):
voice = InputFile(voice)
data = {'chat_id': chat_id, 'voice': voice}
if duration:
data['duration'] = duration
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
return url, data
@log
def send_media_group(self,
chat_id,
media,
disable_notification=None,
reply_to_message_id=None,
timeout=20,
**kwargs):
"""Use this method to send a group of photos or videos as an album.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
media (List[:class:`telegram.InputMedia`]): An array describing photos and videos to be
sent, must include 2–10 items.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
List[:class:`telegram.Message`]: An array of the sent Messages.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendMediaGroup'.format(self.base_url)
data = {'chat_id': chat_id, 'media': media}
if reply_to_message_id:
data['reply_to_message_id'] = reply_to_message_id
if disable_notification:
data['disable_notification'] = disable_notification
result = self._request.post(url, data, timeout=timeout)
return [Message.de_json(res, self) for res in result]
@log
@message
def send_location(self,
chat_id,
latitude=None,
longitude=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
location=None,
live_period=None,
**kwargs):
"""Use this method to send point on the map.
Note:
You can either supply a :obj:`latitude` and :obj:`longitude` or a :obj:`location`.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
latitude (:obj:`float`, optional): Latitude of location.
longitude (:obj:`float`, optional): Longitude of location.
location (:class:`telegram.Location`, optional): The location to send.
live_period (:obj:`int`, optional): Period in seconds for which the location will be
updated, should be between 60 and 86400.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendLocation'.format(self.base_url)
if not (all([latitude, longitude]) or location):
raise ValueError("Either location or latitude and longitude must be passed as"
"argument.")
if not ((latitude is not None or longitude is not None) ^ bool(location)):
raise ValueError("Either location or latitude and longitude must be passed as"
"argument. Not both.")
if isinstance(location, Location):
latitude = location.latitude
longitude = location.longitude
data = {'chat_id': chat_id, 'latitude': latitude, 'longitude': longitude}
if live_period:
data['live_period'] = live_period
return url, data
@log
@message
def edit_message_live_location(self,
chat_id=None,
message_id=None,
inline_message_id=None,
latitude=None,
longitude=None,
location=None,
reply_markup=None,
**kwargs):
"""Use this method to edit live location messages sent by the bot or via the bot
(for inline bots). A location can be edited until its :attr:`live_period` expires or
editing is explicitly disabled by a call to :attr:`stop_message_live_location`.
Note:
You can either supply a :obj:`latitude` and :obj:`longitude` or a :obj:`location`.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
latitude (:obj:`float`, optional): Latitude of location.
longitude (:obj:`float`, optional): Longitude of location.
location (:class:`telegram.Location`, optional): The location to send.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
Returns:
:class:`telegram.Message`: On success the edited message.
"""
url = '{0}/editMessageLiveLocation'.format(self.base_url)
if not (all([latitude, longitude]) or location):
raise ValueError("Either location or latitude and longitude must be passed as"
"argument.")
if not ((latitude is not None or longitude is not None) ^ bool(location)):
raise ValueError("Either location or latitude and longitude must be passed as"
"argument. Not both.")
if isinstance(location, Location):
latitude = location.latitude
longitude = location.longitude
data = {'latitude': latitude, 'longitude': longitude}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
return url, data
@log
@message
def stop_message_live_location(self,
chat_id=None,
message_id=None,
inline_message_id=None,
reply_markup=None,
**kwargs):
"""Use this method to stop updating a live location message sent by the bot or via the bot
(for inline bots) before live_period expires.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
Returns:
:class:`telegram.Message`: On success the edited message.
"""
url = '{0}/stopMessageLiveLocation'.format(self.base_url)
data = {}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
return url, data
@log
@message
def send_venue(self,
chat_id,
latitude=None,
longitude=None,
title=None,
address=None,
foursquare_id=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
venue=None,
foursquare_type=None,
**kwargs):
"""Use this method to send information about a venue.
Note:
you can either supply :obj:`venue`, or :obj:`latitude`, :obj:`longitude`,
:obj:`title` and :obj:`address` and optionally :obj:`foursquare_id` and optionally
:obj:`foursquare_type`.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
latitude (:obj:`float`, optional): Latitude of venue.
longitude (:obj:`float`, optional): Longitude of venue.
title (:obj:`str`, optional): Name of the venue.
address (:obj:`str`, optional): Address of the venue.
foursquare_id (:obj:`str`, optional): Foursquare identifier of the venue.
foursquare_type (:obj:`str`, optional): Foursquare type of the venue, if known.
(For example, "arts_entertainment/default", "arts_entertainment/aquarium" or
"food/icecream".)
venue (:class:`telegram.Venue`, optional): The venue to send.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendVenue'.format(self.base_url)
if not (venue or all([latitude, longitude, address, title])):
raise ValueError("Either venue or latitude, longitude, address and title must be"
"passed as arguments.")
if isinstance(venue, Venue):
latitude = venue.location.latitude
longitude = venue.location.longitude
address = venue.address
title = venue.title
foursquare_id = venue.foursquare_id
foursquare_type = venue.foursquare_type
data = {
'chat_id': chat_id,
'latitude': latitude,
'longitude': longitude,
'address': address,
'title': title
}
if foursquare_id:
data['foursquare_id'] = foursquare_id
if foursquare_type:
data['foursquare_type'] = foursquare_type
return url, data
@log
@message
def send_contact(self,
chat_id,
phone_number=None,
first_name=None,
last_name=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
contact=None,
vcard=None,
**kwargs):
"""Use this method to send phone contacts.
Note:
You can either supply :obj:`contact` or :obj:`phone_number` and :obj:`first_name`
with optionally :obj:`last_name` and optionally :obj:`vcard`.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
phone_number (:obj:`str`, optional): Contact's phone number.
first_name (:obj:`str`, optional): Contact's first name.
last_name (:obj:`str`, optional): Contact's last name.
vcard (:obj:`str`, optional): Additional data about the contact in the form of a vCard,
0-2048 bytes.
contact (:class:`telegram.Contact`, optional): The contact to send.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendContact'.format(self.base_url)
if (not contact) and (not all([phone_number, first_name])):
raise ValueError("Either contact or phone_number and first_name must be passed as"
"arguments.")
if isinstance(contact, Contact):
phone_number = contact.phone_number
first_name = contact.first_name
last_name = contact.last_name
vcard = contact.vcard
data = {'chat_id': chat_id, 'phone_number': phone_number, 'first_name': first_name}
if last_name:
data['last_name'] = last_name
if vcard:
data['vcard'] = vcard
return url, data
@log
@message
def send_game(self,
chat_id,
game_short_name,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to send a game.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
game_short_name (:obj:`str`): Short name of the game, serves as the unique identifier
for the game. Set up your games via Botfather.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendGame'.format(self.base_url)
data = {'chat_id': chat_id, 'game_short_name': game_short_name}
return url, data
@log
def send_chat_action(self, chat_id, action, timeout=None, **kwargs):
"""
Use this method when you need to tell the user that something is happening on the bot's
side. The status is set for 5 seconds or less (when a message arrives from your bot,
Telegram clients clear its typing status).
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
action(:class:`telegram.ChatAction` | :obj:`str`): Type of action to broadcast. Choose
one, depending on what the user is about to receive. For convenience look at the
constants in :class:`telegram.ChatAction`
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendChatAction'.format(self.base_url)
data = {'chat_id': chat_id, 'action': action}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def answer_inline_query(self,
inline_query_id,
results,
cache_time=300,
is_personal=None,
next_offset=None,
switch_pm_text=None,
switch_pm_parameter=None,
timeout=None,
**kwargs):
"""
Use this method to send answers to an inline query. No more than 50 results per query are
allowed.
Args:
inline_query_id (:obj:`str`): Unique identifier for the answered query.
results (List[:class:`telegram.InlineQueryResult`)]: A list of results for the inline
query.
cache_time (:obj:`int`, optional): The maximum amount of time in seconds that the
result of the inline query may be cached on the server. Defaults to 300.
is_personal (:obj:`bool`, optional): Pass True, if results may be cached on the server
side only for the user that sent the query. By default, results may be returned to
any user who sends the same query.
next_offset (:obj:`str`, optional): Pass the offset that a client should send in the
next query with the same text to receive more results. Pass an empty string if
there are no more results or if you don't support pagination. Offset length can't
exceed 64 bytes.
switch_pm_text (:obj:`str`, optional): If passed, clients will display a button with
specified text that switches the user to a private chat with the bot and sends the
bot a start message with the parameter switch_pm_parameter.
switch_pm_parameter (:obj:`str`, optional): Deep-linking parameter for the /start
message sent to the bot when user presses the switch button. 1-64 characters,
only A-Z, a-z, 0-9, _ and - are allowed.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
he read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Example:
An inline bot that sends YouTube videos can ask the user to connect the bot to their
YouTube account to adapt search results accordingly. To do this, it displays a
'Connect your YouTube account' button above the results, or even before showing any.
The user presses the button, switches to a private chat with the bot and, in doing so,
passes a start parameter that instructs the bot to return an oauth link. Once done, the
bot can offer a switch_inline button so that the user can easily return to the chat
where they wanted to use the bot's inline capabilities.
Returns:
:obj:`bool` On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/answerInlineQuery'.format(self.base_url)
results = [res.to_dict() for res in results]
data = {'inline_query_id': inline_query_id, 'results': results}
if cache_time or cache_time == 0:
data['cache_time'] = cache_time
if is_personal:
data['is_personal'] = is_personal
if next_offset is not None:
data['next_offset'] = next_offset
if switch_pm_text:
data['switch_pm_text'] = switch_pm_text
if switch_pm_parameter:
data['switch_pm_parameter'] = switch_pm_parameter
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def get_user_profile_photos(self, user_id, offset=None, limit=100, timeout=None, **kwargs):
"""Use this method to get a list of profile pictures for a user.
Args:
user_id (:obj:`int`): Unique identifier of the target user.
offset (:obj:`int`, optional): Sequential number of the first photo to be returned.
By default, all photos are returned.
limit (:obj:`int`, optional): Limits the number of photos to be retrieved. Values
between 1-100 are accepted. Defaults to 100.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.UserProfilePhotos`
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getUserProfilePhotos'.format(self.base_url)
data = {'user_id': user_id}
if offset is not None:
data['offset'] = offset
if limit:
data['limit'] = limit
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return UserProfilePhotos.de_json(result, self)
@log
def get_file(self, file_id, timeout=None, **kwargs):
"""
Use this method to get basic info about a file and prepare it for downloading. For the
moment, bots can download files of up to 20MB in size. The file can then be downloaded
with :attr:`telegram.File.download`. It is guaranteed that the link will be
valid for at least 1 hour. When the link expires, a new one can be requested by
calling get_file again.
Args:
file_id (:obj:`str` | :class:`telegram.Audio` | :class:`telegram.Document` | \
:class:`telegram.PhotoSize` | :class:`telegram.Sticker` | \
:class:`telegram.Video` | :class:`telegram.VideoNote` | \
:class:`telegram.Voice`):
Either the file identifier or an object that has a file_id attribute
to get file information about.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.File`
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getFile'.format(self.base_url)
try:
file_id = file_id.file_id
except AttributeError:
pass
data = {'file_id': file_id}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
if result.get('file_path'):
result['file_path'] = '%s/%s' % (self.base_file_url, result['file_path'])
return File.de_json(result, self)
@log
def kick_chat_member(self, chat_id, user_id, timeout=None, until_date=None, **kwargs):
"""
Use this method to kick a user from a group or a supergroup. In the case of supergroups,
the user will not be able to return to the group on their own using invite links, etc.,
unless unbanned first. The bot must be an administrator in the group for this to work.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
user_id (:obj:`int`): Unique identifier of the target user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
until_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when the user will
be unbanned, unix time. If user is banned for more than 366 days or less than 30
seconds from the current time they are considered to be banned forever.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Note:
In regular groups (non-supergroups), this method will only work if the
'All Members Are Admins' setting is off in the target group. Otherwise
members may only be removed by the group's creator or by the member that added them.
Returns:
:obj:`bool` On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/kickChatMember'.format(self.base_url)
data = {'chat_id': chat_id, 'user_id': user_id}
data.update(kwargs)
if until_date is not None:
if isinstance(until_date, datetime):
until_date = to_timestamp(until_date)
data['until_date'] = until_date
result = self._request.post(url, data, timeout=timeout)
return result
@log
def unban_chat_member(self, chat_id, user_id, timeout=None, **kwargs):
"""Use this method to unban a previously kicked user in a supergroup.
The user will not return to the group automatically, but will be able to join via link,
etc. The bot must be an administrator in the group for this to work.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
user_id (:obj:`int`): Unique identifier of the target user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool` On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/unbanChatMember'.format(self.base_url)
data = {'chat_id': chat_id, 'user_id': user_id}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def answer_callback_query(self,
callback_query_id,
text=None,
show_alert=False,
url=None,
cache_time=None,
timeout=None,
**kwargs):
"""
Use this method to send answers to callback queries sent from inline keyboards. The answer
will be displayed to the user as a notification at the top of the chat screen or as an
alert.
Alternatively, the user can be redirected to the specified Game URL. For this option to
work, you must first create a game for your bot via BotFather and accept the terms.
Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with
a parameter.
Args:
callback_query_id (:obj:`str`): Unique identifier for the query to be answered.
text (:obj:`str`, optional): Text of the notification. If not specified, nothing will
be shown to the user, 0-200 characters.
show_alert (:obj:`bool`, optional): If true, an alert will be shown by the client
instead of a notification at the top of the chat screen. Defaults to false.
url (:obj:`str`, optional): URL that will be opened by the user's client. If you have
created a Game and accepted the conditions via @Botfather, specify the URL that
opens your game - note that this will only work if the query comes from a callback
game button. Otherwise, you may use links like t.me/your_bot?start=XXXX that open
your bot with a parameter.
cache_time (:obj:`int`, optional): The maximum amount of time in seconds that the
result of the callback query may be cached client-side. Defaults to 0.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool` On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url_ = '{0}/answerCallbackQuery'.format(self.base_url)
data = {'callback_query_id': callback_query_id}
if text:
data['text'] = text
if show_alert:
data['show_alert'] = show_alert
if url:
data['url'] = url
if cache_time is not None:
data['cache_time'] = cache_time
data.update(kwargs)
result = self._request.post(url_, data, timeout=timeout)
return result
@log
@message
def edit_message_text(self,
text,
chat_id=None,
message_id=None,
inline_message_id=None,
parse_mode=None,
disable_web_page_preview=None,
reply_markup=None,
timeout=None,
**kwargs):
"""
Use this method to edit text and game messages sent by the bot or via the bot (for inline
bots).
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
text (:obj:`str`): New text of the message.
parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold,
italic, fixed-width text or inline URLs in your bot's message. See the constants in
:class:`telegram.ParseMode` for the available modes.
disable_web_page_preview (:obj:`bool`, optional): Disables link previews for links in
this message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, if edited message is sent by the bot, the
edited Message is returned, otherwise ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/editMessageText'.format(self.base_url)
data = {'text': text}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
if parse_mode:
data['parse_mode'] = parse_mode
if disable_web_page_preview:
data['disable_web_page_preview'] = disable_web_page_preview
return url, data
@log
@message
def edit_message_caption(self,
chat_id=None,
message_id=None,
inline_message_id=None,
caption=None,
reply_markup=None,
timeout=None,
parse_mode=None,
**kwargs):
"""
Use this method to edit captions of messages sent by the bot or via the bot
(for inline bots).
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
caption (:obj:`str`, optional): New caption of the message.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, if edited message is sent by the bot, the
edited Message is returned, otherwise ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
if inline_message_id is None and (chat_id is None or message_id is None):
raise ValueError(
'edit_message_caption: Both chat_id and message_id are required when '
'inline_message_id is not specified')
url = '{0}/editMessageCaption'.format(self.base_url)
data = {}
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
return url, data
@log
@message
def edit_message_media(self,
chat_id=None,
message_id=None,
inline_message_id=None,
media=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to edit audio, document, photo, or video messages. If a message is a
part of a message album, then it can be edited only to a photo or a video. Otherwise,
message type can be changed arbitrarily. When inline message is edited, new file can't be
uploaded. Use previously uploaded file via its file_id or specify a URL. On success, if the
edited message was sent by the bot, the edited Message is returned, otherwise True is
returned.
Args:
chat_id (:obj:`int` | :obj:`str`, optional): Unique identifier for the target chat or
username of the target`channel (in the format @channelusername).
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
media (:class:`telegram.InputMedia`): An object for a new media content
of the message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
if inline_message_id is None and (chat_id is None or message_id is None):
raise ValueError(
'edit_message_caption: Both chat_id and message_id are required when '
'inline_message_id is not specified')
url = '{0}/editMessageMedia'.format(self.base_url)
data = {'media': media}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
return url, data
@log
@message
def edit_message_reply_markup(self,
chat_id=None,
message_id=None,
inline_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""
Use this method to edit only the reply markup of messages sent by the bot or via the bot
(for inline bots).
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, if edited message is sent by the bot, the
editedMessage is returned, otherwise ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
if inline_message_id is None and (chat_id is None or message_id is None):
raise ValueError(
'edit_message_reply_markup: Both chat_id and message_id are required when '
'inline_message_id is not specified')
url = '{0}/editMessageReplyMarkup'.format(self.base_url)
data = {}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
return url, data
@log
def get_updates(self,
offset=None,
limit=100,
timeout=0,
read_latency=2.,
allowed_updates=None,
**kwargs):
"""Use this method to receive incoming updates using long polling.
Args:
offset (:obj:`int`, optional): Identifier of the first update to be returned. Must be
greater by one than the highest among the identifiers of previously received
updates. By default, updates starting with the earliest unconfirmed update are
returned. An update is considered confirmed as soon as getUpdates is called with an
offset higher than its update_id. The negative offset can be specified to retrieve
updates starting from -offset update from the end of the updates queue. All
previous updates will forgotten.
limit (:obj:`int`, optional): Limits the number of updates to be retrieved. Values
between 1-100 are accepted. Defaults to 100.
timeout (:obj:`int`, optional): Timeout in seconds for long polling. Defaults to 0,
i.e. usual short polling. Should be positive, short polling should be used for
testing purposes only.
allowed_updates (List[:obj:`str`]), optional): List the types of updates you want your
bot to receive. For example, specify ["message", "edited_channel_post",
"callback_query"] to only receive updates of these types. See
:class:`telegram.Update` for a complete list of available update types.
Specify an empty list to receive all updates regardless of type (default). If not
specified, the previous setting will be used. Please note that this parameter
doesn't affect updates created before the call to the get_updates, so unwanted
updates may be received for a short period of time.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Notes:
1. This method will not work if an outgoing webhook is set up.
2. In order to avoid getting duplicate updates, recalculate offset after each
server response.
3. To take full advantage of this library take a look at :class:`telegram.ext.Updater`
Returns:
List[:class:`telegram.Update`]
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getUpdates'.format(self.base_url)
data = {'timeout': timeout}
if offset:
data['offset'] = offset
if limit:
data['limit'] = limit
if allowed_updates is not None:
data['allowed_updates'] = allowed_updates
data.update(kwargs)
# Ideally we'd use an aggressive read timeout for the polling. However,
# * Short polling should return within 2 seconds.
# * Long polling poses a different problem: the connection might have been dropped while
# waiting for the server to return and there's no way of knowing the connection had been
# dropped in real time.
result = self._request.post(url, data, timeout=float(read_latency) + float(timeout))
if result:
self.logger.debug('Getting updates: %s', [u['update_id'] for u in result])
else:
self.logger.debug('No new updates found.')
return [Update.de_json(u, self) for u in result]
@log
def set_webhook(self,
                url=None,
                certificate=None,
                timeout=None,
                max_connections=40,
                allowed_updates=None,
                **kwargs):
    """
    Specify a URL and receive incoming updates via an outgoing webhook.

    Whenever there is an update for the bot, Telegram sends an HTTPS POST request to
    the given URL containing a JSON-serialized Update, giving up after a reasonable
    number of attempts. To verify the request really comes from Telegram, embed a
    secret path in the URL, e.g. ``https://www.example.com/<token>``.

    Note:
        The certificate argument should be a file opened from disk,
        ``open(filename, 'rb')``.

    Args:
        url (:obj:`str`): HTTPS url to send updates to. Use an empty string to remove
            webhook integration.
        certificate (:obj:`filelike`): Public key certificate so the root certificate
            in use can be checked (see the self-signed guide: https://goo.gl/rw7w6Y).
        max_connections (:obj:`int`, optional): Maximum number of simultaneous HTTPS
            connections for update delivery, 1-100. Defaults to 40.
        allowed_updates (List[:obj:`str`], optional): Types of updates the bot should
            receive; an empty list means all types. If omitted, the previous setting
            is kept. Does not affect updates created before the call.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Note:
        1. ``get_updates`` will not work while a webhook is set up.
        2. Self-signed certificates must be uploaded as InputFile via the certificate
           parameter; sending a string will not work.
        3. Ports currently supported for webhooks: 443, 80, 88, 8443.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/setWebhook'.format(self.base_url)

    # Backwards-compatibility shim: this parameter used to be called 'webhook_url'.
    if 'webhook_url' in kwargs:  # pragma: no cover
        warnings.warn("The 'webhook_url' parameter has been renamed to 'url' in accordance "
                      "with the API")
        if url is not None:
            raise ValueError("The parameters 'url' and 'webhook_url' are mutually exclusive")
        url = kwargs.pop('webhook_url')

    payload = {}
    if url is not None:
        payload['url'] = url
    if certificate:
        payload['certificate'] = (InputFile(certificate)
                                  if InputFile.is_file(certificate) else certificate)
    if max_connections is not None:
        payload['max_connections'] = max_connections
    if allowed_updates is not None:
        payload['allowed_updates'] = allowed_updates
    payload.update(kwargs)

    return self._request.post(endpoint, payload, timeout=timeout)
@log
def delete_webhook(self, timeout=None, **kwargs):
    """
    Remove webhook integration if you decide to switch back to ``getUpdates``.
    Requires no parameters.

    Args:
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/deleteWebhook'.format(self.base_url)
    return self._request.post(endpoint, kwargs, timeout=timeout)
@log
def leave_chat(self, chat_id, timeout=None, **kwargs):
    """Make the bot leave a group, supergroup or channel.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/leaveChat'.format(self.base_url)
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    return self._request.post(endpoint, payload, timeout=timeout)
@log
def get_chat(self, chat_id, timeout=None, **kwargs):
    """
    Get up-to-date information about a chat (current name of the user for one-on-one
    conversations, current username of a user, group or channel, etc.).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Chat`

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/getChat'.format(self.base_url)
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    response = self._request.post(endpoint, payload, timeout=timeout)
    return Chat.de_json(response, self)
@log
def get_chat_administrators(self, chat_id, timeout=None, **kwargs):
    """
    Get a list of administrators in a chat. On success, returns an array of
    ChatMember objects with information about all chat administrators except other
    bots. If the chat is a group or supergroup with no appointed administrators,
    only the creator is returned.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        List[:class:`telegram.ChatMember`]

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/getChatAdministrators'.format(self.base_url)
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    response = self._request.post(endpoint, payload, timeout=timeout)
    return [ChatMember.de_json(member, self) for member in response]
@log
def get_chat_members_count(self, chat_id, timeout=None, **kwargs):
    """Get the number of members in a chat.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        int: Number of members in the chat.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/getChatMembersCount'.format(self.base_url)
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    return self._request.post(endpoint, payload, timeout=timeout)
@log
def get_chat_member(self, chat_id, user_id, timeout=None, **kwargs):
    """Get information about a member of a chat.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        user_id (:obj:`int`): Unique identifier of the target user.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.ChatMember`

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/getChatMember'.format(self.base_url)
    payload = {'chat_id': chat_id, 'user_id': user_id}
    payload.update(kwargs)
    response = self._request.post(endpoint, payload, timeout=timeout)
    return ChatMember.de_json(response, self)
@log
def set_chat_sticker_set(self, chat_id, sticker_set_name, timeout=None, **kwargs):
    """Use this method to set a new group sticker set for a supergroup.

    The bot must be an administrator in the chat for this to work and must have the
    appropriate admin rights. Use the field :attr:`telegram.Chat.can_set_sticker_set`
    optionally returned in :attr:`get_chat` requests to check if the bot can use this
    method.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target supergroup (in the format @supergroupusername).
        sticker_set_name (:obj:`str`): Name of the sticker set to be set as the group
            sticker set.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use
            it as the read timeout from the server (instead of the one specified
            during creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool`: True on success.

    Raises:
        :class:`telegram.TelegramError`
    """
    url = '{0}/setChatStickerSet'.format(self.base_url)

    data = {'chat_id': chat_id, 'sticker_set_name': sticker_set_name}
    # Fix: **kwargs was previously accepted but silently discarded; forward it to
    # the API like every other non-decorated Bot method does.
    data.update(kwargs)

    result = self._request.post(url, data, timeout=timeout)

    return result
@log
def delete_chat_sticker_set(self, chat_id, timeout=None, **kwargs):
    """Use this method to delete a group sticker set from a supergroup.

    The bot must be an administrator in the chat for this to work and must have the
    appropriate admin rights. Use the field :attr:`telegram.Chat.can_set_sticker_set`
    optionally returned in :attr:`get_chat` requests to check if the bot can use this
    method.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target supergroup (in the format @supergroupusername).
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use
            it as the read timeout from the server (instead of the one specified
            during creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool`: True on success.

    Raises:
        :class:`telegram.TelegramError`
    """
    url = '{0}/deleteChatStickerSet'.format(self.base_url)

    data = {'chat_id': chat_id}
    # Fix: **kwargs was previously accepted but silently discarded; forward it to
    # the API like every other non-decorated Bot method does.
    data.update(kwargs)

    result = self._request.post(url, data, timeout=timeout)

    return result
@log
def get_webhook_info(self, timeout=None, **kwargs):
    """Use this method to get current webhook status. Requires no parameters.

    If the bot is using getUpdates, will return an object with the url field empty.

    Args:
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use
            it as the read timeout from the server (instead of the one specified
            during creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.WebhookInfo`
    """
    # Consistency fix: every other API method in this class is decorated with
    # @log; this one was missing it.
    url = '{0}/getWebhookInfo'.format(self.base_url)

    data = kwargs

    result = self._request.post(url, data, timeout=timeout)

    return WebhookInfo.de_json(result, self)
@log
@message
def set_game_score(self,
                   user_id,
                   score,
                   chat_id=None,
                   message_id=None,
                   inline_message_id=None,
                   force=None,
                   disable_edit_message=None,
                   timeout=None,
                   **kwargs):
    """
    Set the score of the specified user in a game. On success, if the message was
    sent by the bot, returns the edited Message, otherwise returns True. Returns an
    error if the new score is not greater than the user's current score in the chat
    and ``force`` is False.

    Args:
        user_id (:obj:`int`): User identifier.
        score (:obj:`int`): New score, must be non-negative.
        force (:obj:`bool`, optional): Pass True if the high score is allowed to
            decrease (useful when fixing mistakes or banning cheaters).
        disable_edit_message (:obj:`bool`, optional): Pass True if the game message
            should not be automatically edited to include the current scoreboard.
        chat_id (int|str, optional): Required if inline_message_id is not specified.
            Unique identifier for the target chat.
        message_id (:obj:`int`, optional): Required if inline_message_id is not
            specified. Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and message_id
            are not specified. Identifier of the inline message.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: The edited message, or ``True`` if the message
        wasn't sent by the bot.

    Raises:
        :class:`telegram.TelegramError`: If the new score is not greater than the
            user's current score in the chat and force is False.
    """
    endpoint = '{0}/setGameScore'.format(self.base_url)

    payload = {'user_id': user_id, 'score': score}

    # Exactly one of (chat_id + message_id) or inline_message_id identifies the
    # game message; pass along whichever the caller supplied.
    if chat_id:
        payload['chat_id'] = chat_id
    if message_id:
        payload['message_id'] = message_id
    if inline_message_id:
        payload['inline_message_id'] = inline_message_id
    if force is not None:
        payload['force'] = force
    if disable_edit_message is not None:
        payload['disable_edit_message'] = disable_edit_message

    # The @message decorator performs the request and result conversion.
    return endpoint, payload
@log
def get_game_high_scores(self,
                         user_id,
                         chat_id=None,
                         message_id=None,
                         inline_message_id=None,
                         timeout=None,
                         **kwargs):
    """
    Get data for high score tables. Returns the score of the specified user and
    several of his neighbors in a game.

    Args:
        user_id (:obj:`int`): User identifier.
        chat_id (:obj:`int` | :obj:`str`, optional): Required if inline_message_id is
            not specified. Unique identifier for the target chat.
        message_id (:obj:`int`, optional): Required if inline_message_id is not
            specified. Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and message_id
            are not specified. Identifier of the inline message.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        List[:class:`telegram.GameHighScore`]

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/getGameHighScores'.format(self.base_url)

    payload = {'user_id': user_id}
    if chat_id:
        payload['chat_id'] = chat_id
    if message_id:
        payload['message_id'] = message_id
    if inline_message_id:
        payload['inline_message_id'] = inline_message_id
    payload.update(kwargs)

    response = self._request.post(endpoint, payload, timeout=timeout)
    return [GameHighScore.de_json(entry, self) for entry in response]
@log
@message
def send_invoice(self,
                 chat_id,
                 title,
                 description,
                 payload,
                 provider_token,
                 start_parameter,
                 currency,
                 prices,
                 photo_url=None,
                 photo_size=None,
                 photo_width=None,
                 photo_height=None,
                 need_name=None,
                 need_phone_number=None,
                 need_email=None,
                 need_shipping_address=None,
                 is_flexible=None,
                 disable_notification=False,
                 reply_to_message_id=None,
                 reply_markup=None,
                 provider_data=None,
                 send_phone_number_to_provider=None,
                 send_email_to_provider=None,
                 timeout=None,
                 **kwargs):
    """Use this method to send invoices.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target private chat.
        title (:obj:`str`): Product name.
        description (:obj:`str`): Product description.
        payload (:obj:`str`): Bot-defined invoice payload, 1-128 bytes. This will not be
            displayed to the user, use for your internal processes.
        provider_token (:obj:`str`): Payments provider token, obtained via Botfather.
        start_parameter (:obj:`str`): Unique deep-linking parameter that can be used to
            generate this invoice when used as a start parameter.
        currency (:obj:`str`): Three-letter ISO 4217 currency code.
        prices (List[:class:`telegram.LabeledPrice`)]: Price breakdown, a list of
            components (e.g. product price, tax, discount, delivery cost, delivery tax,
            bonus, etc.).
        provider_data (:obj:`str` | :obj:`object`, optional): JSON-encoded data about the
            invoice, which will be shared with the payment provider. A detailed
            description of required fields should be provided by the payment provider.
            When an object is passed, it will be encoded as JSON.
        photo_url (:obj:`str`, optional): URL of the product photo for the invoice. Can
            be a photo of the goods or a marketing image for a service.
        photo_size (:obj:`str`, optional): Photo size.
        photo_width (:obj:`int`, optional): Photo width.
        photo_height (:obj:`int`, optional): Photo height.
        need_name (:obj:`bool`, optional): Pass True, if you require the user's full
            name to complete the order.
        need_phone_number (:obj:`bool`, optional): Pass True, if you require the user's
            phone number to complete the order.
        need_email (:obj:`bool`, optional): Pass True, if you require the user's email
            to complete the order.
        need_shipping_address (:obj:`bool`, optional): Pass True, if you require the
            user's shipping address to complete the order.
        send_phone_number_to_provider (:obj:`bool`, optional): Pass True, if user's
            phone number should be sent to provider.
        send_email_to_provider (:obj:`bool`, optional): Pass True, if user's email
            address should be sent to provider.
        is_flexible (:obj:`bool`, optional): Pass True, if the final price depends on
            the shipping method.
        disable_notification (:obj:`bool`, optional): Sends the message silently. Users
            will receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
            original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface
            options. An inline keyboard. If empty, one 'Pay total price' button will be
            shown. If not empty, the first button must be a Pay button.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use
            it as the read timeout from the server (instead of the one specified during
            creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`
    """
    url = '{0}/sendInvoice'.format(self.base_url)

    data = {
        'chat_id': chat_id,
        'title': title,
        'description': description,
        'payload': payload,
        'provider_token': provider_token,
        'start_parameter': start_parameter,
        'currency': currency,
        'prices': [p.to_dict() for p in prices]
    }
    if provider_data is not None:
        # Strings are assumed to be pre-encoded JSON; anything else is serialized.
        if isinstance(provider_data, string_types):
            data['provider_data'] = provider_data
        else:
            data['provider_data'] = json.dumps(provider_data)
    if photo_url is not None:
        data['photo_url'] = photo_url
    if photo_size is not None:
        data['photo_size'] = photo_size
    if photo_width is not None:
        data['photo_width'] = photo_width
    if photo_height is not None:
        data['photo_height'] = photo_height
    if need_name is not None:
        data['need_name'] = need_name
    if need_phone_number is not None:
        data['need_phone_number'] = need_phone_number
    if need_email is not None:
        data['need_email'] = need_email
    if need_shipping_address is not None:
        data['need_shipping_address'] = need_shipping_address
    if is_flexible is not None:
        data['is_flexible'] = is_flexible
    if send_phone_number_to_provider is not None:
        # Bug fix: this previously assigned send_email_to_provider (copy-paste
        # error), so the caller's phone-number preference was never forwarded.
        data['send_phone_number_to_provider'] = send_phone_number_to_provider
    if send_email_to_provider is not None:
        data['send_email_to_provider'] = send_email_to_provider

    # The @message decorator performs the request and result conversion.
    return url, data
@log
def answer_shipping_query(self,
                          shipping_query_id,
                          ok,
                          shipping_options=None,
                          error_message=None,
                          timeout=None,
                          **kwargs):
    """
    Reply to a shipping query. If you sent an invoice requesting a shipping address
    and the parameter ``is_flexible`` was specified, the Bot API will send an Update
    with a shipping_query field to the bot.

    Args:
        shipping_query_id (:obj:`str`): Unique identifier for the query to be answered.
        ok (:obj:`bool`): Specify True if delivery to the specified address is possible
            and False if there are any problems.
        shipping_options (List[:class:`telegram.ShippingOption`]), optional]: Required
            if ok is True. A JSON-serialized array of available shipping options.
        error_message (:obj:`str`, optional): Required if ok is False. Human-readable
            error message explaining why the order cannot be completed (e.g. "Sorry,
            delivery to your desired address is unavailable"). Shown to the user.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool`; On success, True is returned.

    Raises:
        :class:`telegram.TelegramError`
    """
    ok = bool(ok)

    # Validate the mutually-exclusive argument combinations before touching the API.
    if ok:
        if shipping_options is None or error_message is not None:
            raise TelegramError(
                'answerShippingQuery: If ok is True, shipping_options '
                'should not be empty and there should not be error_message')
    elif shipping_options is not None or error_message is None:
        raise TelegramError(
            'answerShippingQuery: If ok is False, error_message '
            'should not be empty and there should not be shipping_options')

    endpoint = '{0}/answerShippingQuery'.format(self.base_url)

    request_data = {'shipping_query_id': shipping_query_id, 'ok': ok}
    if ok:
        request_data['shipping_options'] = [option.to_dict() for option in shipping_options]
    if error_message is not None:
        request_data['error_message'] = error_message
    request_data.update(kwargs)

    return self._request.post(endpoint, request_data, timeout=timeout)
@log
def answer_pre_checkout_query(self, pre_checkout_query_id, ok,
                              error_message=None, timeout=None, **kwargs):
    """
    Respond to a pre-checkout query. Once the user has confirmed their payment and
    shipping details, the Bot API sends the final confirmation in the form of an
    Update with the field pre_checkout_query.

    Note:
        The Bot API must receive an answer within 10 seconds after the pre-checkout
        query was sent.

    Args:
        pre_checkout_query_id (:obj:`str`): Unique identifier for the query to be
            answered.
        ok (:obj:`bool`): Specify True if everything is alright (goods are available,
            etc.) and the bot is ready to proceed with the order. Use False if there
            are any problems.
        error_message (:obj:`str`, optional): Required if ok is False. Human-readable
            error message explaining why the checkout cannot proceed (e.g. "Sorry,
            somebody just bought the last of our amazing black T-shirts while you were
            busy filling out your payment details. Please choose a different color or
            garment!"). Shown to the user.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool`: On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`
    """
    ok = bool(ok)

    # Exactly one of the two must hold: ok is True, or an error_message is given.
    if ok == (error_message is not None):
        raise TelegramError(
            'answerPreCheckoutQuery: If ok is True, there should '
            'not be error_message; if ok is False, error_message '
            'should not be empty')

    endpoint = '{0}/answerPreCheckoutQuery'.format(self.base_url)

    request_data = {'pre_checkout_query_id': pre_checkout_query_id, 'ok': ok}
    if error_message is not None:
        request_data['error_message'] = error_message
    request_data.update(kwargs)

    return self._request.post(endpoint, request_data, timeout=timeout)
@log
def restrict_chat_member(self, chat_id, user_id, until_date=None, can_send_messages=None,
                         can_send_media_messages=None, can_send_other_messages=None,
                         can_add_web_page_previews=None, timeout=None, **kwargs):
    """
    Restrict a user in a supergroup. The bot must be an administrator in the
    supergroup for this to work and must have the appropriate admin rights. Pass True
    for all boolean parameters to lift restrictions from a user.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target supergroup (in the format @supergroupusername).
        user_id (:obj:`int`): Unique identifier of the target user.
        until_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when
            restrictions will be lifted for the user, unix time. If the user is
            restricted for more than 366 days or less than 30 seconds from the current
            time, they are considered to be restricted forever.
        can_send_messages (:obj:`bool`, optional): Pass True, if the user can send text
            messages, contacts, locations and venues.
        can_send_media_messages (:obj:`bool`, optional): Pass True, if the user can
            send audios, documents, photos, videos, video notes and voice notes,
            implies can_send_messages.
        can_send_other_messages (:obj:`bool`, optional): Pass True, if the user can
            send animations, games, stickers and use inline bots, implies
            can_send_media_messages.
        can_add_web_page_previews (:obj:`bool`, optional): Pass True, if the user may
            add web page previews to their messages, implies can_send_media_messages.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments

    Returns:
        :obj:`bool`: Returns True on success.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/restrictChatMember'.format(self.base_url)

    payload = {'chat_id': chat_id, 'user_id': user_id}

    if until_date is not None:
        # datetime objects are converted to unix timestamps for the API.
        payload['until_date'] = (to_timestamp(until_date)
                                 if isinstance(until_date, datetime) else until_date)

    # Forward only the permission flags the caller explicitly set.
    permissions = (('can_send_messages', can_send_messages),
                   ('can_send_media_messages', can_send_media_messages),
                   ('can_send_other_messages', can_send_other_messages),
                   ('can_add_web_page_previews', can_add_web_page_previews))
    for field, value in permissions:
        if value is not None:
            payload[field] = value

    payload.update(kwargs)

    return self._request.post(endpoint, payload, timeout=timeout)
@log
def promote_chat_member(self, chat_id, user_id, can_change_info=None,
                        can_post_messages=None, can_edit_messages=None,
                        can_delete_messages=None, can_invite_users=None,
                        can_restrict_members=None, can_pin_messages=None,
                        can_promote_members=None, timeout=None, **kwargs):
    """
    Promote or demote a user in a supergroup or a channel. The bot must be an
    administrator in the chat for this to work and must have the appropriate admin
    rights. Pass False for all boolean parameters to demote a user.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target supergroup (in the format @supergroupusername).
        user_id (:obj:`int`): Unique identifier of the target user.
        can_change_info (:obj:`bool`, optional): Pass True, if the administrator can
            change chat title, photo and other settings.
        can_post_messages (:obj:`bool`, optional): Pass True, if the administrator can
            create channel posts, channels only.
        can_edit_messages (:obj:`bool`, optional): Pass True, if the administrator can
            edit messages of other users, channels only.
        can_delete_messages (:obj:`bool`, optional): Pass True, if the administrator
            can delete messages of other users.
        can_invite_users (:obj:`bool`, optional): Pass True, if the administrator can
            invite new users to the chat.
        can_restrict_members (:obj:`bool`, optional): Pass True, if the administrator
            can restrict, ban or unban chat members.
        can_pin_messages (:obj:`bool`, optional): Pass True, if the administrator can
            pin messages, supergroups only.
        can_promote_members (:obj:`bool`, optional): Pass True, if the administrator
            can add new administrators with a subset of his own privileges or demote
            administrators that he has promoted, directly or indirectly (promoted by
            administrators that were appointed by him).
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments

    Returns:
        :obj:`bool`: Returns True on success.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/promoteChatMember'.format(self.base_url)

    payload = {'chat_id': chat_id, 'user_id': user_id}

    # Forward only the admin-right flags the caller explicitly set.
    rights = (('can_change_info', can_change_info),
              ('can_post_messages', can_post_messages),
              ('can_edit_messages', can_edit_messages),
              ('can_delete_messages', can_delete_messages),
              ('can_invite_users', can_invite_users),
              ('can_restrict_members', can_restrict_members),
              ('can_pin_messages', can_pin_messages),
              ('can_promote_members', can_promote_members))
    for field, value in rights:
        if value is not None:
            payload[field] = value

    payload.update(kwargs)

    return self._request.post(endpoint, payload, timeout=timeout)
@log
def export_chat_invite_link(self, chat_id, timeout=None, **kwargs):
    """
    Export an invite link to a supergroup or a channel. The bot must be an
    administrator in the chat for this to work and must have the appropriate admin
    rights.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments

    Returns:
        :obj:`str`: Exported invite link on success.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/exportChatInviteLink'.format(self.base_url)
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    return self._request.post(endpoint, payload, timeout=timeout)
@log
def set_chat_photo(self, chat_id, photo, timeout=None, **kwargs):
    """Set a new profile photo for the chat.

    Photos can't be changed for private chats. The bot must be an administrator in
    the chat for this to work and must have the appropriate admin rights.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        photo (`filelike object`): New chat photo.
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments

    Note:
        In regular groups (non-supergroups), this method will only work if the
        'All Members Are Admins' setting is off in the target group.

    Returns:
        :obj:`bool`: Returns True on success.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/setChatPhoto'.format(self.base_url)

    # Wrap raw file-like objects for multipart upload.
    payload = {'chat_id': chat_id,
               'photo': InputFile(photo) if InputFile.is_file(photo) else photo}
    payload.update(kwargs)

    return self._request.post(endpoint, payload, timeout=timeout)
@log
def delete_chat_photo(self, chat_id, timeout=None, **kwargs):
    """
    Delete a chat photo. Photos can't be changed for private chats. The bot must be
    an administrator in the chat for this to work and must have the appropriate admin
    rights.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): Read timeout from the server
            for this request (instead of the connection pool default).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments

    Note:
        In regular groups (non-supergroups), this method will only work if the
        'All Members Are Admins' setting is off in the target group.

    Returns:
        :obj:`bool`: Returns ``True`` on success.

    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/deleteChatPhoto'.format(self.base_url)
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    return self._request.post(endpoint, payload, timeout=timeout)
@log
def set_chat_title(self, chat_id, title, timeout=None, **kwargs):
"""
Use this method to change the title of a chat. Titles can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate
admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
title (:obj:`str`): New chat title, 1-255 characters.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Note:
In regular groups (non-supergroups), this method will only work if the
'All Members Are Admins' setting is off in the target group.
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/setChatTitle'.format(self.base_url)
data = {'chat_id': chat_id, 'title': title}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def set_chat_description(self, chat_id, description, timeout=None, **kwargs):
"""
Use this method to change the description of a supergroup or a channel. The bot must be an
administrator in the chat for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
description (:obj:`str`): New chat description, 1-255 characters.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/setChatDescription'.format(self.base_url)
data = {'chat_id': chat_id, 'description': description}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def pin_chat_message(self, chat_id, message_id, disable_notification=None, timeout=None,
**kwargs):
"""
Use this method to pin a message in a supergroup. The bot must be an administrator in the
chat for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
message_id (:obj:`int`): Identifier of a message to pin.
disable_notification (:obj:`bool`, optional): Pass True, if it is not necessary to send
a notification to all group members about the new pinned message.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/pinChatMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'message_id': message_id}
if disable_notification is not None:
data['disable_notification'] = disable_notification
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def unpin_chat_message(self, chat_id, timeout=None, **kwargs):
"""
Use this method to unpin a message in a supergroup. The bot must be an administrator in the
chat for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/unpinChatMessage'.format(self.base_url)
data = {'chat_id': chat_id}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def get_sticker_set(self, name, timeout=None, **kwargs):
"""Use this method to get a sticker set.
Args:
name (:obj:`str`): Short name of the sticker set that is used in t.me/addstickers/
URLs (e.g., animals)
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.StickerSet`
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getStickerSet'.format(self.base_url)
data = {'name': name}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return StickerSet.de_json(result, self)
@log
def upload_sticker_file(self, user_id, png_sticker, timeout=None, **kwargs):
"""
Use this method to upload a .png file with a sticker for later use in
:attr:`create_new_sticker_set` and :attr:`add_sticker_to_set` methods (can be used multiple
times).
Note:
The png_sticker argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
user_id (:obj:`int`): User identifier of sticker file owner.
png_sticker (:obj:`str` | `filelike object`): Png image with the sticker,
must be up to 512 kilobytes in size, dimensions must not exceed 512px,
and either width or height must be exactly 512px.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.File`: The uploaded File
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/uploadStickerFile'.format(self.base_url)
if InputFile.is_file(png_sticker):
png_sticker = InputFile(png_sticker)
data = {'user_id': user_id, 'png_sticker': png_sticker}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return File.de_json(result, self)
@log
def create_new_sticker_set(self, user_id, name, title, png_sticker, emojis,
contains_masks=None, mask_position=None, timeout=None, **kwargs):
"""Use this method to create new sticker set owned by a user.
The bot will be able to edit the created sticker set.
Note:
The png_sticker argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
user_id (:obj:`int`): User identifier of created sticker set owner.
name (:obj:`str`): Short name of sticker set, to be used in t.me/addstickers/ URLs
(e.g., animals). Can contain only english letters, digits and underscores.
Must begin with a letter, can't contain consecutive underscores and
must end in "_by_<bot username>". <bot_username> is case insensitive.
1-64 characters.
title (:obj:`str`): Sticker set title, 1-64 characters.
png_sticker (:obj:`str` | `filelike object`): Png image with the sticker, must be up
to 512 kilobytes in size, dimensions must not exceed 512px,
and either width or height must be exactly 512px. Pass a file_id as a String to
send a file that already exists on the Telegram servers, pass an HTTP URL as a
String for Telegram to get a file from the Internet, or upload a new one
using multipart/form-data.
emojis (:obj:`str`): One or more emoji corresponding to the sticker.
contains_masks (:obj:`bool`, optional): Pass True, if a set of mask stickers should be
created.
mask_position (:class:`telegram.MaskPosition`, optional): Position where the mask
should be placed on faces.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/createNewStickerSet'.format(self.base_url)
if InputFile.is_file(png_sticker):
png_sticker = InputFile(png_sticker)
data = {'user_id': user_id, 'name': name, 'title': title, 'png_sticker': png_sticker,
'emojis': emojis}
if contains_masks is not None:
data['contains_masks'] = contains_masks
if mask_position is not None:
data['mask_position'] = mask_position
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def add_sticker_to_set(self, user_id, name, png_sticker, emojis, mask_position=None,
timeout=None, **kwargs):
"""Use this method to add a new sticker to a set created by the bot.
Note:
The png_sticker argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
user_id (:obj:`int`): User identifier of created sticker set owner.
name (:obj:`str`): Sticker set name.
png_sticker (:obj:`str` | `filelike object`): Png image with the sticker, must be up
to 512 kilobytes in size, dimensions must not exceed 512px,
and either width or height must be exactly 512px. Pass a file_id as a String to
send a file that already exists on the Telegram servers, pass an HTTP URL as a
String for Telegram to get a file from the Internet, or upload a new one
using multipart/form-data.
emojis (:obj:`str`): One or more emoji corresponding to the sticker.
mask_position (:class:`telegram.MaskPosition`, optional): Position where the mask
should beplaced on faces.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/addStickerToSet'.format(self.base_url)
if InputFile.is_file(png_sticker):
png_sticker = InputFile(png_sticker)
data = {'user_id': user_id, 'name': name, 'png_sticker': png_sticker, 'emojis': emojis}
if mask_position is not None:
data['mask_position'] = mask_position
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def set_sticker_position_in_set(self, sticker, position, timeout=None, **kwargs):
"""Use this method to move a sticker in a set created by the bot to a specific position.
Args:
sticker (:obj:`str`): File identifier of the sticker.
position (:obj:`int`): New sticker position in the set, zero-based.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/setStickerPositionInSet'.format(self.base_url)
data = {'sticker': sticker, 'position': position}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def delete_sticker_from_set(self, sticker, timeout=None, **kwargs):
"""Use this method to delete a sticker from a set created by the bot.
Args:
sticker (:obj:`str`): File identifier of the sticker.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/deleteStickerFromSet'.format(self.base_url)
data = {'sticker': sticker}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def set_passport_data_errors(self, user_id, errors, timeout=None, **kwargs):
"""
Informs a user that some of the Telegram Passport elements they provided contains errors.
The user will not be able to re-submit their Passport to you until the errors are fixed
(the contents of the field for which you returned the error must change). Returns True
on success.
Use this if the data submitted by the user doesn't satisfy the standards your service
requires for any reason. For example, if a birthday date seems invalid, a submitted
document is blurry, a scan shows evidence of tampering, etc. Supply some details in the
error message to make sure the user knows how to correct the issues.
Args:
user_id (:obj:`int`): User identifier
errors (List[:class:`PassportElementError`]): A JSON-serialized array describing the
errors.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url_ = '{0}/setPassportDataErrors'.format(self.base_url)
data = {'user_id': user_id, 'errors': [error.to_dict() for error in errors]}
data.update(kwargs)
result = self._request.post(url_, data, timeout=timeout)
return result
def to_dict(self):
data = {'id': self.id, 'username': self.username, 'first_name': self.username}
if self.last_name:
data['last_name'] = self.last_name
return data
def __reduce__(self):
return (self.__class__, (self.token, self.base_url.replace(self.token, ''),
self.base_file_url.replace(self.token, '')))
# camelCase aliases
getMe = get_me
"""Alias for :attr:`get_me`"""
sendMessage = send_message
"""Alias for :attr:`send_message`"""
deleteMessage = delete_message
"""Alias for :attr:`delete_message`"""
forwardMessage = forward_message
"""Alias for :attr:`forward_message`"""
sendPhoto = send_photo
"""Alias for :attr:`send_photo`"""
sendAudio = send_audio
"""Alias for :attr:`send_audio`"""
sendDocument = send_document
"""Alias for :attr:`send_document`"""
sendSticker = send_sticker
"""Alias for :attr:`send_sticker`"""
sendVideo = send_video
"""Alias for :attr:`send_video`"""
sendAnimation = send_animation
"""Alias for :attr:`send_animation`"""
sendVoice = send_voice
"""Alias for :attr:`send_voice`"""
sendVideoNote = send_video_note
"""Alias for :attr:`send_video_note`"""
sendMediaGroup = send_media_group
"""Alias for :attr:`send_media_group`"""
sendLocation = send_location
"""Alias for :attr:`send_location`"""
editMessageLiveLocation = edit_message_live_location
"""Alias for :attr:`edit_message_live_location`"""
stopMessageLiveLocation = stop_message_live_location
"""Alias for :attr:`stop_message_live_location`"""
sendVenue = send_venue
"""Alias for :attr:`send_venue`"""
sendContact = send_contact
"""Alias for :attr:`send_contact`"""
sendGame = send_game
"""Alias for :attr:`send_game`"""
sendChatAction = send_chat_action
"""Alias for :attr:`send_chat_action`"""
answerInlineQuery = answer_inline_query
"""Alias for :attr:`answer_inline_query`"""
getUserProfilePhotos = get_user_profile_photos
"""Alias for :attr:`get_user_profile_photos`"""
getFile = get_file
"""Alias for :attr:`get_file`"""
kickChatMember = kick_chat_member
"""Alias for :attr:`kick_chat_member`"""
unbanChatMember = unban_chat_member
"""Alias for :attr:`unban_chat_member`"""
answerCallbackQuery = answer_callback_query
"""Alias for :attr:`answer_callback_query`"""
editMessageText = edit_message_text
"""Alias for :attr:`edit_message_text`"""
editMessageCaption = edit_message_caption
"""Alias for :attr:`edit_message_caption`"""
editMessageMedia = edit_message_media
"""Alias for :attr:`edit_message_media`"""
editMessageReplyMarkup = edit_message_reply_markup
"""Alias for :attr:`edit_message_reply_markup`"""
getUpdates = get_updates
"""Alias for :attr:`get_updates`"""
setWebhook = set_webhook
"""Alias for :attr:`set_webhook`"""
deleteWebhook = delete_webhook
"""Alias for :attr:`delete_webhook`"""
leaveChat = leave_chat
"""Alias for :attr:`leave_chat`"""
getChat = get_chat
"""Alias for :attr:`get_chat`"""
getChatAdministrators = get_chat_administrators
"""Alias for :attr:`get_chat_administrators`"""
getChatMember = get_chat_member
"""Alias for :attr:`get_chat_member`"""
setChatStickerSet = set_chat_sticker_set
"""Alias for :attr:`set_chat_sticker_set`"""
deleteChatStickerSet = delete_chat_sticker_set
"""Alias for :attr:`delete_chat_sticker_set`"""
getChatMembersCount = get_chat_members_count
"""Alias for :attr:`get_chat_members_count`"""
getWebhookInfo = get_webhook_info
"""Alias for :attr:`get_webhook_info`"""
setGameScore = set_game_score
"""Alias for :attr:`set_game_score`"""
getGameHighScores = get_game_high_scores
"""Alias for :attr:`get_game_high_scores`"""
sendInvoice = send_invoice
"""Alias for :attr:`send_invoice`"""
answerShippingQuery = answer_shipping_query
"""Alias for :attr:`answer_shipping_query`"""
answerPreCheckoutQuery = answer_pre_checkout_query
"""Alias for :attr:`answer_pre_checkout_query`"""
restrictChatMember = restrict_chat_member
"""Alias for :attr:`restrict_chat_member`"""
promoteChatMember = promote_chat_member
"""Alias for :attr:`promote_chat_member`"""
exportChatInviteLink = export_chat_invite_link
"""Alias for :attr:`export_chat_invite_link`"""
setChatPhoto = set_chat_photo
"""Alias for :attr:`set_chat_photo`"""
deleteChatPhoto = delete_chat_photo
"""Alias for :attr:`delete_chat_photo`"""
setChatTitle = set_chat_title
"""Alias for :attr:`set_chat_title`"""
setChatDescription = set_chat_description
"""Alias for :attr:`set_chat_description`"""
pinChatMessage = pin_chat_message
"""Alias for :attr:`pin_chat_message`"""
unpinChatMessage = unpin_chat_message
"""Alias for :attr:`unpin_chat_message`"""
getStickerSet = get_sticker_set
"""Alias for :attr:`get_sticker_set`"""
uploadStickerFile = upload_sticker_file
"""Alias for :attr:`upload_sticker_file`"""
createNewStickerSet = create_new_sticker_set
"""Alias for :attr:`create_new_sticker_set`"""
addStickerToSet = add_sticker_to_set
"""Alias for :attr:`add_sticker_to_set`"""
setStickerPositionInSet = set_sticker_position_in_set
"""Alias for :attr:`set_sticker_position_in_set`"""
deleteStickerFromSet = delete_sticker_from_set
"""Alias for :attr:`delete_sticker_from_set`"""
setPassportDataErrors = set_passport_data_errors
"""Alias for :attr:`set_passport_data_errors`"""
|
<filename>venv/lib/python2.7/site-packages/telegram/bot.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=E0611,E0213,E1102,C0103,E1101,W0613,R0913,R0904
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Bot."""
import functools
try:
import ujson as json
except ImportError:
import json
import logging
import warnings
from datetime import datetime
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from future.utils import string_types
from telegram import (User, Message, Update, Chat, ChatMember, UserProfilePhotos, File,
ReplyMarkup, TelegramObject, WebhookInfo, GameHighScore, StickerSet,
PhotoSize, Audio, Document, Sticker, Video, Animation, Voice, VideoNote,
Location, Venue, Contact, InputFile)
from telegram.error import InvalidToken, TelegramError
from telegram.utils.helpers import to_timestamp
from telegram.utils.request import Request
logging.getLogger(__name__).addHandler(logging.NullHandler())
def info(func):
    """Decorator that lazily resolves the bot's own credentials.

    Before the wrapped method runs, ``self.bot`` is populated via
    :meth:`get_me` if it has not been fetched yet.
    """

    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        # First access triggers a getMe round trip; afterwards the cached
        # User object is reused.
        if not self.bot:
            self.get_me()
        return func(self, *args, **kwargs)

    return decorator
def log(func):
    """Decorator that logs entry, result, and exit of *func* at DEBUG level."""
    # Logger is keyed on the wrapped function's module, created once at
    # decoration time rather than per call.
    logger = logging.getLogger(func.__module__)

    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        logger.debug('Entering: %s', func.__name__)
        outcome = func(self, *args, **kwargs)
        logger.debug(outcome)
        logger.debug('Exiting: %s', func.__name__)
        return outcome

    return decorator
def message(func):
    """Decorator for Bot methods that send messages.

    The wrapped method returns ``(url, data)``; this decorator merges the
    common optional parameters (reply_to_message_id, disable_notification,
    reply_markup), performs the POST, and converts the API response into a
    :class:`telegram.Message` (or passes ``True`` through unchanged).
    """

    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        url, data = func(self, *args, **kwargs)
        # Only forward the shared optional parameters that were actually set.
        for key in ('reply_to_message_id', 'disable_notification'):
            if kwargs.get(key):
                data[key] = kwargs.get(key)
        markup = kwargs.get('reply_markup')
        if markup:
            if isinstance(markup, ReplyMarkup):
                data['reply_markup'] = markup.to_json()
            else:
                data['reply_markup'] = markup
        result = self._request.post(url, data, timeout=kwargs.get('timeout'))
        # Some endpoints answer with a bare ``True`` instead of a Message.
        if result is True:
            return result
        return Message.de_json(result, self)

    return decorator
class Bot(TelegramObject):
"""This object represents a Telegram Bot.
Args:
token (:obj:`str`): Bot's unique authentication.
base_url (:obj:`str`, optional): Telegram Bot API service URL.
base_file_url (:obj:`str`, optional): Telegram Bot API file URL.
request (:obj:`telegram.utils.request.Request`, optional): Pre initialized
:obj:`telegram.utils.request.Request`.
private_key (:obj:`bytes`, optional): Private key for decryption of telegram passport data.
private_key_password (:obj:`bytes`, optional): Password for above private key.
"""
def __init__(self, token, base_url=None, base_file_url=None, request=None, private_key=None,
private_key_password=None):
self.token = self._validate_token(token)
if base_url is None:
base_url = 'https://api.telegram.org/bot'
if base_file_url is None:
base_file_url = 'https://api.telegram.org/file/bot'
self.base_url = str(base_url) + str(self.token)
self.base_file_url = str(base_file_url) + str(self.token)
self.bot = None
self._request = request or Request()
self.logger = logging.getLogger(__name__)
if private_key:
self.private_key = serialization.load_pem_private_key(private_key,
password=<PASSWORD>_key_password,
backend=default_backend())
    @property
    def request(self):
        # Read-only accessor for the underlying Request object used for all
        # HTTP calls (either the one passed to __init__ or a default Request()).
        return self._request
@staticmethod
def _validate_token(token):
"""A very basic validation on token."""
if any(x.isspace() for x in token):
raise InvalidToken()
left, sep, _right = token.partition(':')
if (not sep) or (not left.isdigit()) or (len(left) < 3):
raise InvalidToken()
return token
    @property
    @info
    def id(self):
        """:obj:`int`: Unique identifier for this bot."""
        # @info guarantees self.bot has been populated via get_me() first.
        return self.bot.id
    @property
    @info
    def first_name(self):
        """:obj:`str`: Bot's first name."""
        # @info guarantees self.bot has been populated via get_me() first.
        return self.bot.first_name
    @property
    @info
    def last_name(self):
        """:obj:`str`: Optional. Bot's last name."""
        # @info guarantees self.bot has been populated via get_me() first.
        return self.bot.last_name
    @property
    @info
    def username(self):
        """:obj:`str`: Bot's username."""
        # @info guarantees self.bot has been populated via get_me() first.
        return self.bot.username
@property
def name(self):
""":obj:`str`: Bot's @username."""
return '@{0}'.format(self.username)
@log
def get_me(self, timeout=None, **kwargs):
"""A simple method for testing your bot's auth token. Requires no parameters.
Args:
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
Returns:
:class:`telegram.User`: A :class:`telegram.User` instance representing that bot if the
credentials are valid, :obj:`None` otherwise.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getMe'.format(self.base_url)
result = self._request.get(url, timeout=timeout)
self.bot = User.de_json(result, self)
return self.bot
@log
@message
def send_message(self,
chat_id,
text,
parse_mode=None,
disable_web_page_preview=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
"""Use this method to send text messages.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
text (:obj:`str`): Text of the message to be sent. Max 4096 characters. Also found as
:attr:`telegram.constants.MAX_MESSAGE_LENGTH`.
parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold,
italic, fixed-width text or inline URLs in your bot's message. See the constants in
:class:`telegram.ParseMode` for the available modes.
disable_web_page_preview (:obj:`bool`, optional): Disables link previews for links in
this message.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options.
A JSON-serialized object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'text': text}
if parse_mode:
data['parse_mode'] = parse_mode
if disable_web_page_preview:
data['disable_web_page_preview'] = disable_web_page_preview
return url, data
@log
def delete_message(self, chat_id, message_id, timeout=None, **kwargs):
"""
Use this method to delete a message. A message can only be deleted if it was sent less
than 48 hours ago. Any such recently sent outgoing message may be deleted. Additionally,
if the bot is an administrator in a group chat, it can delete any message. If the bot is
an administrator in a supergroup, it can delete messages from any other user and service
messages about people joining or leaving the group (other types of service messages may
only be removed by the group creator). In channels, bots can only remove their own
messages.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
message_id (:obj:`int`): Identifier of the message to delete.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/deleteMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'message_id': message_id}
result = self._request.post(url, data, timeout=timeout)
return result
@log
@message
def forward_message(self,
chat_id,
from_chat_id,
message_id,
disable_notification=False,
timeout=None,
**kwargs):
"""Use this method to forward messages of any kind.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
from_chat_id (:obj:`int` | :obj:`str`): Unique identifier for the chat where the
original message was sent (or channel username in the format @channelusername).
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
message_id (:obj:`int`): Message identifier in the chat specified in from_chat_id.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout
from the server (instead of the one specified during creation of the connection
pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/forwardMessage'.format(self.base_url)
data = {}
if chat_id:
data['chat_id'] = chat_id
if from_chat_id:
data['from_chat_id'] = from_chat_id
if message_id:
data['message_id'] = message_id
return url, data
@log
@message
def send_photo(self,
chat_id,
photo,
caption=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=20,
parse_mode=None,
**kwargs):
"""Use this method to send photos.
Note:
The photo argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target channel (in the format @channelusername).
photo (:obj:`str` | `filelike object` | :class:`telegram.PhotoSize`): Photo to send.
Pass a file_id as String to send a photo that exists on the Telegram servers
(recommended), pass an HTTP URL as a String for Telegram to get a photo from the
Internet, or upload a new photo using multipart/form-data. Lastly you can pass
an existing :class:`telegram.PhotoSize` object to send.
caption (:obj:`str`, optional): Photo caption (may also be used when resending photos
by file_id), 0-200 characters.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
show bold, italic, fixed-width text or inline URLs in the media caption. See the
constants in :class:`telegram.ParseMode` for the available modes.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
to remove reply keyboard or to force a reply from the user.
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendPhoto'.format(self.base_url)
if isinstance(photo, PhotoSize):
photo = photo.file_id
elif InputFile.is_file(photo):
photo = InputFile(photo)
data = {'chat_id': chat_id, 'photo': photo}
if caption:
data['caption'] = caption
if parse_mode:
data['parse_mode'] = parse_mode
return url, data
@log
@message
def send_audio(self,
               chat_id,
               audio,
               duration=None,
               performer=None,
               title=None,
               caption=None,
               disable_notification=False,
               reply_to_message_id=None,
               reply_markup=None,
               timeout=20,
               parse_mode=None,
               thumb=None,
               **kwargs):
    """Send an audio file to be displayed in Telegram's music player.

    The audio must be in .mp3 format. Bots can currently send audio files of
    up to 50 MB in size (this limit may change in the future). For voice
    messages, use :attr:`send_voice` instead.

    Note:
        ``audio`` may be a file_id, an URL or a file opened from disk
        (``open(filename, 'rb')``).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        audio (:obj:`str` | `filelike object` | :class:`telegram.Audio`): Audio
            to send: a file_id of a file already on the Telegram servers
            (recommended), an HTTP URL for Telegram to fetch, a new upload via
            multipart/form-data, or an existing :class:`telegram.Audio` object.
        caption (:obj:`str`, optional): Audio caption, 0-200 characters.
        parse_mode (:obj:`str`, optional): Markdown or HTML, to render bold,
            italic, fixed-width text or inline URLs in the caption. See the
            constants in :class:`telegram.ParseMode` for the available modes.
        duration (:obj:`int`, optional): Duration of the audio in seconds.
        performer (:obj:`str`, optional): Performer.
        title (:obj:`str`, optional): Track name.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        thumb (`filelike object`, optional): Thumbnail of the file sent; JPEG,
            less than 200 kB, width and height not exceeding 90. Ignored when
            the file itself is passed as a string or file_id.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendAudio'.format(self.base_url)

    # Normalize the audio argument to something the Bot API accepts.
    if isinstance(audio, Audio):
        audio = audio.file_id
    elif InputFile.is_file(audio):
        audio = InputFile(audio)

    data = {'chat_id': chat_id, 'audio': audio}

    # Transmit optional metadata only when actually supplied (truthy).
    for field, value in (('duration', duration), ('performer', performer),
                         ('title', title), ('caption', caption),
                         ('parse_mode', parse_mode)):
        if value:
            data[field] = value

    if thumb:
        if InputFile.is_file(thumb):
            thumb = InputFile(thumb, attach=True)
        data['thumb'] = thumb

    return url, data
@log
@message
def send_document(self,
                  chat_id,
                  document,
                  filename=None,
                  caption=None,
                  disable_notification=False,
                  reply_to_message_id=None,
                  reply_markup=None,
                  timeout=20,
                  parse_mode=None,
                  thumb=None,
                  **kwargs):
    """Send a general file.

    Note:
        ``document`` may be a file_id, an URL or a file opened from disk
        (``open(filename, 'rb')``).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        document (:obj:`str` | `filelike object` | :class:`telegram.Document`):
            File to send: a file_id of a file already on the Telegram servers
            (recommended), an HTTP URL for Telegram to fetch, a new upload via
            multipart/form-data, or an existing :class:`telegram.Document`
            object.
        filename (:obj:`str`, optional): File name shown in the Telegram
            message (useful e.g. for files produced by the temp module).
            Undocumented.
        caption (:obj:`str`, optional): Document caption (may also be used when
            resending documents by file_id), 0-200 characters.
        parse_mode (:obj:`str`, optional): Markdown or HTML, to render bold,
            italic, fixed-width text or inline URLs in the caption. See the
            constants in :class:`telegram.ParseMode` for the available modes.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        thumb (`filelike object`, optional): Thumbnail of the file sent; JPEG,
            less than 200 kB, width and height not exceeding 90. Ignored when
            the file itself is passed as a string or file_id.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendDocument'.format(self.base_url)

    # Accept Document objects, fresh uploads, and plain file_id/URL strings.
    if isinstance(document, Document):
        document = document.file_id
    elif InputFile.is_file(document):
        document = InputFile(document, filename=filename)

    data = {'chat_id': chat_id, 'document': document}

    for field, value in (('caption', caption), ('parse_mode', parse_mode)):
        if value:
            data[field] = value

    if thumb:
        if InputFile.is_file(thumb):
            thumb = InputFile(thumb, attach=True)
        data['thumb'] = thumb

    return url, data
@log
@message
def send_sticker(self,
                 chat_id,
                 sticker,
                 disable_notification=False,
                 reply_to_message_id=None,
                 reply_markup=None,
                 timeout=20,
                 **kwargs):
    """Send a .webp sticker.

    Note:
        ``sticker`` may be a file_id, an URL or a file opened from disk
        (``open(filename, 'rb')``).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        sticker (:obj:`str` | `filelike object` :class:`telegram.Sticker`):
            Sticker to send: a file_id of a file already on the Telegram
            servers (recommended), an HTTP URL of a .webp file for Telegram to
            fetch, a new upload via multipart/form-data, or an existing
            :class:`telegram.Sticker` object.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendSticker'.format(self.base_url)

    # Reduce a Sticker object or an on-disk file to an uploadable value.
    if isinstance(sticker, Sticker):
        sticker = sticker.file_id
    elif InputFile.is_file(sticker):
        sticker = InputFile(sticker)

    return url, {'chat_id': chat_id, 'sticker': sticker}
@log
@message
def send_video(self,
               chat_id,
               video,
               duration=None,
               caption=None,
               disable_notification=False,
               reply_to_message_id=None,
               reply_markup=None,
               timeout=20,
               width=None,
               height=None,
               parse_mode=None,
               supports_streaming=None,
               thumb=None,
               **kwargs):
    """Send a video file.

    Telegram clients support mp4 videos; other formats may be sent as
    Document.

    Note:
        ``video`` may be a file_id, an URL or a file opened from disk
        (``open(filename, 'rb')``).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        video (:obj:`str` | `filelike object` | :class:`telegram.Video`): Video
            to send: a file_id of a file already on the Telegram servers
            (recommended), an HTTP URL for Telegram to fetch, a new upload via
            multipart/form-data, or an existing :class:`telegram.Video` object.
        duration (:obj:`int`, optional): Duration of the video in seconds.
        width (:obj:`int`, optional): Video width.
        height (:obj:`int`, optional): Video height.
        caption (:obj:`str`, optional): Video caption (may also be used when
            resending videos by file_id), 0-200 characters.
        parse_mode (:obj:`str`, optional): Markdown or HTML, to render bold,
            italic, fixed-width text or inline URLs in the caption. See the
            constants in :class:`telegram.ParseMode` for the available modes.
        supports_streaming (:obj:`bool`, optional): Pass True if the uploaded
            video is suitable for streaming.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        thumb (`filelike object`, optional): Thumbnail of the file sent; JPEG,
            less than 200 kB, width and height not exceeding 90. Ignored when
            the file itself is passed as a string or file_id.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendVideo'.format(self.base_url)

    if isinstance(video, Video):
        video = video.file_id
    elif InputFile.is_file(video):
        video = InputFile(video)

    data = {'chat_id': chat_id, 'video': video}

    # Only forward optional fields that were actually supplied (truthy).
    for field, value in (('duration', duration), ('caption', caption),
                         ('parse_mode', parse_mode),
                         ('supports_streaming', supports_streaming),
                         ('width', width), ('height', height)):
        if value:
            data[field] = value

    if thumb:
        if InputFile.is_file(thumb):
            thumb = InputFile(thumb, attach=True)
        data['thumb'] = thumb

    return url, data
@log
@message
def send_video_note(self,
                    chat_id,
                    video_note,
                    duration=None,
                    length=None,
                    disable_notification=False,
                    reply_to_message_id=None,
                    reply_markup=None,
                    timeout=20,
                    thumb=None,
                    **kwargs):
    """Send a video message (video note).

    Note:
        ``video_note`` may be a file_id or a file opened from disk
        (``open(filename, 'rb')``). Sending video notes by a URL is currently
        unsupported.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        video_note (:obj:`str` | `filelike object` | :class:`telegram.VideoNote`):
            Video note to send: a file_id of a file already on the Telegram
            servers (recommended), a new upload via multipart/form-data, or an
            existing :class:`telegram.VideoNote` object.
        duration (:obj:`int`, optional): Duration of the video in seconds.
        length (:obj:`int`, optional): Video width and height.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        thumb (`filelike object`, optional): Thumbnail of the file sent; JPEG,
            less than 200 kB, width and height not exceeding 90. Ignored when
            the file itself is passed as a string or file_id.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendVideoNote'.format(self.base_url)

    if isinstance(video_note, VideoNote):
        video_note = video_note.file_id
    elif InputFile.is_file(video_note):
        video_note = InputFile(video_note)

    data = {'chat_id': chat_id, 'video_note': video_note}

    # Explicit None checks: 0 is a legitimate value for these fields.
    for field, value in (('duration', duration), ('length', length)):
        if value is not None:
            data[field] = value

    if thumb:
        if InputFile.is_file(thumb):
            thumb = InputFile(thumb, attach=True)
        data['thumb'] = thumb

    return url, data
@log
@message
def send_animation(self,
                   chat_id,
                   animation,
                   duration=None,
                   width=None,
                   height=None,
                   thumb=None,
                   caption=None,
                   parse_mode=None,
                   disable_notification=False,
                   reply_to_message_id=None,
                   reply_markup=None,
                   timeout=20,
                   **kwargs):
    """Send an animation file (GIF or H.264/MPEG-4 AVC video without sound).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        animation (:obj:`str` | `filelike object` | :class:`telegram.Animation`):
            Animation to send: a file_id of a file already on the Telegram
            servers (recommended), an HTTP URL for Telegram to fetch, a new
            upload via multipart/form-data, or an existing
            :class:`telegram.Animation` object.
        duration (:obj:`int`, optional): Duration of the animation in seconds.
        width (:obj:`int`, optional): Animation width.
        height (:obj:`int`, optional): Animation height.
        thumb (`filelike object`, optional): Thumbnail of the file sent; JPEG,
            less than 200 kB, width and height not exceeding 90. Ignored when
            the file itself is passed as a string or file_id.
        caption (:obj:`str`, optional): Animation caption (may also be used
            when resending animations by file_id), 0-200 characters.
        parse_mode (:obj:`str`, optional): Markdown or HTML, to render bold,
            italic, fixed-width text or inline URLs in the caption. See the
            constants in :class:`telegram.ParseMode` for the available modes.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendAnimation'.format(self.base_url)

    if isinstance(animation, Animation):
        animation = animation.file_id
    elif InputFile.is_file(animation):
        animation = InputFile(animation)

    data = {'chat_id': chat_id, 'animation': animation}

    for field, value in (('duration', duration), ('width', width),
                         ('height', height)):
        if value:
            data[field] = value

    if thumb:
        if InputFile.is_file(thumb):
            thumb = InputFile(thumb, attach=True)
        data['thumb'] = thumb

    for field, value in (('caption', caption), ('parse_mode', parse_mode)):
        if value:
            data[field] = value

    return url, data
@log
@message
def send_voice(self,
               chat_id,
               voice,
               duration=None,
               caption=None,
               disable_notification=False,
               reply_to_message_id=None,
               reply_markup=None,
               timeout=20,
               parse_mode=None,
               **kwargs):
    """Send an audio file displayed as a playable voice message.

    For this to work, the audio must be an .ogg file encoded with OPUS; other
    formats may be sent as Audio or Document.

    Note:
        ``voice`` may be a file_id, an URL or a file opened from disk
        (``open(filename, 'rb')``).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        voice (:obj:`str` | `filelike object` | :class:`telegram.Voice`): Voice
            file to send: a file_id of a file already on the Telegram servers
            (recommended), an HTTP URL for Telegram to fetch, a new upload via
            multipart/form-data, or an existing :class:`telegram.Voice` object.
        caption (:obj:`str`, optional): Voice message caption, 0-200 characters.
        parse_mode (:obj:`str`, optional): Markdown or HTML, to render bold,
            italic, fixed-width text or inline URLs in the caption. See the
            constants in :class:`telegram.ParseMode` for the available modes.
        duration (:obj:`int`, optional): Duration of the voice message in
            seconds.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendVoice'.format(self.base_url)

    if isinstance(voice, Voice):
        voice = voice.file_id
    elif InputFile.is_file(voice):
        voice = InputFile(voice)

    data = {'chat_id': chat_id, 'voice': voice}

    for field, value in (('duration', duration), ('caption', caption),
                         ('parse_mode', parse_mode)):
        if value:
            data[field] = value

    return url, data
@log
def send_media_group(self,
                     chat_id,
                     media,
                     disable_notification=None,
                     reply_to_message_id=None,
                     timeout=20,
                     **kwargs):
    """Send a group of photos or videos as an album.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        media (List[:class:`telegram.InputMedia`]): An array describing the
            photos and videos to be sent; must include 2–10 items.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        timeout (:obj:`int` | :obj:`float`, optional): Send file timeout
            (default: 20 seconds).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        List[:class:`telegram.Message`]: An array of the sent Messages.

    Raises:
        :class:`telegram.TelegramError`

    """
    endpoint = '{0}/sendMediaGroup'.format(self.base_url)

    payload = {'chat_id': chat_id, 'media': media}
    if reply_to_message_id:
        payload['reply_to_message_id'] = reply_to_message_id
    if disable_notification:
        payload['disable_notification'] = disable_notification

    # Unlike most send_* methods, this one posts directly (no @message
    # decorator) and converts each returned item into a Message object.
    responses = self._request.post(endpoint, payload, timeout=timeout)
    return [Message.de_json(item, self) for item in responses]
@log
@message
def send_location(self,
                  chat_id,
                  latitude=None,
                  longitude=None,
                  disable_notification=False,
                  reply_to_message_id=None,
                  reply_markup=None,
                  timeout=None,
                  location=None,
                  live_period=None,
                  **kwargs):
    """Use this method to send point on the map.

    Note:
        You can either supply a :obj:`latitude` and :obj:`longitude` or a
        :obj:`location`, but not both.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        latitude (:obj:`float`, optional): Latitude of location.
        longitude (:obj:`float`, optional): Longitude of location.
        location (:class:`telegram.Location`, optional): The location to send.
        live_period (:obj:`int`, optional): Period in seconds for which the location will be
            updated, should be between 60 and 86400.
        disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
            receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
            original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
            JSON-serialized object for an inline keyboard, custom reply keyboard,
            instructions to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        ValueError: If neither ``location`` nor both ``latitude`` and ``longitude``
            are given, or if both forms are given at once.
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendLocation'.format(self.base_url)

    # Compare against None explicitly: 0.0 is a valid latitude/longitude
    # (equator / prime meridian) and must not be treated as "missing",
    # which the previous truthiness-based ``all([latitude, longitude])``
    # check did. Also join the error strings with a proper space.
    if not ((latitude is not None and longitude is not None) or location):
        raise ValueError("Either location or latitude and longitude must be passed "
                         "as argument.")

    if not ((latitude is not None or longitude is not None) ^ bool(location)):
        raise ValueError("Either location or latitude and longitude must be passed "
                         "as argument. Not both.")

    if isinstance(location, Location):
        latitude = location.latitude
        longitude = location.longitude

    data = {'chat_id': chat_id, 'latitude': latitude, 'longitude': longitude}

    if live_period:
        data['live_period'] = live_period

    return url, data
@log
@message
def edit_message_live_location(self,
                               chat_id=None,
                               message_id=None,
                               inline_message_id=None,
                               latitude=None,
                               longitude=None,
                               location=None,
                               reply_markup=None,
                               **kwargs):
    """Use this method to edit live location messages sent by the bot or via the bot
    (for inline bots). A location can be edited until its :attr:`live_period` expires or
    editing is explicitly disabled by a call to :attr:`stop_message_live_location`.

    Note:
        You can either supply a :obj:`latitude` and :obj:`longitude` or a
        :obj:`location`, but not both.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
            Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
            specified. Identifier of the inline message.
        latitude (:obj:`float`, optional): Latitude of location.
        longitude (:obj:`float`, optional): Longitude of location.
        location (:class:`telegram.Location`, optional): The location to send.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
            JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
            to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).

    Returns:
        :class:`telegram.Message`: On success the edited message.

    Raises:
        ValueError: If neither ``location`` nor both ``latitude`` and ``longitude``
            are given, or if both forms are given at once.

    """
    url = '{0}/editMessageLiveLocation'.format(self.base_url)

    # Compare against None explicitly: 0.0 is a valid latitude/longitude
    # (equator / prime meridian) and must not be treated as "missing",
    # which the previous truthiness-based ``all([latitude, longitude])``
    # check did. Also join the error strings with a proper space.
    if not ((latitude is not None and longitude is not None) or location):
        raise ValueError("Either location or latitude and longitude must be passed "
                         "as argument.")

    if not ((latitude is not None or longitude is not None) ^ bool(location)):
        raise ValueError("Either location or latitude and longitude must be passed "
                         "as argument. Not both.")

    if isinstance(location, Location):
        latitude = location.latitude
        longitude = location.longitude

    data = {'latitude': latitude, 'longitude': longitude}

    if chat_id:
        data['chat_id'] = chat_id
    if message_id:
        data['message_id'] = message_id
    if inline_message_id:
        data['inline_message_id'] = inline_message_id

    return url, data
@log
@message
def stop_message_live_location(self,
                               chat_id=None,
                               message_id=None,
                               inline_message_id=None,
                               reply_markup=None,
                               **kwargs):
    """Stop updating a live location message sent by the bot or via the bot
    (for inline bots) before its live_period expires.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        message_id (:obj:`int`, optional): Required if inline_message_id is not
            specified. Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and
            message_id are not specified. Identifier of the inline message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        timeout (:obj:`int` | :obj:`float`, optional): If specified, used as the
            read timeout from the server (instead of the one set when the
            connection pool was created).

    Returns:
        :class:`telegram.Message`: On success the edited message.

    """
    endpoint = '{0}/stopMessageLiveLocation'.format(self.base_url)

    # Only include the identifiers the caller actually supplied.
    payload = {}
    for field, value in (('chat_id', chat_id), ('message_id', message_id),
                         ('inline_message_id', inline_message_id)):
        if value:
            payload[field] = value

    return endpoint, payload
@log
@message
def send_venue(self,
               chat_id,
               latitude=None,
               longitude=None,
               title=None,
               address=None,
               foursquare_id=None,
               disable_notification=False,
               reply_to_message_id=None,
               reply_markup=None,
               timeout=None,
               venue=None,
               foursquare_type=None,
               **kwargs):
    """Use this method to send information about a venue.

    Note:
        you can either supply :obj:`venue`, or :obj:`latitude`, :obj:`longitude`,
        :obj:`title` and :obj:`address` and optionally :obj:`foursquare_id` and optionally
        :obj:`foursquare_type`.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        latitude (:obj:`float`, optional): Latitude of venue.
        longitude (:obj:`float`, optional): Longitude of venue.
        title (:obj:`str`, optional): Name of the venue.
        address (:obj:`str`, optional): Address of the venue.
        foursquare_id (:obj:`str`, optional): Foursquare identifier of the venue.
        foursquare_type (:obj:`str`, optional): Foursquare type of the venue, if known.
            (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or
            "food/icecream".)
        venue (:class:`telegram.Venue`, optional): The venue to send.
        disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
            receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
            original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
            JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
            to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        ValueError: If neither ``venue`` nor all of ``latitude``, ``longitude``,
            ``address`` and ``title`` are given.
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendVenue'.format(self.base_url)

    # Check the coordinates against None explicitly: 0.0 is a valid
    # latitude/longitude and must not count as "missing" (the previous
    # truthiness-based ``all([...])`` check rejected it). Title and address
    # still use truthiness, since empty strings are not meaningful here.
    # The error message is also joined with a proper space ("be passed").
    have_fields = (latitude is not None and longitude is not None
                   and address and title)
    if not (venue or have_fields):
        raise ValueError("Either venue or latitude, longitude, address and title must be "
                         "passed as arguments.")

    if isinstance(venue, Venue):
        latitude = venue.location.latitude
        longitude = venue.location.longitude
        address = venue.address
        title = venue.title
        foursquare_id = venue.foursquare_id
        foursquare_type = venue.foursquare_type

    data = {
        'chat_id': chat_id,
        'latitude': latitude,
        'longitude': longitude,
        'address': address,
        'title': title
    }

    if foursquare_id:
        data['foursquare_id'] = foursquare_id
    if foursquare_type:
        data['foursquare_type'] = foursquare_type

    return url, data
@log
@message
def send_contact(self,
                 chat_id,
                 phone_number=None,
                 first_name=None,
                 last_name=None,
                 disable_notification=False,
                 reply_to_message_id=None,
                 reply_markup=None,
                 timeout=None,
                 contact=None,
                 vcard=None,
                 **kwargs):
    """Use this method to send phone contacts.

    Note:
        You can either supply :obj:`contact` or :obj:`phone_number` and :obj:`first_name`
        with optionally :obj:`last_name` and optionally :obj:`vcard`.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        phone_number (:obj:`str`, optional): Contact's phone number.
        first_name (:obj:`str`, optional): Contact's first name.
        last_name (:obj:`str`, optional): Contact's last name.
        vcard (:obj:`str`, optional): Additional data about the contact in the form of a vCard,
            0-2048 bytes.
        contact (:class:`telegram.Contact`, optional): The contact to send.
        disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
            receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
            original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
            JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
            to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        ValueError: If neither ``contact`` nor both ``phone_number`` and
            ``first_name`` are given.
        :class:`telegram.TelegramError`

    """
    url = '{0}/sendContact'.format(self.base_url)

    if (not contact) and (not all([phone_number, first_name])):
        # Error string joined with a proper space ("passed as arguments" —
        # the adjacent literals previously concatenated to "asarguments").
        raise ValueError("Either contact or phone_number and first_name must be passed "
                         "as arguments.")

    if isinstance(contact, Contact):
        phone_number = contact.phone_number
        first_name = contact.first_name
        last_name = contact.last_name
        vcard = contact.vcard

    data = {'chat_id': chat_id, 'phone_number': phone_number, 'first_name': first_name}

    if last_name:
        data['last_name'] = last_name
    if vcard:
        data['vcard'] = vcard

    return url, data
@log
@message
def send_game(self,
              chat_id,
              game_short_name,
              disable_notification=False,
              reply_to_message_id=None,
              reply_markup=None,
              timeout=None,
              **kwargs):
    """Send a game.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier of the target chat,
            or username of the target channel (format: @channelusername).
        game_short_name (:obj:`str`): Short name of the game; serves as the
            game's unique identifier. Set up your games via Botfather.
        disable_notification (:obj:`bool`, optional): Sends the message
            silently; users receive a notification with no sound.
        reply_to_message_id (:obj:`int`, optional): If the message is a reply,
            ID of the original message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional
            interface options: a JSON-serialized inline keyboard, custom reply
            keyboard, or an instruction to remove the keyboard / force a reply.
        timeout (:obj:`int` | :obj:`float`, optional): If specified, used as the
            read timeout from the server (instead of the one set when the
            connection pool was created).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, the sent Message is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    endpoint = '{0}/sendGame'.format(self.base_url)
    payload = {'chat_id': chat_id, 'game_short_name': game_short_name}
    return endpoint, payload
@log
def send_chat_action(self, chat_id, action, timeout=None, **kwargs):
    """Tell the user that something is happening on the bot's side.

    The status is set for 5 seconds or less (when a message arrives from your bot,
    Telegram clients clear its typing status).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        action (:class:`telegram.ChatAction` | :obj:`str`): Type of action to broadcast.
            Choose one, depending on what the user is about to receive. For convenience
            look at the constants in :class:`telegram.ChatAction`.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool`: ``True`` on success.

    Raises:
        :class:`telegram.TelegramError`

    """
    payload = {'chat_id': chat_id, 'action': action}
    payload.update(kwargs)
    return self._request.post('{0}/sendChatAction'.format(self.base_url), payload,
                              timeout=timeout)
@log
def answer_inline_query(self,
                        inline_query_id,
                        results,
                        cache_time=300,
                        is_personal=None,
                        next_offset=None,
                        switch_pm_text=None,
                        switch_pm_parameter=None,
                        timeout=None,
                        **kwargs):
    """
    Use this method to send answers to an inline query. No more than 50 results per query are
    allowed.

    Args:
        inline_query_id (:obj:`str`): Unique identifier for the answered query.
        results (List[:class:`telegram.InlineQueryResult`]): A list of results for the inline
            query.
        cache_time (:obj:`int`, optional): The maximum amount of time in seconds that the
            result of the inline query may be cached on the server. Defaults to 300.
        is_personal (:obj:`bool`, optional): Pass True, if results may be cached on the server
            side only for the user that sent the query. By default, results may be returned to
            any user who sends the same query.
        next_offset (:obj:`str`, optional): Pass the offset that a client should send in the
            next query with the same text to receive more results. Pass an empty string if
            there are no more results or if you don't support pagination. Offset length can't
            exceed 64 bytes.
        switch_pm_text (:obj:`str`, optional): If passed, clients will display a button with
            specified text that switches the user to a private chat with the bot and sends the
            bot a start message with the parameter switch_pm_parameter.
        switch_pm_parameter (:obj:`str`, optional): Deep-linking parameter for the /start
            message sent to the bot when user presses the switch button. 1-64 characters,
            only A-Z, a-z, 0-9, _ and - are allowed.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Example:
        An inline bot that sends YouTube videos can ask the user to connect the bot to their
        YouTube account to adapt search results accordingly. To do this, it displays a
        'Connect your YouTube account' button above the results, or even before showing any.
        The user presses the button, switches to a private chat with the bot and, in doing so,
        passes a start parameter that instructs the bot to return an oauth link. Once done, the
        bot can offer a switch_inline button so that the user can easily return to the chat
        where they wanted to use the bot's inline capabilities.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/answerInlineQuery'.format(self.base_url)

    # Serialize the result objects for the JSON payload.
    results = [res.to_dict() for res in results]

    data = {'inline_query_id': inline_query_id, 'results': results}

    # 0 is a valid cache time, so compare against None instead of relying on
    # truthiness (the old `cache_time or cache_time == 0` form).
    if cache_time is not None:
        data['cache_time'] = cache_time
    if is_personal:
        data['is_personal'] = is_personal
    if next_offset is not None:
        data['next_offset'] = next_offset
    if switch_pm_text:
        data['switch_pm_text'] = switch_pm_text
    if switch_pm_parameter:
        data['switch_pm_parameter'] = switch_pm_parameter
    data.update(kwargs)

    result = self._request.post(url, data, timeout=timeout)

    return result
@log
def get_user_profile_photos(self, user_id, offset=None, limit=100, timeout=None, **kwargs):
    """Fetch a list of profile pictures for a user.

    Args:
        user_id (:obj:`int`): Unique identifier of the target user.
        offset (:obj:`int`, optional): Sequential number of the first photo to be returned.
            By default, all photos are returned.
        limit (:obj:`int`, optional): Limits the number of photos to be retrieved. Values
            between 1-100 are accepted. Defaults to 100.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.UserProfilePhotos`

    Raises:
        :class:`telegram.TelegramError`

    """
    data = {'user_id': user_id}
    # offset may legitimately be 0, so test against None rather than truthiness.
    if offset is not None:
        data['offset'] = offset
    if limit:
        data['limit'] = limit
    data.update(kwargs)

    response = self._request.post('{0}/getUserProfilePhotos'.format(self.base_url), data,
                                  timeout=timeout)
    return UserProfilePhotos.de_json(response, self)
@log
def get_file(self, file_id, timeout=None, **kwargs):
    """Get basic info about a file and prepare it for downloading.

    For the moment, bots can download files of up to 20MB in size. The file can then be
    downloaded with :attr:`telegram.File.download`. It is guaranteed that the link will be
    valid for at least 1 hour. When the link expires, a new one can be requested by
    calling get_file again.

    Args:
        file_id (:obj:`str` | :class:`telegram.Audio` | :class:`telegram.Document` | \
            :class:`telegram.PhotoSize` | :class:`telegram.Sticker` | \
            :class:`telegram.Video` | :class:`telegram.VideoNote` | \
            :class:`telegram.Voice`):
            Either the file identifier or an object that has a file_id attribute
            to get file information about.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.File`

    Raises:
        :class:`telegram.TelegramError`

    """
    # Accept either a plain file_id string or any object exposing a file_id attribute.
    file_id = getattr(file_id, 'file_id', file_id)

    data = {'file_id': file_id}
    data.update(kwargs)

    result = self._request.post('{0}/getFile'.format(self.base_url), data, timeout=timeout)

    # Expand the relative file_path from the API into a full download URL.
    if result.get('file_path'):
        result['file_path'] = '{0}/{1}'.format(self.base_file_url, result['file_path'])

    return File.de_json(result, self)
@log
def kick_chat_member(self, chat_id, user_id, timeout=None, until_date=None, **kwargs):
    """Kick a user from a group or a supergroup.

    In the case of supergroups, the user will not be able to return to the group on their
    own using invite links, etc., unless unbanned first. The bot must be an administrator
    in the group for this to work.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        user_id (:obj:`int`): Unique identifier of the target user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        until_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when the user will
            be unbanned, unix time. If user is banned for more than 366 days or less than 30
            seconds from the current time they are considered to be banned forever.
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Note:
        In regular groups (non-supergroups), this method will only work if the
        'All Members Are Admins' setting is off in the target group. Otherwise
        members may only be removed by the group's creator or by the member that added them.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    data = {'chat_id': chat_id, 'user_id': user_id}
    data.update(kwargs)

    if until_date is not None:
        # datetime objects are converted to the unix timestamp the API expects.
        data['until_date'] = (to_timestamp(until_date)
                              if isinstance(until_date, datetime) else until_date)

    return self._request.post('{0}/kickChatMember'.format(self.base_url), data,
                              timeout=timeout)
@log
def unban_chat_member(self, chat_id, user_id, timeout=None, **kwargs):
    """Unban a previously kicked user in a supergroup.

    The user will not return to the group automatically, but will be able to join via link,
    etc. The bot must be an administrator in the group for this to work.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        user_id (:obj:`int`): Unique identifier of the target user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    payload = {'chat_id': chat_id, 'user_id': user_id}
    payload.update(kwargs)
    return self._request.post('{0}/unbanChatMember'.format(self.base_url), payload,
                              timeout=timeout)
@log
def answer_callback_query(self,
                          callback_query_id,
                          text=None,
                          show_alert=False,
                          url=None,
                          cache_time=None,
                          timeout=None,
                          **kwargs):
    """Send an answer to a callback query sent from an inline keyboard.

    The answer will be displayed to the user as a notification at the top of the chat
    screen or as an alert.
    Alternatively, the user can be redirected to the specified Game URL. For this option to
    work, you must first create a game for your bot via BotFather and accept the terms.
    Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with
    a parameter.

    Args:
        callback_query_id (:obj:`str`): Unique identifier for the query to be answered.
        text (:obj:`str`, optional): Text of the notification. If not specified, nothing will
            be shown to the user, 0-200 characters.
        show_alert (:obj:`bool`, optional): If true, an alert will be shown by the client
            instead of a notification at the top of the chat screen. Defaults to false.
        url (:obj:`str`, optional): URL that will be opened by the user's client. If you have
            created a Game and accepted the conditions via @Botfather, specify the URL that
            opens your game - note that this will only work if the query comes from a callback
            game button. Otherwise, you may use links like t.me/your_bot?start=XXXX that open
            your bot with a parameter.
        cache_time (:obj:`int`, optional): The maximum amount of time in seconds that the
            result of the callback query may be cached client-side. Defaults to 0.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    data = {'callback_query_id': callback_query_id}

    # Only forward the optional fields that were actually supplied.
    for key, value in (('text', text), ('show_alert', show_alert), ('url', url)):
        if value:
            data[key] = value
    # cache_time of 0 is meaningful, so check against None explicitly.
    if cache_time is not None:
        data['cache_time'] = cache_time
    data.update(kwargs)

    return self._request.post('{0}/answerCallbackQuery'.format(self.base_url), data,
                              timeout=timeout)
@log
@message
def edit_message_text(self,
                      text,
                      chat_id=None,
                      message_id=None,
                      inline_message_id=None,
                      parse_mode=None,
                      disable_web_page_preview=None,
                      reply_markup=None,
                      timeout=None,
                      **kwargs):
    """
    Use this method to edit text and game messages sent by the bot or via the bot (for inline
    bots).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
            Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
            specified. Identifier of the inline message.
        text (:obj:`str`): New text of the message.
        parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold,
            italic, fixed-width text or inline URLs in your bot's message. See the constants in
            :class:`telegram.ParseMode` for the available modes.
        disable_web_page_preview (:obj:`bool`, optional): Disables link previews for links in
            this message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
            JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
            to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, if edited message is sent by the bot, the
        edited Message is returned, otherwise ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`
        :obj:`ValueError`: If neither ``inline_message_id`` nor both ``chat_id`` and
            ``message_id`` are given.

    """
    # Validate the message-addressing arguments up front, like the sibling
    # edit_message_caption/media/reply_markup methods do, instead of letting
    # the server reject the malformed request.
    if inline_message_id is None and (chat_id is None or message_id is None):
        raise ValueError(
            'edit_message_text: Both chat_id and message_id are required when '
            'inline_message_id is not specified')

    url = '{0}/editMessageText'.format(self.base_url)

    data = {'text': text}

    if chat_id:
        data['chat_id'] = chat_id
    if message_id:
        data['message_id'] = message_id
    if inline_message_id:
        data['inline_message_id'] = inline_message_id
    if parse_mode:
        data['parse_mode'] = parse_mode
    if disable_web_page_preview:
        data['disable_web_page_preview'] = disable_web_page_preview

    return url, data
@log
@message
def edit_message_caption(self,
                         chat_id=None,
                         message_id=None,
                         inline_message_id=None,
                         caption=None,
                         reply_markup=None,
                         timeout=None,
                         parse_mode=None,
                         **kwargs):
    """Edit the caption of a message sent by the bot or via the bot (for inline bots).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
            Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
            specified. Identifier of the inline message.
        caption (:obj:`str`, optional): New caption of the message.
        parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to
            show bold, italic, fixed-width text or inline URLs in the media caption. See the
            constants in :class:`telegram.ParseMode` for the available modes.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options.
            A JSON-serialized object for an inline keyboard, custom reply keyboard,
            instructions to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, if edited message is sent by the bot, the
        edited Message is returned, otherwise ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    if inline_message_id is None and (chat_id is None or message_id is None):
        raise ValueError(
            'edit_message_caption: Both chat_id and message_id are required when '
            'inline_message_id is not specified')

    data = {}
    # Keep the original field order: caption/parse_mode first, then addressing.
    for key, value in (('caption', caption), ('parse_mode', parse_mode),
                       ('chat_id', chat_id), ('message_id', message_id),
                       ('inline_message_id', inline_message_id)):
        if value:
            data[key] = value

    return '{0}/editMessageCaption'.format(self.base_url), data
@log
@message
def edit_message_media(self,
                       chat_id=None,
                       message_id=None,
                       inline_message_id=None,
                       media=None,
                       reply_markup=None,
                       timeout=None,
                       **kwargs):
    """Use this method to edit audio, document, photo, or video messages.

    If a message is a part of a message album, then it can be edited only to a photo or a
    video. Otherwise, message type can be changed arbitrarily. When inline message is edited,
    new file can't be uploaded. Use previously uploaded file via its file_id or specify a URL.

    Args:
        chat_id (:obj:`int` | :obj:`str`, optional): Unique identifier for the target chat or
            username of the target channel (in the format @channelusername).
        message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
            Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
            specified. Identifier of the inline message.
        media (:class:`telegram.InputMedia`): An object for a new media content
            of the message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A
            JSON-serialized object for an inline keyboard, custom reply keyboard, instructions
            to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, if the edited message was sent by the bot, the
        edited Message is returned, otherwise ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    if inline_message_id is None and (chat_id is None or message_id is None):
        # Bug fix: the error message previously named 'edit_message_caption'
        # (copy-paste from the sibling method).
        raise ValueError(
            'edit_message_media: Both chat_id and message_id are required when '
            'inline_message_id is not specified')

    url = '{0}/editMessageMedia'.format(self.base_url)

    data = {'media': media}

    if chat_id:
        data['chat_id'] = chat_id
    if message_id:
        data['message_id'] = message_id
    if inline_message_id:
        data['inline_message_id'] = inline_message_id

    return url, data
@log
@message
def edit_message_reply_markup(self,
                              chat_id=None,
                              message_id=None,
                              inline_message_id=None,
                              reply_markup=None,
                              timeout=None,
                              **kwargs):
    """Edit only the reply markup of a message sent by the bot or via the bot (for inline
    bots).

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
            Identifier of the sent message.
        inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
            specified. Identifier of the inline message.
        reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options.
            A JSON-serialized object for an inline keyboard, custom reply keyboard,
            instructions to remove reply keyboard or to force a reply from the user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Message`: On success, if edited message is sent by the bot, the
        edited Message is returned, otherwise ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    if inline_message_id is None and (chat_id is None or message_id is None):
        raise ValueError(
            'edit_message_reply_markup: Both chat_id and message_id are required when '
            'inline_message_id is not specified')

    data = {}
    for key, value in (('chat_id', chat_id), ('message_id', message_id),
                       ('inline_message_id', inline_message_id)):
        if value:
            data[key] = value

    return '{0}/editMessageReplyMarkup'.format(self.base_url), data
@log
def get_updates(self,
                offset=None,
                limit=100,
                timeout=0,
                read_latency=2.,
                allowed_updates=None,
                **kwargs):
    """Use this method to receive incoming updates using long polling.

    Args:
        offset (:obj:`int`, optional): Identifier of the first update to be returned. Must be
            greater by one than the highest among the identifiers of previously received
            updates. By default, updates starting with the earliest unconfirmed update are
            returned. An update is considered confirmed as soon as getUpdates is called with an
            offset higher than its update_id. The negative offset can be specified to retrieve
            updates starting from -offset update from the end of the updates queue. All
            previous updates will be forgotten.
        limit (:obj:`int`, optional): Limits the number of updates to be retrieved. Values
            between 1-100 are accepted. Defaults to 100.
        timeout (:obj:`int`, optional): Timeout in seconds for long polling. Defaults to 0,
            i.e. usual short polling. Should be positive, short polling should be used for
            testing purposes only.
        read_latency (:obj:`float` | :obj:`int`, optional): Grace period in seconds added
            to ``timeout`` when computing the HTTP read timeout, so the client does not
            give up before the long poll can return. Defaults to 2.
        allowed_updates (List[:obj:`str`], optional): List the types of updates you want your
            bot to receive. For example, specify ["message", "edited_channel_post",
            "callback_query"] to only receive updates of these types. See
            :class:`telegram.Update` for a complete list of available update types.
            Specify an empty list to receive all updates regardless of type (default). If not
            specified, the previous setting will be used. Please note that this parameter
            doesn't affect updates created before the call to the get_updates, so unwanted
            updates may be received for a short period of time.
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Notes:
        1. This method will not work if an outgoing webhook is set up.
        2. In order to avoid getting duplicate updates, recalculate offset after each
           server response.
        3. To take full advantage of this library take a look at :class:`telegram.ext.Updater`

    Returns:
        List[:class:`telegram.Update`]

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/getUpdates'.format(self.base_url)

    data = {'timeout': timeout}

    if offset:
        data['offset'] = offset
    if limit:
        data['limit'] = limit
    if allowed_updates is not None:
        data['allowed_updates'] = allowed_updates
    data.update(kwargs)

    # Ideally we'd use an aggressive read timeout for the polling. However,
    # * Short polling should return within 2 seconds.
    # * Long polling poses a different problem: the connection might have been dropped while
    #   waiting for the server to return and there's no way of knowing the connection had been
    #   dropped in real time.
    # So the read timeout is the long-poll timeout plus a fixed latency allowance.
    result = self._request.post(url, data, timeout=float(read_latency) + float(timeout))

    if result:
        self.logger.debug('Getting updates: %s', [u['update_id'] for u in result])
    else:
        self.logger.debug('No new updates found.')

    return [Update.de_json(u, self) for u in result]
@log
def set_webhook(self,
                url=None,
                certificate=None,
                timeout=None,
                max_connections=40,
                allowed_updates=None,
                **kwargs):
    """
    Use this method to specify a url and receive incoming updates via an outgoing webhook.
    Whenever there is an update for the bot, we will send an HTTPS POST request to the
    specified url, containing a JSON-serialized Update. In case of an unsuccessful request,
    we will give up after a reasonable amount of attempts.

    If you'd like to make sure that the Webhook request comes from Telegram, we recommend
    using a secret path in the URL, e.g. https://www.example.com/<token>. Since nobody else
    knows your bot's token, you can be pretty sure it's us.

    Note:
        The certificate argument should be a file from disk ``open(filename, 'rb')``.

    Args:
        url (:obj:`str`, optional): HTTPS url to send updates to. Use an empty string to
            remove webhook integration.
        certificate (:obj:`filelike`): Upload your public key certificate so that the root
            certificate in use can be checked. See our self-signed guide for details.
            (https://goo.gl/rw7w6Y)
        max_connections (:obj:`int`, optional): Maximum allowed number of simultaneous HTTPS
            connections to the webhook for update delivery, 1-100. Defaults to 40. Use lower
            values to limit the load on your bot's server, and higher values to increase your
            bot's throughput.
        allowed_updates (List[:obj:`str`], optional): List the types of updates you want your
            bot to receive. For example, specify ["message", "edited_channel_post",
            "callback_query"] to only receive updates of these types. See
            :class:`telegram.Update` for a complete list of available update types. Specify an
            empty list to receive all updates regardless of type (default). If not specified,
            the previous setting will be used. Please note that this parameter doesn't affect
            updates created before the call to the set_webhook, so unwanted updates may be
            received for a short period of time.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Note:
        1. You will not be able to receive updates using get_updates for as long as an outgoing
           webhook is set up.
        2. To use a self-signed certificate, you need to upload your public key certificate
           using certificate parameter. Please upload as InputFile, sending a String will not
           work.
        3. Ports currently supported for Webhooks: 443, 80, 88, 8443.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`
        :obj:`ValueError`: If both ``url`` and the deprecated ``webhook_url`` keyword
            argument are supplied.

    """
    url_ = '{0}/setWebhook'.format(self.base_url)

    # Backwards-compatibility: 'url' used to be named 'webhook_url'.
    # Accept the old keyword, warn, and move its value into `url` (removing it
    # from kwargs so it is not forwarded to the API twice).
    if 'webhook_url' in kwargs:  # pragma: no cover
        warnings.warn("The 'webhook_url' parameter has been renamed to 'url' in accordance "
                      "with the API")

        if url is not None:
            raise ValueError("The parameters 'url' and 'webhook_url' are mutually exclusive")

        url = kwargs['webhook_url']
        del kwargs['webhook_url']

    data = {}

    if url is not None:
        data['url'] = url
    if certificate:
        # Wrap raw file-like objects so they are uploaded as multipart data.
        if InputFile.is_file(certificate):
            certificate = InputFile(certificate)
        data['certificate'] = certificate
    if max_connections is not None:
        data['max_connections'] = max_connections
    if allowed_updates is not None:
        data['allowed_updates'] = allowed_updates
    data.update(kwargs)

    result = self._request.post(url_, data, timeout=timeout)

    return result
@log
def delete_webhook(self, timeout=None, **kwargs):
    """Remove webhook integration if you decide to switch back to getUpdates.

    Requires no parameters.

    Args:
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    return self._request.post('{0}/deleteWebhook'.format(self.base_url), kwargs,
                              timeout=timeout)
@log
def leave_chat(self, chat_id, timeout=None, **kwargs):
    """Make the bot leave a group, supergroup or channel.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool` On success, ``True`` is returned.

    Raises:
        :class:`telegram.TelegramError`

    """
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    return self._request.post('{0}/leaveChat'.format(self.base_url), payload,
                              timeout=timeout)
@log
def get_chat(self, chat_id, timeout=None, **kwargs):
    """Get up-to-date information about a chat.

    This includes the current name of the user for one-on-one conversations, the current
    username of a user, group or channel, etc.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.Chat`

    Raises:
        :class:`telegram.TelegramError`

    """
    payload = {'chat_id': chat_id}
    payload.update(kwargs)

    response = self._request.post('{0}/getChat'.format(self.base_url), payload,
                                  timeout=timeout)
    return Chat.de_json(response, self)
@log
def get_chat_administrators(self, chat_id, timeout=None, **kwargs):
    """Get a list of administrators in a chat.

    On success, returns an Array of ChatMember objects that contains information about all
    chat administrators except other bots. If the chat is a group or a supergroup and no
    administrators were appointed, only the creator will be returned.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        List[:class:`telegram.ChatMember`]

    Raises:
        :class:`telegram.TelegramError`

    """
    payload = {'chat_id': chat_id}
    payload.update(kwargs)

    response = self._request.post('{0}/getChatAdministrators'.format(self.base_url),
                                  payload, timeout=timeout)
    return [ChatMember.de_json(member, self) for member in response]
@log
def get_chat_members_count(self, chat_id, timeout=None, **kwargs):
    """Get the number of members in a chat.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        int: Number of members in the chat.

    Raises:
        :class:`telegram.TelegramError`

    """
    payload = {'chat_id': chat_id}
    payload.update(kwargs)
    return self._request.post('{0}/getChatMembersCount'.format(self.base_url), payload,
                              timeout=timeout)
@log
def get_chat_member(self, chat_id, user_id, timeout=None, **kwargs):
    """Get information about a member of a chat.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target channel (in the format @channelusername).
        user_id (:obj:`int`): Unique identifier of the target user.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation
            of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :class:`telegram.ChatMember`

    Raises:
        :class:`telegram.TelegramError`

    """
    payload = {'chat_id': chat_id, 'user_id': user_id}
    payload.update(kwargs)

    response = self._request.post('{0}/getChatMember'.format(self.base_url), payload,
                                  timeout=timeout)
    return ChatMember.de_json(response, self)
@log
def set_chat_sticker_set(self, chat_id, sticker_set_name, timeout=None, **kwargs):
    """Use this method to set a new group sticker set for a supergroup.

    The bot must be an administrator in the chat for this to work and must have the appropriate
    admin rights. Use the field :attr:`telegram.Chat.can_set_sticker_set` optionally returned
    in :attr:`get_chat` requests to check if the bot can use this method.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target supergroup (in the format @supergroupusername).
        sticker_set_name (:obj:`str`): Name of the sticker set to be set as the group
            sticker set.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool`: True on success.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/setChatStickerSet'.format(self.base_url)

    data = {'chat_id': chat_id, 'sticker_set_name': sticker_set_name}
    # Forward extra keyword arguments like every other API method does;
    # previously **kwargs was accepted but silently dropped.
    data.update(kwargs)

    result = self._request.post(url, data, timeout=timeout)

    return result
@log
def delete_chat_sticker_set(self, chat_id, timeout=None, **kwargs):
    """Use this method to delete a group sticker set from a supergroup.

    The bot must be an administrator in the chat for this to work and must have the
    appropriate admin rights. Use the field :attr:`telegram.Chat.can_set_sticker_set`
    optionally returned in :attr:`get_chat` requests to check if the bot can use this method.

    Args:
        chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
            of the target supergroup (in the format @supergroupusername).
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during creation of
            the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Returns:
        :obj:`bool`: True on success.

    Raises:
        :class:`telegram.TelegramError`

    """
    url = '{0}/deleteChatStickerSet'.format(self.base_url)

    data = {'chat_id': chat_id}
    # Forward extra keyword arguments like every other API method does;
    # previously **kwargs was accepted but silently dropped.
    data.update(kwargs)

    result = self._request.post(url, data, timeout=timeout)

    return result
def get_webhook_info(self, timeout=None, **kwargs):
"""Use this method to get current webhook status. Requires no parameters.
If the bot is using getUpdates, will return an object with the url field empty.
Args:
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.WebhookInfo`
"""
url = '{0}/getWebhookInfo'.format(self.base_url)
data = kwargs
result = self._request.post(url, data, timeout=timeout)
return WebhookInfo.de_json(result, self)
@log
@message
def set_game_score(self,
user_id,
score,
chat_id=None,
message_id=None,
inline_message_id=None,
force=None,
disable_edit_message=None,
timeout=None,
**kwargs):
"""
Use this method to set the score of the specified user in a game. On success, if the
message was sent by the bot, returns the edited Message, otherwise returns True. Returns
an error, if the new score is not greater than the user's current score in the chat and
force is False.
Args:
user_id (:obj:`int`): User identifier.
score (:obj:`int`): New score, must be non-negative.
force (:obj:`bool`, optional): Pass True, if the high score is allowed to decrease.
This can be useful when fixing mistakes or banning cheaters
disable_edit_message (:obj:`bool`, optional): Pass True, if the game message should not
be automatically edited to include the current scoreboard.
chat_id (int|str, optional): Required if inline_message_id is not specified. Unique
identifier for the target chat.
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: The edited message, or if the message wasn't sent by the bot
, ``True``.
Raises:
:class:`telegram.TelegramError`: If the new score is not greater than the user's
current score in the chat and force is False.
"""
url = '{0}/setGameScore'.format(self.base_url)
data = {'user_id': user_id, 'score': score}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
if force is not None:
data['force'] = force
if disable_edit_message is not None:
data['disable_edit_message'] = disable_edit_message
return url, data
@log
def get_game_high_scores(self,
user_id,
chat_id=None,
message_id=None,
inline_message_id=None,
timeout=None,
**kwargs):
"""
Use this method to get data for high score tables. Will return the score of the specified
user and several of his neighbors in a game
Args:
user_id (:obj:`int`): User identifier.
chat_id (:obj:`int` | :obj:`str`, optional): Required if inline_message_id is not
specified. Unique identifier for the target chat.
message_id (:obj:`int`, optional): Required if inline_message_id is not specified.
Identifier of the sent message.
inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not
specified. Identifier of the inline message.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
List[:class:`telegram.GameHighScore`]
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getGameHighScores'.format(self.base_url)
data = {'user_id': user_id}
if chat_id:
data['chat_id'] = chat_id
if message_id:
data['message_id'] = message_id
if inline_message_id:
data['inline_message_id'] = inline_message_id
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return [GameHighScore.de_json(hs, self) for hs in result]
@log
@message
def send_invoice(self,
chat_id,
title,
description,
payload,
provider_token,
start_parameter,
currency,
prices,
photo_url=None,
photo_size=None,
photo_width=None,
photo_height=None,
need_name=None,
need_phone_number=None,
need_email=None,
need_shipping_address=None,
is_flexible=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
provider_data=None,
send_phone_number_to_provider=None,
send_email_to_provider=None,
timeout=None,
**kwargs):
"""Use this method to send invoices.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target private chat.
title (:obj:`str`): Product name.
description (:obj:`str`): Product description.
payload (:obj:`str`): Bot-defined invoice payload, 1-128 bytes. This will not be
displayed to the user, use for your internal processes.
provider_token (:obj:`str`): Payments provider token, obtained via Botfather.
start_parameter (:obj:`str`): Unique deep-linking parameter that can be used to
generate this invoice when used as a start parameter.
currency (:obj:`str`): Three-letter ISO 4217 currency code.
prices (List[:class:`telegram.LabeledPrice`)]: Price breakdown, a list of components
(e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.).
provider_data (:obj:`str` | :obj:`object`, optional): JSON-encoded data about the
invoice, which will be shared with the payment provider. A detailed description of
required fields should be provided by the payment provider. When an object is
passed, it will be encoded as JSON.
photo_url (:obj:`str`, optional): URL of the product photo for the invoice. Can be a
photo of the goods or a marketing image for a service. People like it better when
they see what they are paying for.
photo_size (:obj:`str`, optional): Photo size.
photo_width (:obj:`int`, optional): Photo width.
photo_height (:obj:`int`, optional): Photo height.
need_name (:obj:`bool`, optional): Pass True, if you require the user's full name to
complete the order.
need_phone_number (:obj:`bool`, optional): Pass True, if you require the user's
phone number to complete the order.
need_email (:obj:`bool`, optional): Pass True, if you require the user's email to
complete the order.
need_shipping_address (:obj:`bool`, optional): Pass True, if you require the user's
shipping address to complete the order.
send_phone_number_to_provider (:obj:`bool`, optional): Pass True, if user's phone
number should be sent to provider.
send_email_to_provider (:obj:`bool`, optional): Pass True, if user's email address
should be sent to provider.
is_flexible (:obj:`bool`, optional): Pass True, if the final price depends on the
shipping method.
disable_notification (:obj:`bool`, optional): Sends the message silently. Users will
receive a notification with no sound.
reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the
original message.
reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options.
An inlinekeyboard. If empty, one 'Pay total price' button will be shown.
If not empty, the first button must be a Pay button.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.Message`: On success, the sent Message is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/sendInvoice'.format(self.base_url)
data = {
'chat_id': chat_id,
'title': title,
'description': description,
'payload': payload,
'provider_token': provider_token,
'start_parameter': start_parameter,
'currency': currency,
'prices': [p.to_dict() for p in prices]
}
if provider_data is not None:
if isinstance(provider_data, string_types):
data['provider_data'] = provider_data
else:
data['provider_data'] = json.dumps(provider_data)
if photo_url is not None:
data['photo_url'] = photo_url
if photo_size is not None:
data['photo_size'] = photo_size
if photo_width is not None:
data['photo_width'] = photo_width
if photo_height is not None:
data['photo_height'] = photo_height
if need_name is not None:
data['need_name'] = need_name
if need_phone_number is not None:
data['need_phone_number'] = need_phone_number
if need_email is not None:
data['need_email'] = need_email
if need_shipping_address is not None:
data['need_shipping_address'] = need_shipping_address
if is_flexible is not None:
data['is_flexible'] = is_flexible
if send_phone_number_to_provider is not None:
data['send_phone_number_to_provider'] = send_email_to_provider
if send_email_to_provider is not None:
data['send_email_to_provider'] = send_email_to_provider
return url, data
@log
def answer_shipping_query(self,
shipping_query_id,
ok,
shipping_options=None,
error_message=None,
timeout=None,
**kwargs):
"""
If you sent an invoice requesting a shipping address and the parameter is_flexible was
specified, the Bot API will send an Update with a shipping_query field to the bot. Use
this method to reply to shipping queries.
Args:
shipping_query_id (:obj:`str`): Unique identifier for the query to be answered.
ok (:obj:`bool`): Specify True if delivery to the specified address is possible and
False if there are any problems (for example, if delivery to the specified address
is not possible).
shipping_options (List[:class:`telegram.ShippingOption`]), optional]: Required if ok is
True. A JSON-serialized array of available shipping options.
error_message (:obj:`str`, optional): Required if ok is False. Error message in
human readable form that explains why it is impossible to complete the order (e.g.
"Sorry, delivery to your desired address is unavailable"). Telegram will display
this message to the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`; On success, True is returned.
Raises:
:class:`telegram.TelegramError`
"""
ok = bool(ok)
if ok and (shipping_options is None or error_message is not None):
raise TelegramError(
'answerShippingQuery: If ok is True, shipping_options '
'should not be empty and there should not be error_message')
if not ok and (shipping_options is not None or error_message is None):
raise TelegramError(
'answerShippingQuery: If ok is False, error_message '
'should not be empty and there should not be shipping_options')
url_ = '{0}/answerShippingQuery'.format(self.base_url)
data = {'shipping_query_id': shipping_query_id, 'ok': ok}
if ok:
data['shipping_options'] = [option.to_dict() for option in shipping_options]
if error_message is not None:
data['error_message'] = error_message
data.update(kwargs)
result = self._request.post(url_, data, timeout=timeout)
return result
@log
def answer_pre_checkout_query(self, pre_checkout_query_id, ok,
error_message=None, timeout=None, **kwargs):
"""
Once the user has confirmed their payment and shipping details, the Bot API sends the final
confirmation in the form of an Update with the field pre_checkout_query. Use this method to
respond to such pre-checkout queries.
Note:
The Bot API must receive an answer within 10 seconds after the pre-checkout
query was sent.
Args:
pre_checkout_query_id (:obj:`str`): Unique identifier for the query to be answered.
ok (:obj:`bool`): Specify True if everything is alright (goods are available, etc.) and
the bot is ready to proceed with the order. Use False if there are any problems.
error_message (:obj:`str`, optional): Required if ok is False. Error message in human
readable form that explains the reason for failure to proceed with the checkout
(e.g. "Sorry, somebody just bought the last of our amazing black T-shirts while you
were busy filling out your payment details. Please choose a different color or
garment!"). Telegram will display this message to the user.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
ok = bool(ok)
if not (ok ^ (error_message is not None)):
raise TelegramError(
'answerPreCheckoutQuery: If ok is True, there should '
'not be error_message; if ok is False, error_message '
'should not be empty')
url_ = '{0}/answerPreCheckoutQuery'.format(self.base_url)
data = {'pre_checkout_query_id': pre_checkout_query_id, 'ok': ok}
if error_message is not None:
data['error_message'] = error_message
data.update(kwargs)
result = self._request.post(url_, data, timeout=timeout)
return result
@log
def restrict_chat_member(self, chat_id, user_id, until_date=None, can_send_messages=None,
can_send_media_messages=None, can_send_other_messages=None,
can_add_web_page_previews=None, timeout=None, **kwargs):
"""
Use this method to restrict a user in a supergroup. The bot must be an administrator in
the supergroup for this to work and must have the appropriate admin rights. Pass True for
all boolean parameters to lift restrictions from a user.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target supergroup (in the format @supergroupusername).
user_id (:obj:`int`): Unique identifier of the target user.
until_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when restrictions
will be lifted for the user, unix time. If user is restricted for more than 366
days or less than 30 seconds from the current time, they are considered to be
restricted forever.
can_send_messages (:obj:`bool`, optional): Pass True, if the user can send text
messages, contacts, locations and venues.
can_send_media_messages (:obj:`bool`, optional): Pass True, if the user can send
audios, documents, photos, videos, video notes and voice notes, implies
can_send_messages.
can_send_other_messages (:obj:`bool`, optional): Pass True, if the user can send
animations, games, stickers and use inline bots, implies can_send_media_messages.
can_add_web_page_previews (:obj:`bool`, optional): Pass True, if the user may add
web page previews to their messages, implies can_send_media_messages.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns True on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/restrictChatMember'.format(self.base_url)
data = {'chat_id': chat_id, 'user_id': user_id}
if until_date is not None:
if isinstance(until_date, datetime):
until_date = to_timestamp(until_date)
data['until_date'] = until_date
if can_send_messages is not None:
data['can_send_messages'] = can_send_messages
if can_send_media_messages is not None:
data['can_send_media_messages'] = can_send_media_messages
if can_send_other_messages is not None:
data['can_send_other_messages'] = can_send_other_messages
if can_add_web_page_previews is not None:
data['can_add_web_page_previews'] = can_add_web_page_previews
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def promote_chat_member(self, chat_id, user_id, can_change_info=None,
can_post_messages=None, can_edit_messages=None,
can_delete_messages=None, can_invite_users=None,
can_restrict_members=None, can_pin_messages=None,
can_promote_members=None, timeout=None, **kwargs):
"""
Use this method to promote or demote a user in a supergroup or a channel. The bot must be
an administrator in the chat for this to work and must have the appropriate admin rights.
Pass False for all boolean parameters to demote a user
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target supergroup (in the format @supergroupusername).
user_id (:obj:`int`): Unique identifier of the target user.
can_change_info (:obj:`bool`, optional): Pass True, if the administrator can change
chat title, photo and other settings.
can_post_messages (:obj:`bool`, optional): Pass True, if the administrator can
create channel posts, channels only.
can_edit_messages (:obj:`bool`, optional): Pass True, if the administrator can edit
messages of other users, channels only.
can_delete_messages (:obj:`bool`, optional): Pass True, if the administrator can
delete messages of other users.
can_invite_users (:obj:`bool`, optional): Pass True, if the administrator can invite
new users to the chat.
can_restrict_members (:obj:`bool`, optional): Pass True, if the administrator can
restrict, ban or unban chat members.
can_pin_messages (:obj:`bool`, optional): Pass True, if the administrator can pin
messages, supergroups only.
can_promote_members (:obj:`bool`, optional): Pass True, if the administrator can add
new administrators with a subset of his own privileges or demote administrators
that he has promoted, directly or indirectly (promoted by administrators that were
appointed by him).
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns True on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/promoteChatMember'.format(self.base_url)
data = {'chat_id': chat_id, 'user_id': user_id}
if can_change_info is not None:
data['can_change_info'] = can_change_info
if can_post_messages is not None:
data['can_post_messages'] = can_post_messages
if can_edit_messages is not None:
data['can_edit_messages'] = can_edit_messages
if can_delete_messages is not None:
data['can_delete_messages'] = can_delete_messages
if can_invite_users is not None:
data['can_invite_users'] = can_invite_users
if can_restrict_members is not None:
data['can_restrict_members'] = can_restrict_members
if can_pin_messages is not None:
data['can_pin_messages'] = can_pin_messages
if can_promote_members is not None:
data['can_promote_members'] = can_promote_members
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def export_chat_invite_link(self, chat_id, timeout=None, **kwargs):
"""
Use this method to export an invite link to a supergroup or a channel. The bot must be an
administrator in the chat for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`str`: Exported invite link on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/exportChatInviteLink'.format(self.base_url)
data = {'chat_id': chat_id}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def set_chat_photo(self, chat_id, photo, timeout=None, **kwargs):
"""Use this method to set a new profile photo for the chat.
Photos can't be changed for private chats. The bot must be an administrator in the chat
for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
photo (`filelike object`): New chat photo.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Note:
In regular groups (non-supergroups), this method will only work if the
'All Members Are Admins' setting is off in the target group.
Returns:
:obj:`bool`: Returns True on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/setChatPhoto'.format(self.base_url)
if InputFile.is_file(photo):
photo = InputFile(photo)
data = {'chat_id': chat_id, 'photo': photo}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def delete_chat_photo(self, chat_id, timeout=None, **kwargs):
"""
Use this method to delete a chat photo. Photos can't be changed for private chats. The bot
must be an administrator in the chat for this to work and must have the appropriate admin
rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Note:
In regular groups (non-supergroups), this method will only work if the
'All Members Are Admins' setting is off in the target group.
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/deleteChatPhoto'.format(self.base_url)
data = {'chat_id': chat_id}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def set_chat_title(self, chat_id, title, timeout=None, **kwargs):
"""
Use this method to change the title of a chat. Titles can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate
admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
title (:obj:`str`): New chat title, 1-255 characters.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Note:
In regular groups (non-supergroups), this method will only work if the
'All Members Are Admins' setting is off in the target group.
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/setChatTitle'.format(self.base_url)
data = {'chat_id': chat_id, 'title': title}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def set_chat_description(self, chat_id, description, timeout=None, **kwargs):
"""
Use this method to change the description of a supergroup or a channel. The bot must be an
administrator in the chat for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
description (:obj:`str`): New chat description, 1-255 characters.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/setChatDescription'.format(self.base_url)
data = {'chat_id': chat_id, 'description': description}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def pin_chat_message(self, chat_id, message_id, disable_notification=None, timeout=None,
**kwargs):
"""
Use this method to pin a message in a supergroup. The bot must be an administrator in the
chat for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
message_id (:obj:`int`): Identifier of a message to pin.
disable_notification (:obj:`bool`, optional): Pass True, if it is not necessary to send
a notification to all group members about the new pinned message.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/pinChatMessage'.format(self.base_url)
data = {'chat_id': chat_id, 'message_id': message_id}
if disable_notification is not None:
data['disable_notification'] = disable_notification
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def unpin_chat_message(self, chat_id, timeout=None, **kwargs):
"""
Use this method to unpin a message in a supergroup. The bot must be an administrator in the
chat for this to work and must have the appropriate admin rights.
Args:
chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username
of the target`channel (in the format @channelusername).
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments
Returns:
:obj:`bool`: Returns ``True`` on success.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/unpinChatMessage'.format(self.base_url)
data = {'chat_id': chat_id}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def get_sticker_set(self, name, timeout=None, **kwargs):
"""Use this method to get a sticker set.
Args:
name (:obj:`str`): Short name of the sticker set that is used in t.me/addstickers/
URLs (e.g., animals)
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.StickerSet`
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/getStickerSet'.format(self.base_url)
data = {'name': name}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return StickerSet.de_json(result, self)
@log
def upload_sticker_file(self, user_id, png_sticker, timeout=None, **kwargs):
"""
Use this method to upload a .png file with a sticker for later use in
:attr:`create_new_sticker_set` and :attr:`add_sticker_to_set` methods (can be used multiple
times).
Note:
The png_sticker argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
user_id (:obj:`int`): User identifier of sticker file owner.
png_sticker (:obj:`str` | `filelike object`): Png image with the sticker,
must be up to 512 kilobytes in size, dimensions must not exceed 512px,
and either width or height must be exactly 512px.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:class:`telegram.File`: The uploaded File
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/uploadStickerFile'.format(self.base_url)
if InputFile.is_file(png_sticker):
png_sticker = InputFile(png_sticker)
data = {'user_id': user_id, 'png_sticker': png_sticker}
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return File.de_json(result, self)
@log
def create_new_sticker_set(self, user_id, name, title, png_sticker, emojis,
contains_masks=None, mask_position=None, timeout=None, **kwargs):
"""Use this method to create new sticker set owned by a user.
The bot will be able to edit the created sticker set.
Note:
The png_sticker argument can be either a file_id, an URL or a file from disk
``open(filename, 'rb')``
Args:
user_id (:obj:`int`): User identifier of created sticker set owner.
name (:obj:`str`): Short name of sticker set, to be used in t.me/addstickers/ URLs
(e.g., animals). Can contain only english letters, digits and underscores.
Must begin with a letter, can't contain consecutive underscores and
must end in "_by_<bot username>". <bot_username> is case insensitive.
1-64 characters.
title (:obj:`str`): Sticker set title, 1-64 characters.
png_sticker (:obj:`str` | `filelike object`): Png image with the sticker, must be up
to 512 kilobytes in size, dimensions must not exceed 512px,
and either width or height must be exactly 512px. Pass a file_id as a String to
send a file that already exists on the Telegram servers, pass an HTTP URL as a
String for Telegram to get a file from the Internet, or upload a new one
using multipart/form-data.
emojis (:obj:`str`): One or more emoji corresponding to the sticker.
contains_masks (:obj:`bool`, optional): Pass True, if a set of mask stickers should be
created.
mask_position (:class:`telegram.MaskPosition`, optional): Position where the mask
should be placed on faces.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during
creation of the connection pool).
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Returns:
:obj:`bool`: On success, ``True`` is returned.
Raises:
:class:`telegram.TelegramError`
"""
url = '{0}/createNewStickerSet'.format(self.base_url)
if InputFile.is_file(png_sticker):
png_sticker = InputFile(png_sticker)
data = {'user_id': user_id, 'name': name, 'title': title, 'png_sticker': png_sticker,
'emojis': emojis}
if contains_masks is not None:
data['contains_masks'] = contains_masks
if mask_position is not None:
data['mask_position'] = mask_position
data.update(kwargs)
result = self._request.post(url, data, timeout=timeout)
return result
@log
def add_sticker_to_set(self, user_id, name, png_sticker, emojis, mask_position=None,
                       timeout=None, **kwargs):
    """Use this method to add a new sticker to a set created by the bot.
    Note:
        The png_sticker argument can be either a file_id, an URL or a file from disk
        ``open(filename, 'rb')``
    Args:
        user_id (:obj:`int`): User identifier of created sticker set owner.
        name (:obj:`str`): Sticker set name.
        png_sticker (:obj:`str` | `filelike object`): Png image with the sticker, must be up
            to 512 kilobytes in size, dimensions must not exceed 512px,
            and either width or height must be exactly 512px. Pass a file_id as a String to
            send a file that already exists on the Telegram servers, pass an HTTP URL as a
            String for Telegram to get a file from the Internet, or upload a new one
            using multipart/form-data.
        emojis (:obj:`str`): One or more emoji corresponding to the sticker.
        mask_position (:class:`telegram.MaskPosition`, optional): Position where the mask
            should be placed on faces.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during
            creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.
    Returns:
        :obj:`bool`: On success, ``True`` is returned.
    Raises:
        :class:`telegram.TelegramError`
    """
    url = '{0}/addStickerToSet'.format(self.base_url)
    # Local file objects are wrapped for multipart/form-data upload;
    # file_ids and URLs pass through unchanged as plain strings.
    if InputFile.is_file(png_sticker):
        png_sticker = InputFile(png_sticker)
    data = {'user_id': user_id, 'name': name, 'png_sticker': png_sticker, 'emojis': emojis}
    # mask_position is optional on the Bot API side; only include it when supplied.
    if mask_position is not None:
        data['mask_position'] = mask_position
    data.update(kwargs)
    result = self._request.post(url, data, timeout=timeout)
    return result
@log
def set_sticker_position_in_set(self, sticker, position, timeout=None, **kwargs):
    """Move a sticker in a set created by the bot to a specific position.
    Args:
        sticker (:obj:`str`): File identifier of the sticker.
        position (:obj:`int`): New sticker position in the set, zero-based.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during
            creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.
    Returns:
        :obj:`bool`: On success, ``True`` is returned.
    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/setStickerPositionInSet'.format(self.base_url)
    payload = dict(sticker=sticker, position=position)
    # Extra keyword arguments are forwarded verbatim and may override the
    # defaults above, matching the behaviour of the other API methods.
    payload.update(kwargs)
    return self._request.post(endpoint, payload, timeout=timeout)
@log
def delete_sticker_from_set(self, sticker, timeout=None, **kwargs):
    """Delete a sticker from a set created by the bot.
    Args:
        sticker (:obj:`str`): File identifier of the sticker.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during
            creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.
    Returns:
        :obj:`bool`: On success, ``True`` is returned.
    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/deleteStickerFromSet'.format(self.base_url)
    payload = dict(sticker=sticker)
    # Forward any extra keyword arguments straight to the API call.
    payload.update(kwargs)
    return self._request.post(endpoint, payload, timeout=timeout)
@log
def set_passport_data_errors(self, user_id, errors, timeout=None, **kwargs):
    """Inform a user that some of the Telegram Passport elements they provided
    contain errors.
    The user will not be able to re-submit their Passport to you until the errors
    are fixed (the contents of the field for which you returned the error must
    change).
    Use this if the data submitted by the user doesn't satisfy the standards your
    service requires for any reason. For example, if a birthday date seems invalid,
    a submitted document is blurry, a scan shows evidence of tampering, etc. Supply
    some details in the error message to make sure the user knows how to correct
    the issues.
    Args:
        user_id (:obj:`int`): User identifier
        errors (List[:class:`PassportElementError`]): A JSON-serialized array describing the
            errors.
        timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
            the read timeout from the server (instead of the one specified during
            creation of the connection pool).
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.
    Returns:
        :obj:`bool`: On success, ``True`` is returned.
    Raises:
        :class:`telegram.TelegramError`
    """
    endpoint = '{0}/setPassportDataErrors'.format(self.base_url)
    # Each error object knows how to serialize itself for the wire format.
    serialized_errors = [err.to_dict() for err in errors]
    payload = dict(user_id=user_id, errors=serialized_errors)
    payload.update(kwargs)
    return self._request.post(endpoint, payload, timeout=timeout)
def to_dict(self):
    """Return the bot's public profile as a JSON-serializable :obj:`dict`.
    Returns:
        :obj:`dict`: Dictionary with ``id``, ``username`` and ``first_name``
        keys, plus ``last_name`` when the bot has one.
    """
    # Bug fix: 'first_name' was previously assigned self.username, which
    # duplicated the username and silently dropped the bot's real first name.
    data = {'id': self.id, 'username': self.username, 'first_name': self.first_name}
    # last_name is optional; omit the key entirely when it is falsy.
    if self.last_name:
        data['last_name'] = self.last_name
    return data
def __reduce__(self):
    # Pickling support: rebuild the bot from (token, base_url, base_file_url).
    # The token is stripped out of the stored URLs so it is not persisted
    # twice; the constructor re-derives the full endpoints from the bases.
    return (self.__class__, (self.token, self.base_url.replace(self.token, ''),
                             self.base_file_url.replace(self.token, '')))
# camelCase aliases
# Backwards-compatible aliases mirroring the official Telegram Bot API
# method names; each one points at the snake_case implementation above.
getMe = get_me
"""Alias for :attr:`get_me`"""
sendMessage = send_message
"""Alias for :attr:`send_message`"""
deleteMessage = delete_message
"""Alias for :attr:`delete_message`"""
forwardMessage = forward_message
"""Alias for :attr:`forward_message`"""
sendPhoto = send_photo
"""Alias for :attr:`send_photo`"""
sendAudio = send_audio
"""Alias for :attr:`send_audio`"""
sendDocument = send_document
"""Alias for :attr:`send_document`"""
sendSticker = send_sticker
"""Alias for :attr:`send_sticker`"""
sendVideo = send_video
"""Alias for :attr:`send_video`"""
sendAnimation = send_animation
"""Alias for :attr:`send_animation`"""
sendVoice = send_voice
"""Alias for :attr:`send_voice`"""
sendVideoNote = send_video_note
"""Alias for :attr:`send_video_note`"""
sendMediaGroup = send_media_group
"""Alias for :attr:`send_media_group`"""
sendLocation = send_location
"""Alias for :attr:`send_location`"""
editMessageLiveLocation = edit_message_live_location
"""Alias for :attr:`edit_message_live_location`"""
stopMessageLiveLocation = stop_message_live_location
"""Alias for :attr:`stop_message_live_location`"""
sendVenue = send_venue
"""Alias for :attr:`send_venue`"""
sendContact = send_contact
"""Alias for :attr:`send_contact`"""
sendGame = send_game
"""Alias for :attr:`send_game`"""
sendChatAction = send_chat_action
"""Alias for :attr:`send_chat_action`"""
answerInlineQuery = answer_inline_query
"""Alias for :attr:`answer_inline_query`"""
getUserProfilePhotos = get_user_profile_photos
"""Alias for :attr:`get_user_profile_photos`"""
getFile = get_file
"""Alias for :attr:`get_file`"""
kickChatMember = kick_chat_member
"""Alias for :attr:`kick_chat_member`"""
unbanChatMember = unban_chat_member
"""Alias for :attr:`unban_chat_member`"""
answerCallbackQuery = answer_callback_query
"""Alias for :attr:`answer_callback_query`"""
editMessageText = edit_message_text
"""Alias for :attr:`edit_message_text`"""
editMessageCaption = edit_message_caption
"""Alias for :attr:`edit_message_caption`"""
editMessageMedia = edit_message_media
"""Alias for :attr:`edit_message_media`"""
editMessageReplyMarkup = edit_message_reply_markup
"""Alias for :attr:`edit_message_reply_markup`"""
getUpdates = get_updates
"""Alias for :attr:`get_updates`"""
setWebhook = set_webhook
"""Alias for :attr:`set_webhook`"""
deleteWebhook = delete_webhook
"""Alias for :attr:`delete_webhook`"""
leaveChat = leave_chat
"""Alias for :attr:`leave_chat`"""
getChat = get_chat
"""Alias for :attr:`get_chat`"""
getChatAdministrators = get_chat_administrators
"""Alias for :attr:`get_chat_administrators`"""
getChatMember = get_chat_member
"""Alias for :attr:`get_chat_member`"""
setChatStickerSet = set_chat_sticker_set
"""Alias for :attr:`set_chat_sticker_set`"""
deleteChatStickerSet = delete_chat_sticker_set
"""Alias for :attr:`delete_chat_sticker_set`"""
getChatMembersCount = get_chat_members_count
"""Alias for :attr:`get_chat_members_count`"""
getWebhookInfo = get_webhook_info
"""Alias for :attr:`get_webhook_info`"""
setGameScore = set_game_score
"""Alias for :attr:`set_game_score`"""
getGameHighScores = get_game_high_scores
"""Alias for :attr:`get_game_high_scores`"""
sendInvoice = send_invoice
"""Alias for :attr:`send_invoice`"""
answerShippingQuery = answer_shipping_query
"""Alias for :attr:`answer_shipping_query`"""
answerPreCheckoutQuery = answer_pre_checkout_query
"""Alias for :attr:`answer_pre_checkout_query`"""
restrictChatMember = restrict_chat_member
"""Alias for :attr:`restrict_chat_member`"""
promoteChatMember = promote_chat_member
"""Alias for :attr:`promote_chat_member`"""
exportChatInviteLink = export_chat_invite_link
"""Alias for :attr:`export_chat_invite_link`"""
setChatPhoto = set_chat_photo
"""Alias for :attr:`set_chat_photo`"""
deleteChatPhoto = delete_chat_photo
"""Alias for :attr:`delete_chat_photo`"""
setChatTitle = set_chat_title
"""Alias for :attr:`set_chat_title`"""
setChatDescription = set_chat_description
"""Alias for :attr:`set_chat_description`"""
pinChatMessage = pin_chat_message
"""Alias for :attr:`pin_chat_message`"""
unpinChatMessage = unpin_chat_message
"""Alias for :attr:`unpin_chat_message`"""
getStickerSet = get_sticker_set
"""Alias for :attr:`get_sticker_set`"""
uploadStickerFile = upload_sticker_file
"""Alias for :attr:`upload_sticker_file`"""
createNewStickerSet = create_new_sticker_set
"""Alias for :attr:`create_new_sticker_set`"""
addStickerToSet = add_sticker_to_set
"""Alias for :attr:`add_sticker_to_set`"""
setStickerPositionInSet = set_sticker_position_in_set
"""Alias for :attr:`set_sticker_position_in_set`"""
deleteStickerFromSet = delete_sticker_from_set
"""Alias for :attr:`delete_sticker_from_set`"""
setPassportDataErrors = set_passport_data_errors
"""Alias for :attr:`set_passport_data_errors`"""
|
en
| 0.704532
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable=E0611,E0213,E1102,C0103,E1101,W0613,R0913,R0904 # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2018 # <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. This module contains an object that represents a Telegram Bot. This object represents a Telegram Bot. Args: token (:obj:`str`): Bot's unique authentication. base_url (:obj:`str`, optional): Telegram Bot API service URL. base_file_url (:obj:`str`, optional): Telegram Bot API file URL. request (:obj:`telegram.utils.request.Request`, optional): Pre initialized :obj:`telegram.utils.request.Request`. private_key (:obj:`bytes`, optional): Private key for decryption of telegram passport data. private_key_password (:obj:`bytes`, optional): Password for above private key. A very basic validation on token. :obj:`int`: Unique identifier for this bot. :obj:`str`: Bot's first name. :obj:`str`: Optional. Bot's last name. :obj:`str`: Bot's username. :obj:`str`: Bot's @username. A simple method for testing your bot's auth token. Requires no parameters. Args: timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). 
Returns: :class:`telegram.User`: A :class:`telegram.User` instance representing that bot if the credentials are valid, :obj:`None` otherwise. Raises: :class:`telegram.TelegramError` Use this method to send text messages. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). text (:obj:`str`): Text of the message to be sent. Max 4096 characters. Also found as :attr:`telegram.constants.MAX_MESSAGE_LENGTH`. parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. See the constants in :class:`telegram.ParseMode` for the available modes. disable_web_page_preview (:obj:`bool`, optional): Disables link previews for links in this message. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent message is returned. Raises: :class:`telegram.TelegramError` Use this method to delete a message. A message can only be deleted if it was sent less than 48 hours ago. Any such recently sent outgoing message may be deleted. Additionally, if the bot is an administrator in a group chat, it can delete any message. 
If the bot is an administrator in a supergroup, it can delete messages from any other user and service messages about people joining or leaving the group (other types of service messages may only be removed by the group creator). In channels, bots can only remove their own messages. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). message_id (:obj:`int`): Identifier of the message to delete. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to forward messages of any kind. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). from_chat_id (:obj:`int` | :obj:`str`): Unique identifier for the chat where the original message was sent (or channel username in the format @channelusername). disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. message_id (:obj:`int`): Message identifier in the chat specified in from_chat_id. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send photos. 
Note: The photo argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). photo (:obj:`str` | `filelike object` | :class:`telegram.PhotoSize`): Photo to send. Pass a file_id as String to send a photo that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a photo from the Internet, or upload a new photo using multipart/form-data. Lastly you can pass an existing :class:`telegram.PhotoSize` object to send. caption (:obj:`str`, optional): Photo caption (may also be used when resending photos by file_id), 0-200 characters. parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future. 
For sending voice messages, use the sendVoice method instead. Note: The audio argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). audio (:obj:`str` | `filelike object` | :class:`telegram.Audio`): Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data. Lastly you can pass an existing :class:`telegram.Audio` object to send. caption (:obj:`str`, optional): Audio caption, 0-200 characters. parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. duration (:obj:`int`, optional): Duration of sent audio in seconds. performer (:obj:`str`, optional): Performer. title (:obj:`str`, optional): Track name. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. thumb (`filelike object`, optional): Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 90. Ignored if the file is not is passed as a string or file_id. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). **kwargs (:obj:`dict`): Arbitrary keyword arguments. 
Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send general files. Note: The document argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). document (:obj:`str` | `filelike object` | :class:`telegram.Document`): File to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. Lastly you can pass an existing :class:`telegram.Document` object to send. filename (:obj:`str`, optional): File name that shows in telegram message (it is useful when you send file generated by temp module, for example). Undocumented. caption (:obj:`str`, optional): Document caption (may also be used when resending documents by file_id), 0-200 characters. parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. thumb (`filelike object`, optional): Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 90. 
Ignored if the file is not is passed as a string or file_id. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send .webp stickers. Note: The sticker argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). sticker (:obj:`str` | `filelike object` :class:`telegram.Sticker`): Sticker to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a .webp file from the Internet, or upload a new one using multipart/form-data. Lastly you can pass an existing :class:`telegram.Sticker` object to send. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document). 
Note: The video argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). video (:obj:`str` | `filelike object` | :class:`telegram.Video`): Video file to send. Pass a file_id as String to send an video file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an video file from the Internet, or upload a new one using multipart/form-data. Lastly you can pass an existing :class:`telegram.Video` object to send. duration (:obj:`int`, optional): Duration of sent video in seconds. width (:obj:`int`, optional): Video width. height (:obj:`int`, optional): Video height. caption (:obj:`str`, optional): Video caption (may also be used when resending videos by file_id), 0-200 characters. parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. supports_streaming (:obj:`bool`, optional): Pass True, if the uploaded video is suitable for streaming. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. thumb (`filelike object`, optional): Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 90. Ignored if the file is not is passed as a string or file_id. 
timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send video messages. Note: The video_note argument can be either a file_id or a file from disk ``open(filename, 'rb')`` Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). video_note (:obj:`str` | `filelike object` | :class:`telegram.VideoNote`): Video note to send. Pass a file_id as String to send a video note that exists on the Telegram servers (recommended) or upload a new video using multipart/form-data. Or you can pass an existing :class:`telegram.VideoNote` object to send. Sending video notes by a URL is currently unsupported. duration (:obj:`int`, optional): Duration of sent video in seconds. length (:obj:`int`, optional): Video width and height disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. thumb (`filelike object`, optional): Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 90. Ignored if the file is not is passed as a string or file_id. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. 
Raises: :class:`telegram.TelegramError` Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound). Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). animation (:obj:`str` | `filelike object` | :class:`telegram.Animation`): Animation to send. Pass a file_id as String to send an animation that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation from the Internet, or upload a new animation using multipart/form-data. Lastly you can pass an existing :class:`telegram.Animation` object to send. duration (:obj:`int`, optional): Duration of sent animation in seconds. width (:obj:`int`, optional): Animation width. height (:obj:`int`, optional): Animation height. thumb (`filelike object`, optional): Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 90. Ignored if the file is not is passed as a string or file_id. caption (:obj:`str`, optional): Animation caption (may also be used when resending animations by file_id), 0-200 characters. parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). 
**kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .ogg file encoded with OPUS (other formats may be sent as Audio or Document). Note: The voice argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). voice (:obj:`str` | `filelike object` | :class:`telegram.Voice`): Voice file to send. Pass a file_id as String to send an voice file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an voice file from the Internet, or upload a new one using multipart/form-data. Lastly you can pass an existing :class:`telegram.Voice` object to send. caption (:obj:`str`, optional): Voice message caption, 0-200 characters. parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. duration (:obj:`int`, optional): Duration of the voice message in seconds. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). 
**kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send a group of photos or videos as an album. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). media (List[:class:`telegram.InputMedia`]): An array describing photos and videos to be sent, must include 2–10 items. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. timeout (:obj:`int` | :obj:`float`, optional): Send file timeout (default: 20 seconds). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: List[:class:`telegram.Message`]: An array of the sent Messages. Raises: :class:`telegram.TelegramError` Use this method to send point on the map. Note: You can either supply a :obj:`latitude` and :obj:`longitude` or a :obj:`location`. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). latitude (:obj:`float`, optional): Latitude of location. longitude (:obj:`float`, optional): Longitude of location. location (:class:`telegram.Location`, optional): The location to send. live_period (:obj:`int`, optional): Period in seconds for which the location will be updated, should be between 60 and 86400. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. 
A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to edit live location messages sent by the bot or via the bot (for inline bots). A location can be edited until its :attr:`live_period` expires or editing is explicitly disabled by a call to :attr:`stop_message_live_location`. Note: You can either supply a :obj:`latitude` and :obj:`longitude` or a :obj:`location`. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. latitude (:obj:`float`, optional): Latitude of location. longitude (:obj:`float`, optional): Longitude of location. location (:class:`telegram.Location`, optional): The location to send. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). Returns: :class:`telegram.Message`: On success the edited message. 
Use this method to stop updating a live location message sent by the bot or via the bot (for inline bots) before live_period expires. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). Returns: :class:`telegram.Message`: On success the edited message. Use this method to send information about a venue. Note: you can either supply :obj:`venue`, or :obj:`latitude`, :obj:`longitude`, :obj:`title` and :obj:`address` and optionally :obj:`foursquare_id` and optionally :obj:`foursquare_type`. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). latitude (:obj:`float`, optional): Latitude of venue. longitude (:obj:`float`, optional): Longitude of venue. title (:obj:`str`, optional): Name of the venue. address (:obj:`str`, optional): Address of the venue. foursquare_id (:obj:`str`, optional): Foursquare identifier of the venue. foursquare_type (:obj:`str`, optional): Foursquare type of the venue, if known. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".) venue (:class:`telegram.Venue`, optional): The venue to send. disable_notification (:obj:`bool`, optional): Sends the message silently. 
Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send phone contacts. Note: You can either supply :obj:`contact` or :obj:`phone_number` and :obj:`first_name` with optionally :obj:`last_name` and optionally :obj:`vcard`. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). phone_number (:obj:`str`, optional): Contact's phone number. first_name (:obj:`str`, optional): Contact's first name. last_name (:obj:`str`, optional): Contact's last name. vcard (:obj:`str`, optional): Additional data about the contact in the form of a vCard, 0-2048 bytes. contact (:class:`telegram.Contact`, optional): The contact to send. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. 
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method to send a game. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). game_short_name (:obj:`str`): Short name of the game, serves as the unique identifier for the game. Set up your games via Botfather. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). action(:class:`telegram.ChatAction` | :obj:`str`): Type of action to broadcast. Choose one, depending on what the user is about to receive. 
For convenience look at the constants in :class:`telegram.ChatAction` timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: ``True`` on success. Raises: :class:`telegram.TelegramError` Use this method to send answers to an inline query. No more than 50 results per query are allowed. Args: inline_query_id (:obj:`str`): Unique identifier for the answered query. results (List[:class:`telegram.InlineQueryResult`]): A list of results for the inline query. cache_time (:obj:`int`, optional): The maximum amount of time in seconds that the result of the inline query may be cached on the server. Defaults to 300. is_personal (:obj:`bool`, optional): Pass True, if results may be cached on the server side only for the user that sent the query. By default, results may be returned to any user who sends the same query. next_offset (:obj:`str`, optional): Pass the offset that a client should send in the next query with the same text to receive more results. Pass an empty string if there are no more results or if you don't support pagination. Offset length can't exceed 64 bytes. switch_pm_text (:obj:`str`, optional): If passed, clients will display a button with specified text that switches the user to a private chat with the bot and sends the bot a start message with the parameter switch_pm_parameter. switch_pm_parameter (:obj:`str`, optional): Deep-linking parameter for the /start message sent to the bot when user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. 
Example: An inline bot that sends YouTube videos can ask the user to connect the bot to their YouTube account to adapt search results accordingly. To do this, it displays a 'Connect your YouTube account' button above the results, or even before showing any. The user presses the button, switches to a private chat with the bot and, in doing so, passes a start parameter that instructs the bot to return an oauth link. Once done, the bot can offer a switch_inline button so that the user can easily return to the chat where they wanted to use the bot's inline capabilities. Returns: :obj:`bool` On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to get a list of profile pictures for a user. Args: user_id (:obj:`int`): Unique identifier of the target user. offset (:obj:`int`, optional): Sequential number of the first photo to be returned. By default, all photos are returned. limit (:obj:`int`, optional): Limits the number of photos to be retrieved. Values between 1-100 are accepted. Defaults to 100. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.UserProfilePhotos` Raises: :class:`telegram.TelegramError` Use this method to get basic info about a file and prepare it for downloading. For the moment, bots can download files of up to 20MB in size. The file can then be downloaded with :attr:`telegram.File.download`. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling get_file again. 
Args: file_id (:obj:`str` | :class:`telegram.Audio` | :class:`telegram.Document` | \ :class:`telegram.PhotoSize` | :class:`telegram.Sticker` | \ :class:`telegram.Video` | :class:`telegram.VideoNote` | \ :class:`telegram.Voice`): Either the file identifier or an object that has a file_id attribute to get file information about. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.File` Raises: :class:`telegram.TelegramError` Use this method to kick a user from a group or a supergroup. In the case of supergroups, the user will not be able to return to the group on their own using invite links, etc., unless unbanned first. The bot must be an administrator in the group for this to work. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). user_id (:obj:`int`): Unique identifier of the target user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). until_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever. **kwargs (:obj:`dict`): Arbitrary keyword arguments. Note: In regular groups (non-supergroups), this method will only work if the 'All Members Are Admins' setting is off in the target group. Otherwise members may only be removed by the group's creator or by the member that added them. Returns: :obj:`bool` On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to unban a previously kicked user in a supergroup. 
The user will not return to the group automatically, but will be able to join via link, etc. The bot must be an administrator in the group for this to work. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). user_id (:obj:`int`): Unique identifier of the target user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool` On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to send answers to callback queries sent from inline keyboards. The answer will be displayed to the user as a notification at the top of the chat screen or as an alert. Alternatively, the user can be redirected to the specified Game URL. For this option to work, you must first create a game for your bot via BotFather and accept the terms. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter. Args: callback_query_id (:obj:`str`): Unique identifier for the query to be answered. text (:obj:`str`, optional): Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters. show_alert (:obj:`bool`, optional): If true, an alert will be shown by the client instead of a notification at the top of the chat screen. Defaults to false. url (:obj:`str`, optional): URL that will be opened by the user's client. If you have created a Game and accepted the conditions via @Botfather, specify the URL that opens your game - note that this will only work if the query comes from a callback game button. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter. 
cache_time (:obj:`int`, optional): The maximum amount of time in seconds that the result of the callback query may be cached client-side. Defaults to 0. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool` On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to edit text and game messages sent by the bot or via the bot (for inline bots). Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. text (:obj:`str`): New text of the message. parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. See the constants in :class:`telegram.ParseMode` for the available modes. disable_web_page_preview (:obj:`bool`, optional): Disables link previews for links in this message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, if edited message is sent by the bot, the edited Message is returned, otherwise ``True`` is returned. 
Raises: :class:`telegram.TelegramError` Use this method to edit captions of messages sent by the bot or via the bot (for inline bots). Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. caption (:obj:`str`, optional): New caption of the message. parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, if edited message is sent by the bot, the edited Message is returned, otherwise ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to edit audio, document, photo, or video messages. If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise, message type can be changed arbitrarily. When inline message is edited, new file can't be uploaded. Use previously uploaded file via its file_id or specify a URL. On success, if the edited message was sent by the bot, the edited Message is returned, otherwise True is returned. 
Args: chat_id (:obj:`int` | :obj:`str`, optional): Unique identifier for the target chat or username of the target channel (in the format @channelusername). message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. media (:class:`telegram.InputMedia`): An object for a new media content of the message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Use this method to edit only the reply markup of messages sent by the bot or via the bot (for inline bots). Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. 
Returns: :class:`telegram.Message`: On success, if edited message is sent by the bot, the edited Message is returned, otherwise ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to receive incoming updates using long polling. Args: offset (:obj:`int`, optional): Identifier of the first update to be returned. Must be greater by one than the highest among the identifiers of previously received updates. By default, updates starting with the earliest unconfirmed update are returned. An update is considered confirmed as soon as getUpdates is called with an offset higher than its update_id. The negative offset can be specified to retrieve updates starting from -offset update from the end of the updates queue. All previous updates will be forgotten. limit (:obj:`int`, optional): Limits the number of updates to be retrieved. Values between 1-100 are accepted. Defaults to 100. timeout (:obj:`int`, optional): Timeout in seconds for long polling. Defaults to 0, i.e. usual short polling. Should be positive, short polling should be used for testing purposes only. allowed_updates (List[:obj:`str`], optional): List the types of updates you want your bot to receive. For example, specify ["message", "edited_channel_post", "callback_query"] to only receive updates of these types. See :class:`telegram.Update` for a complete list of available update types. Specify an empty list to receive all updates regardless of type (default). If not specified, the previous setting will be used. Please note that this parameter doesn't affect updates created before the call to the get_updates, so unwanted updates may be received for a short period of time. **kwargs (:obj:`dict`): Arbitrary keyword arguments. Notes: 1. This method will not work if an outgoing webhook is set up. 2. In order to avoid getting duplicate updates, recalculate offset after each server response. 3. 
To take full advantage of this library take a look at :class:`telegram.ext.Updater` Returns: List[:class:`telegram.Update`] Raises: :class:`telegram.TelegramError` # Ideally we'd use an aggressive read timeout for the polling. However, # * Short polling should return within 2 seconds. # * Long polling poses a different problem: the connection might have been dropped while # waiting for the server to return and there's no way of knowing the connection had been # dropped in real time. Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts. If you'd like to make sure that the Webhook request comes from Telegram, we recommend using a secret path in the URL, e.g. https://www.example.com/<token>. Since nobody else knows your bot's token, you can be pretty sure it's us. Note: The certificate argument should be a file from disk ``open(filename, 'rb')``. Args: url (:obj:`str`): HTTPS url to send updates to. Use an empty string to remove webhook integration. certificate (:obj:`filelike`): Upload your public key certificate so that the root certificate in use can be checked. See our self-signed guide for details. (https://goo.gl/rw7w6Y) max_connections (:obj:`int`, optional): Maximum allowed number of simultaneous HTTPS connections to the webhook for update delivery, 1-100. Defaults to 40. Use lower values to limit the load on your bot's server, and higher values to increase your bot's throughput. allowed_updates (List[:obj:`str`], optional): List the types of updates you want your bot to receive. For example, specify ["message", "edited_channel_post", "callback_query"] to only receive updates of these types. See :class:`telegram.Update` for a complete list of available update types. 
Specify an empty list to receive all updates regardless of type (default). If not specified, the previous setting will be used. Please note that this parameter doesn't affect updates created before the call to the set_webhook, so unwanted updates may be received for a short period of time. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Note: 1. You will not be able to receive updates using get_updates for as long as an outgoing webhook is set up. 2. To use a self-signed certificate, you need to upload your public key certificate using certificate parameter. Please upload as InputFile, sending a String will not work. 3. Ports currently supported for Webhooks: 443, 80, 88, 8443. Returns: :obj:`bool` On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` # Backwards-compatibility: 'url' used to be named 'webhook_url' # pragma: no cover Use this method to remove webhook integration if you decide to switch back to getUpdates. Requires no parameters. Args: timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool` On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method for your bot to leave a group, supergroup or channel. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. 
Returns: :obj:`bool` On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Chat` Raises: :class:`telegram.TelegramError` Use this method to get a list of administrators in a chat. On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots. If the chat is a group or a supergroup and no administrators were appointed, only the creator will be returned. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: List[:class:`telegram.ChatMember`] Raises: :class:`telegram.TelegramError` Use this method to get the number of members in a chat. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: int: Number of members in the chat. 
Raises: :class:`telegram.TelegramError` Use this method to get information about a member of a chat. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target channel (in the format @channelusername). user_id (:obj:`int`): Unique identifier of the target user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.ChatMember` Raises: :class:`telegram.TelegramError` Use this method to set a new group sticker set for a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Use the field :attr:`telegram.Chat.can_set_sticker_set` optionally returned in :attr:`get_chat` requests to check if the bot can use this method. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername). sticker_set_name (:obj:`str`): Name of the sticker set to be set as the group sticker set. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: True on success. Use this method to delete a group sticker set from a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Use the field :attr:`telegram.Chat.can_set_sticker_set` optionally returned in :attr:`get_chat` requests to check if the bot can use this method. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername). 
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: True on success. Use this method to get current webhook status. Requires no parameters. If the bot is using getUpdates, will return an object with the url field empty. Args: timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.WebhookInfo` Use this method to set the score of the specified user in a game. On success, if the message was sent by the bot, returns the edited Message, otherwise returns True. Returns an error, if the new score is not greater than the user's current score in the chat and force is False. Args: user_id (:obj:`int`): User identifier. score (:obj:`int`): New score, must be non-negative. force (:obj:`bool`, optional): Pass True, if the high score is allowed to decrease. This can be useful when fixing mistakes or banning cheaters disable_edit_message (:obj:`bool`, optional): Pass True, if the game message should not be automatically edited to include the current scoreboard. chat_id (int|str, optional): Required if inline_message_id is not specified. Unique identifier for the target chat. message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. 
Returns: :class:`telegram.Message`: The edited message, or if the message wasn't sent by the bot , ``True``. Raises: :class:`telegram.TelegramError`: If the new score is not greater than the user's current score in the chat and force is False. Use this method to get data for high score tables. Will return the score of the specified user and several of his neighbors in a game Args: user_id (:obj:`int`): User identifier. chat_id (:obj:`int` | :obj:`str`, optional): Required if inline_message_id is not specified. Unique identifier for the target chat. message_id (:obj:`int`, optional): Required if inline_message_id is not specified. Identifier of the sent message. inline_message_id (:obj:`str`, optional): Required if chat_id and message_id are not specified. Identifier of the inline message. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: List[:class:`telegram.GameHighScore`] Raises: :class:`telegram.TelegramError` Use this method to send invoices. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target private chat. title (:obj:`str`): Product name. description (:obj:`str`): Product description. payload (:obj:`str`): Bot-defined invoice payload, 1-128 bytes. This will not be displayed to the user, use for your internal processes. provider_token (:obj:`str`): Payments provider token, obtained via Botfather. start_parameter (:obj:`str`): Unique deep-linking parameter that can be used to generate this invoice when used as a start parameter. currency (:obj:`str`): Three-letter ISO 4217 currency code. prices (List[:class:`telegram.LabeledPrice`)]: Price breakdown, a list of components (e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.). 
provider_data (:obj:`str` | :obj:`object`, optional): JSON-encoded data about the invoice, which will be shared with the payment provider. A detailed description of required fields should be provided by the payment provider. When an object is passed, it will be encoded as JSON. photo_url (:obj:`str`, optional): URL of the product photo for the invoice. Can be a photo of the goods or a marketing image for a service. People like it better when they see what they are paying for. photo_size (:obj:`str`, optional): Photo size. photo_width (:obj:`int`, optional): Photo width. photo_height (:obj:`int`, optional): Photo height. need_name (:obj:`bool`, optional): Pass True, if you require the user's full name to complete the order. need_phone_number (:obj:`bool`, optional): Pass True, if you require the user's phone number to complete the order. need_email (:obj:`bool`, optional): Pass True, if you require the user's email to complete the order. need_shipping_address (:obj:`bool`, optional): Pass True, if you require the user's shipping address to complete the order. send_phone_number_to_provider (:obj:`bool`, optional): Pass True, if user's phone number should be sent to provider. send_email_to_provider (:obj:`bool`, optional): Pass True, if user's email address should be sent to provider. is_flexible (:obj:`bool`, optional): Pass True, if the final price depends on the shipping method. disable_notification (:obj:`bool`, optional): Sends the message silently. Users will receive a notification with no sound. reply_to_message_id (:obj:`int`, optional): If the message is a reply, ID of the original message. reply_markup (:class:`telegram.ReplyMarkup`, optional): Additional interface options. An inlinekeyboard. If empty, one 'Pay total price' button will be shown. If not empty, the first button must be a Pay button. 
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.Message`: On success, the sent Message is returned. Raises: :class:`telegram.TelegramError` If you sent an invoice requesting a shipping address and the parameter is_flexible was specified, the Bot API will send an Update with a shipping_query field to the bot. Use this method to reply to shipping queries. Args: shipping_query_id (:obj:`str`): Unique identifier for the query to be answered. ok (:obj:`bool`): Specify True if delivery to the specified address is possible and False if there are any problems (for example, if delivery to the specified address is not possible). shipping_options (List[:class:`telegram.ShippingOption`]), optional]: Required if ok is True. A JSON-serialized array of available shipping options. error_message (:obj:`str`, optional): Required if ok is False. Error message in human readable form that explains why it is impossible to complete the order (e.g. "Sorry, delivery to your desired address is unavailable"). Telegram will display this message to the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`; On success, True is returned. Raises: :class:`telegram.TelegramError` Once the user has confirmed their payment and shipping details, the Bot API sends the final confirmation in the form of an Update with the field pre_checkout_query. Use this method to respond to such pre-checkout queries. Note: The Bot API must receive an answer within 10 seconds after the pre-checkout query was sent. 
Args: pre_checkout_query_id (:obj:`str`): Unique identifier for the query to be answered. ok (:obj:`bool`): Specify True if everything is alright (goods are available, etc.) and the bot is ready to proceed with the order. Use False if there are any problems. error_message (:obj:`str`, optional): Required if ok is False. Error message in human readable form that explains the reason for failure to proceed with the checkout (e.g. "Sorry, somebody just bought the last of our amazing black T-shirts while you were busy filling out your payment details. Please choose a different color or garment!"). Telegram will display this message to the user. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to restrict a user in a supergroup. The bot must be an administrator in the supergroup for this to work and must have the appropriate admin rights. Pass True for all boolean parameters to lift restrictions from a user. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername). user_id (:obj:`int`): Unique identifier of the target user. until_date (:obj:`int` | :obj:`datetime.datetime`, optional): Date when restrictions will be lifted for the user, unix time. If user is restricted for more than 366 days or less than 30 seconds from the current time, they are considered to be restricted forever. can_send_messages (:obj:`bool`, optional): Pass True, if the user can send text messages, contacts, locations and venues. can_send_media_messages (:obj:`bool`, optional): Pass True, if the user can send audios, documents, photos, videos, video notes and voice notes, implies can_send_messages. 
can_send_other_messages (:obj:`bool`, optional): Pass True, if the user can send animations, games, stickers and use inline bots, implies can_send_media_messages. can_add_web_page_previews (:obj:`bool`, optional): Pass True, if the user may add web page previews to their messages, implies can_send_media_messages. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Returns: :obj:`bool`: Returns True on success. Raises: :class:`telegram.TelegramError` Use this method to promote or demote a user in a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Pass False for all boolean parameters to demote a user Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername). user_id (:obj:`int`): Unique identifier of the target user. can_change_info (:obj:`bool`, optional): Pass True, if the administrator can change chat title, photo and other settings. can_post_messages (:obj:`bool`, optional): Pass True, if the administrator can create channel posts, channels only. can_edit_messages (:obj:`bool`, optional): Pass True, if the administrator can edit messages of other users, channels only. can_delete_messages (:obj:`bool`, optional): Pass True, if the administrator can delete messages of other users. can_invite_users (:obj:`bool`, optional): Pass True, if the administrator can invite new users to the chat. can_restrict_members (:obj:`bool`, optional): Pass True, if the administrator can restrict, ban or unban chat members. can_pin_messages (:obj:`bool`, optional): Pass True, if the administrator can pin messages, supergroups only. 
can_promote_members (:obj:`bool`, optional): Pass True, if the administrator can add new administrators with a subset of his own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by him). timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Returns: :obj:`bool`: Returns True on success. Raises: :class:`telegram.TelegramError` Use this method to export an invite link to a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target`channel (in the format @channelusername). timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Returns: :obj:`str`: Exported invite link on success. Raises: :class:`telegram.TelegramError` Use this method to set a new profile photo for the chat. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target`channel (in the format @channelusername). photo (`filelike object`): New chat photo. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). 
**kwargs (:obj:`dict`): Arbitrary keyword arguments Note: In regular groups (non-supergroups), this method will only work if the 'All Members Are Admins' setting is off in the target group. Returns: :obj:`bool`: Returns True on success. Raises: :class:`telegram.TelegramError` Use this method to delete a chat photo. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target`channel (in the format @channelusername). timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Note: In regular groups (non-supergroups), this method will only work if the 'All Members Are Admins' setting is off in the target group. Returns: :obj:`bool`: Returns ``True`` on success. Raises: :class:`telegram.TelegramError` Use this method to change the title of a chat. Titles can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target`channel (in the format @channelusername). title (:obj:`str`): New chat title, 1-255 characters. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Note: In regular groups (non-supergroups), this method will only work if the 'All Members Are Admins' setting is off in the target group. Returns: :obj:`bool`: Returns ``True`` on success. 
Raises: :class:`telegram.TelegramError` Use this method to change the description of a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target`channel (in the format @channelusername). description (:obj:`str`): New chat description, 1-255 characters. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Returns: :obj:`bool`: Returns ``True`` on success. Raises: :class:`telegram.TelegramError` Use this method to pin a message in a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target`channel (in the format @channelusername). message_id (:obj:`int`): Identifier of a message to pin. disable_notification (:obj:`bool`, optional): Pass True, if it is not necessary to send a notification to all group members about the new pinned message. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Returns: :obj:`bool`: Returns ``True`` on success. Raises: :class:`telegram.TelegramError` Use this method to unpin a message in a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. Args: chat_id (:obj:`int` | :obj:`str`): Unique identifier for the target chat or username of the target`channel (in the format @channelusername). 
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments Returns: :obj:`bool`: Returns ``True`` on success. Raises: :class:`telegram.TelegramError` Use this method to get a sticker set. Args: name (:obj:`str`): Short name of the sticker set that is used in t.me/addstickers/ URLs (e.g., animals) timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.StickerSet` Raises: :class:`telegram.TelegramError` Use this method to upload a .png file with a sticker for later use in :attr:`create_new_sticker_set` and :attr:`add_sticker_to_set` methods (can be used multiple times). Note: The png_sticker argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: user_id (:obj:`int`): User identifier of sticker file owner. png_sticker (:obj:`str` | `filelike object`): Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :class:`telegram.File`: The uploaded File Raises: :class:`telegram.TelegramError` Use this method to create new sticker set owned by a user. The bot will be able to edit the created sticker set. Note: The png_sticker argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: user_id (:obj:`int`): User identifier of created sticker set owner. 
name (:obj:`str`): Short name of sticker set, to be used in t.me/addstickers/ URLs (e.g., animals). Can contain only english letters, digits and underscores. Must begin with a letter, can't contain consecutive underscores and must end in "_by_<bot username>". <bot_username> is case insensitive. 1-64 characters. title (:obj:`str`): Sticker set title, 1-64 characters. png_sticker (:obj:`str` | `filelike object`): Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. emojis (:obj:`str`): One or more emoji corresponding to the sticker. contains_masks (:obj:`bool`, optional): Pass True, if a set of mask stickers should be created. mask_position (:class:`telegram.MaskPosition`, optional): Position where the mask should be placed on faces. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to add a new sticker to a set created by the bot. Note: The png_sticker argument can be either a file_id, an URL or a file from disk ``open(filename, 'rb')`` Args: user_id (:obj:`int`): User identifier of created sticker set owner. name (:obj:`str`): Sticker set name. png_sticker (:obj:`str` | `filelike object`): Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. 
Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. emojis (:obj:`str`): One or more emoji corresponding to the sticker. mask_position (:class:`telegram.MaskPosition`, optional): Position where the mask should beplaced on faces. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to move a sticker in a set created by the bot to a specific position. Args: sticker (:obj:`str`): File identifier of the sticker. position (:obj:`int`): New sticker position in the set, zero-based. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Use this method to delete a sticker from a set created by the bot. Args: sticker (:obj:`str`): File identifier of the sticker. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` Informs a user that some of the Telegram Passport elements they provided contains errors. The user will not be able to re-submit their Passport to you until the errors are fixed (the contents of the field for which you returned the error must change). 
Returns True on success. Use this if the data submitted by the user doesn't satisfy the standards your service requires for any reason. For example, if a birthday date seems invalid, a submitted document is blurry, a scan shows evidence of tampering, etc. Supply some details in the error message to make sure the user knows how to correct the issues. Args: user_id (:obj:`int`): User identifier errors (List[:class:`PassportElementError`]): A JSON-serialized array describing the errors. timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as the read timeout from the server (instead of the one specified during creation of the connection pool). **kwargs (:obj:`dict`): Arbitrary keyword arguments. Returns: :obj:`bool`: On success, ``True`` is returned. Raises: :class:`telegram.TelegramError` # camelCase aliases Alias for :attr:`get_me` Alias for :attr:`send_message` Alias for :attr:`delete_message` Alias for :attr:`forward_message` Alias for :attr:`send_photo` Alias for :attr:`send_audio` Alias for :attr:`send_document` Alias for :attr:`send_sticker` Alias for :attr:`send_video` Alias for :attr:`send_animation` Alias for :attr:`send_voice` Alias for :attr:`send_video_note` Alias for :attr:`send_media_group` Alias for :attr:`send_location` Alias for :attr:`edit_message_live_location` Alias for :attr:`stop_message_live_location` Alias for :attr:`send_venue` Alias for :attr:`send_contact` Alias for :attr:`send_game` Alias for :attr:`send_chat_action` Alias for :attr:`answer_inline_query` Alias for :attr:`get_user_profile_photos` Alias for :attr:`get_file` Alias for :attr:`kick_chat_member` Alias for :attr:`unban_chat_member` Alias for :attr:`answer_callback_query` Alias for :attr:`edit_message_text` Alias for :attr:`edit_message_caption` Alias for :attr:`edit_message_media` Alias for :attr:`edit_message_reply_markup` Alias for :attr:`get_updates` Alias for :attr:`set_webhook` Alias for :attr:`delete_webhook` Alias for :attr:`leave_chat` Alias 
for :attr:`get_chat` Alias for :attr:`get_chat_administrators` Alias for :attr:`get_chat_member` Alias for :attr:`set_chat_sticker_set` Alias for :attr:`delete_chat_sticker_set` Alias for :attr:`get_chat_members_count` Alias for :attr:`get_webhook_info` Alias for :attr:`set_game_score` Alias for :attr:`get_game_high_scores` Alias for :attr:`send_invoice` Alias for :attr:`answer_shipping_query` Alias for :attr:`answer_pre_checkout_query` Alias for :attr:`restrict_chat_member` Alias for :attr:`promote_chat_member` Alias for :attr:`export_chat_invite_link` Alias for :attr:`set_chat_photo` Alias for :attr:`delete_chat_photo` Alias for :attr:`set_chat_title` Alias for :attr:`set_chat_description` Alias for :attr:`pin_chat_message` Alias for :attr:`unpin_chat_message` Alias for :attr:`get_sticker_set` Alias for :attr:`upload_sticker_file` Alias for :attr:`create_new_sticker_set` Alias for :attr:`add_sticker_to_set` Alias for :attr:`set_sticker_position_in_set` Alias for :attr:`delete_sticker_from_set` Alias for :attr:`set_passport_data_errors`
| 1.906595
| 2
|
test/connectivity/acts/tests/google/wifi/WifiPnoTest.py
|
Keneral/atools
| 0
|
6625379
|
#
# Copyright 2014 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import acts.base_test
import acts.test_utils.wifi.wifi_test_utils as wutils
from acts import asserts
# Convenience aliases for the wifi test-util enum/constant namespaces,
# so the rest of the file can write WifiEnums.SSID_KEY etc. directly.
WifiEnums = wutils.WifiEnums
WifiEventNames = wutils.WifiEventNames
class WifiPnoTest(acts.base_test.BaseTestClass):
    """Tests for Preferred Network Offload (PNO) triggered auto-connect.

    PNO scans run while the screen is off; when the currently connected
    network becomes unreachable, the DUT should automatically reconnect to
    another saved network. The tests attenuate one AP, wait ``pno_interval``
    seconds with the screen off, then verify the DUT roamed as expected.
    """

    def __init__(self, controllers):
        acts.base_test.BaseTestClass.__init__(self, controllers)
        self.tests = (
            "test_simple_pno_connection",
            "test_pno_connection_with_multiple_saved_networks",
        )

    def setup_class(self):
        """One-time setup: select the DUT, unpack required user params."""
        self.dut = self.android_devices[0]
        wutils.wifi_test_device_init(self.dut)
        # attn_vals: named attenuation presets; pno_network_a/b: the two real
        # networks used for roaming; pno_interval: seconds to wait for a PNO
        # scan to fire while the screen is off.
        req_params = (
            "attn_vals",
            "pno_network_a",
            "pno_network_b",
            "pno_interval"
        )
        self.unpack_userparams(req_params)
        self.attn_a = self.attenuators[0]
        self.attn_b = self.attenuators[1]
        self.set_attns("default")

    def setup_test(self):
        """Per-test setup: track wifi state changes and start from a clean,
        awake state with no saved networks or stale events."""
        self.dut.droid.wifiStartTrackingStateChange()
        self.dut.droid.wakeLockAcquireBright()
        self.dut.droid.wakeUpNow()
        wutils.reset_wifi(self.dut)
        self.dut.ed.clear_all_events()

    def teardown_test(self):
        """Per-test cleanup: release the wake lock, clear wifi state and
        restore the default attenuation preset."""
        self.dut.droid.wifiStopTrackingStateChange()
        self.dut.droid.wakeLockRelease()
        self.dut.droid.goToSleepNow()
        wutils.reset_wifi(self.dut)
        self.dut.ed.clear_all_events()
        self.set_attns("default")

    def on_fail(self, test_name, begin_time):
        """Collect a bug report and adb log for the failed test."""
        self.dut.take_bug_report(test_name, begin_time)
        self.dut.cat_adb_log(test_name, begin_time)

    """Helper Functions"""

    def set_attns(self, attn_val_name):
        """Sets attenuation values on attenuators used in this test.

        Args:
            attn_val_name: Name of the attenuation value pair to use.

        Raises:
            Whatever the attenuator controller raises; the error is logged
            and then re-raised.
        """
        msg = "Set attenuation values to %s" % self.attn_vals[attn_val_name]
        self.log.info(msg)
        try:
            self.attn_a.set_atten(self.attn_vals[attn_val_name][0])
            self.attn_b.set_atten(self.attn_vals[attn_val_name][1])
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not intercepted; the original re-raised, so behavior for ordinary
        # errors is unchanged.
        except Exception:
            msg = "Failed to set attenuation values %s." % attn_val_name
            self.log.error(msg)
            raise

    def trigger_pno_and_assert_connect(self, attn_val_name, expected_con):
        """Sets attenuators to disconnect current connection and power-off the
        screen to trigger PNO. Validate that the DUT connected to the new SSID
        as expected after PNO.

        Args:
            attn_val_name: Name of the attenuation value pair to use.
            expected_con: The expected info of the network we expect the DUT
                to roam to.
        """
        connection_info = self.dut.droid.wifiGetConnectionInfo()
        self.log.info("Triggering PNO connect from %s to %s" %
                      (connection_info[WifiEnums.SSID_KEY],
                       expected_con[WifiEnums.SSID_KEY]))
        self.dut.droid.goToSleepNow()
        self.set_attns(attn_val_name)
        self.log.info("Wait %ss for PNO to trigger." % self.pno_interval)
        time.sleep(self.pno_interval)
        try:
            self.dut.droid.wakeLockAcquireBright()
            self.dut.droid.wakeUpNow()
            expected_ssid = expected_con[WifiEnums.SSID_KEY]
            verify_con = {WifiEnums.SSID_KEY: expected_ssid}
            wutils.verify_wifi_connection_info(self.dut, verify_con)
            self.log.info("Connected to %s successfully after PNO" %
                          expected_ssid)
        finally:
            # Bug fix: the lock acquired above is a *wake* lock
            # (wakeLockAcquireBright), so it must be released with
            # wakeLockRelease(), matching teardown_test. The original called
            # wifiLockRelease(), leaking the wake lock.
            self.dut.droid.wakeLockRelease()
            self.dut.droid.goToSleepNow()

    def add_dummy_networks(self, num_networks):
        """Add some dummy networks to the device.

        Args:
            num_networks: Number of networks to add.
        """
        ssid_name_base = "pno_dummy_network_"
        for i in range(num_networks):
            network = {
                WifiEnums.SSID_KEY: ssid_name_base + str(i),
                WifiEnums.PWD_KEY: "pno_dummy",
            }
            asserts.assert_true(self.dut.droid.wifiAddNetwork(network) != -1,
                                "Add network %r failed" % network)

    """ Tests Begin """

    def test_simple_pno_connection(self):
        """Test PNO triggered autoconnect to a network.

        Steps:
        1. Save 2 valid network configurations (a & b) in the device.
        2. Attenuate network b.
        3. Connect the device to network a.
        4. Switch off the screen on the device.
        5. Attenuate network a and remove attenuation on network b and wait
           for a few seconds to trigger PNO.
        6. Check the device connected to network b automatically.
        7. Switch off the screen on the device.
        8. Attenuate network b and remove attenuation on network a and wait
           for a few seconds to trigger PNO.
        9. Check the device connected to network a automatically.
        """
        asserts.assert_true(
            self.dut.droid.wifiAddNetwork(self.pno_network_a) != -1,
            "Add network %r failed" % self.pno_network_a)
        asserts.assert_true(
            self.dut.droid.wifiAddNetwork(self.pno_network_b) != -1,
            "Add network %r failed" % self.pno_network_b)
        self.set_attns("a_on_b_off")
        # Removed a stray trailing comma here that silently wrapped the call's
        # None return in a 1-tuple (harmless, but a typo).
        wutils.wifi_connect(self.dut, self.pno_network_a)
        self.trigger_pno_and_assert_connect("b_on_a_off", self.pno_network_b)
        self.trigger_pno_and_assert_connect("a_on_b_off", self.pno_network_a)

    def test_pno_connection_with_multiple_saved_networks(self):
        """Test PNO triggered autoconnect to a network when there are more
        than 16 networks saved in the device.

        16 is the max list size of PNO watch list for most devices. The device
        should automatically pick the 16 latest added networks in the list.
        So add 16 dummy networks and then add 2 valid networks.

        Steps:
        1. Save 16 dummy network configurations in the device.
        2. Run the simple pno test.
        """
        self.add_dummy_networks(16)
        self.test_simple_pno_connection()

    """ Tests End """
|
#
# Copyright 2014 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import acts.base_test
import acts.test_utils.wifi.wifi_test_utils as wutils
from acts import asserts
# Convenience aliases for the wifi test-util enum/constant namespaces,
# so the rest of the file can write WifiEnums.SSID_KEY etc. directly.
WifiEnums = wutils.WifiEnums
WifiEventNames = wutils.WifiEventNames
class WifiPnoTest(acts.base_test.BaseTestClass):
def __init__(self, controllers):
    """Initialize the base class and register the test cases to run."""
    acts.base_test.BaseTestClass.__init__(self, controllers)
    self.tests = ("test_simple_pno_connection",
                  "test_pno_connection_with_multiple_saved_networks")
def setup_class(self):
    """One-time setup: pick the DUT, pull required user params and set the
    attenuators to their default preset."""
    self.dut = self.android_devices[0]
    wutils.wifi_test_device_init(self.dut)
    # Required user params: attenuation presets, the two real PNO networks,
    # and the wait time for a PNO scan to fire.
    required = ("attn_vals", "pno_network_a", "pno_network_b", "pno_interval")
    self.unpack_userparams(required)
    self.attn_a, self.attn_b = self.attenuators[0], self.attenuators[1]
    self.set_attns("default")
def setup_test(self):
    """Per-test setup: track wifi state, wake the screen and start from a
    clean wifi state with no stale events."""
    droid = self.dut.droid
    droid.wifiStartTrackingStateChange()
    droid.wakeLockAcquireBright()
    droid.wakeUpNow()
    wutils.reset_wifi(self.dut)
    self.dut.ed.clear_all_events()
def teardown_test(self):
    """Per-test cleanup: stop tracking, release the wake lock, clear wifi
    state and restore the default attenuation preset."""
    droid = self.dut.droid
    droid.wifiStopTrackingStateChange()
    droid.wakeLockRelease()
    droid.goToSleepNow()
    wutils.reset_wifi(self.dut)
    self.dut.ed.clear_all_events()
    self.set_attns("default")
def on_fail(self, test_name, begin_time):
    """On test failure, collect a bug report and the adb log for triage.

    Args:
        test_name: Name of the failed test.
        begin_time: Timestamp marking the start of the test, used to scope
            the collected logs.
    """
    self.dut.take_bug_report(test_name, begin_time)
    self.dut.cat_adb_log(test_name, begin_time)
"""Helper Functions"""
def set_attns(self, attn_val_name):
    """Sets attenuation values on attenuators used in this test.

    Args:
        attn_val_name: Name of the attenuation value pair to use.

    Raises:
        Whatever the attenuator controller raises; the error is logged and
        then re-raised.
    """
    msg = "Set attenuation values to %s" % self.attn_vals[attn_val_name]
    self.log.info(msg)
    try:
        self.attn_a.set_atten(self.attn_vals[attn_val_name][0])
        self.attn_b.set_atten(self.attn_vals[attn_val_name][1])
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # intercepted; the original re-raised, so ordinary errors behave the same.
    except Exception:
        msg = "Failed to set attenuation values %s." % attn_val_name
        self.log.error(msg)
        raise
def trigger_pno_and_assert_connect(self, attn_val_name, expected_con):
"""Sets attenuators to disconnect current connection and power-off the
screen to trigger PNO. Validate that the DUT connected to the new SSID
as expected after PNO.
Args:
attn_val_name: Name of the attenuation value pair to use.
expected_con: The expected info of the network to we expect the DUT
to roam to.
"""
connection_info = self.dut.droid.wifiGetConnectionInfo()
self.log.info("Triggering PNO connect from %s to %s" %
(connection_info[WifiEnums.SSID_KEY],
expected_con[WifiEnums.SSID_KEY]))
self.dut.droid.goToSleepNow()
self.set_attns(attn_val_name)
self.log.info("Wait %ss for PNO to trigger." % self.pno_interval)
time.sleep(self.pno_interval)
try:
self.dut.droid.wakeLockAcquireBright()
self.dut.droid.wakeUpNow()
expected_ssid = expected_con[WifiEnums.SSID_KEY]
verify_con = { WifiEnums.SSID_KEY : expected_ssid }
wutils.verify_wifi_connection_info(self.dut, verify_con)
self.log.info("Connected to %s successfully after PNO" %
expected_ssid)
finally:
self.dut.droid.wifiLockRelease()
self.dut.droid.goToSleepNow()
def add_dummy_networks(self, num_networks):
"""Add some dummy networks to the device.
Args:
num_networks: Number of networks to add.
"""
ssid_name_base = "pno_dummy_network_"
for i in range(0, num_networks) :
network = {}
network[WifiEnums.SSID_KEY] = ssid_name_base + str(i)
network[WifiEnums.PWD_KEY] = "pno_dummy";
asserts.assert_true(self.dut.droid.wifiAddNetwork(network) != -1,
"Add network %r failed" % network)
""" Tests Begin """
def test_simple_pno_connection(self):
"""Test PNO triggered autoconnect to a network.
Steps:
1. Save 2 valid network configurations (a & b) in the device.
2. Attenuate network b.
3. Connect the device to network a.
4. Switch off the screen on the device.
5. Attenuate network a and remove attenuation on network b and wait for
a few seconds to trigger PNO.
6. Check the device connected to network b automatically.
7. Switch off the screen on the device.
8. Attenuate network b and remove attenuation on network a and wait for
a few seconds to trigger PNO.
9. Check the device connected to network a automatically.
"""
asserts.assert_true(
self.dut.droid.wifiAddNetwork(self.pno_network_a) != -1,
"Add network %r failed" % self.pno_network_a)
asserts.assert_true(
self.dut.droid.wifiAddNetwork(self.pno_network_b) != -1,
"Add network %r failed" % self.pno_network_b)
self.set_attns("a_on_b_off")
wutils.wifi_connect(self.dut, self.pno_network_a),
self.trigger_pno_and_assert_connect("b_on_a_off", self.pno_network_b)
self.trigger_pno_and_assert_connect("a_on_b_off", self.pno_network_a)
def test_pno_connection_with_multiple_saved_networks(self):
"""Test PNO triggered autoconnect to a network when there are more
than 16 networks saved in the device.
16 is the max list size of PNO watch list for most devices. The device
should automatically pick the 16 latest added networks in the list.
So add 16 dummy networks and then add 2 valid networks.
Steps:
1. Save 16 dummy network configurations in the device.
2. Run the simple pno test.
"""
self.add_dummy_networks(16)
self.test_simple_pno_connection()
""" Tests End """
|
en
| 0.834798
|
# # Copyright 2014 - The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Helper Functions Sets attenuation values on attenuators used in this test. Args: attn_val_name: Name of the attenuation value pair to use. Sets attenuators to disconnect current connection and power-off the screen to trigger PNO. Validate that the DUT connected to the new SSID as expected after PNO. Args: attn_val_name: Name of the attenuation value pair to use. expected_con: The expected info of the network to we expect the DUT to roam to. Add some dummy networks to the device. Args: num_networks: Number of networks to add. Tests Begin Test PNO triggered autoconnect to a network. Steps: 1. Save 2 valid network configurations (a & b) in the device. 2. Attenuate network b. 3. Connect the device to network a. 4. Switch off the screen on the device. 5. Attenuate network a and remove attenuation on network b and wait for a few seconds to trigger PNO. 6. Check the device connected to network b automatically. 7. Switch off the screen on the device. 8. Attenuate network b and remove attenuation on network a and wait for a few seconds to trigger PNO. 9. Check the device connected to network a automatically. Test PNO triggered autoconnect to a network when there are more than 16 networks saved in the device. 16 is the max list size of PNO watch list for most devices. The device should automatically pick the 16 latest added networks in the list. 
So add 16 dummy networks and then add 2 valid networks. Steps: 1. Save 16 dummy network configurations in the device. 2. Run the simple pno test. Tests End
| 1.968906
| 2
|
for-ncclient/rpc.py
|
HPENetworking/hpe-cw7-ansible
| 4
|
6625380
|
# Copyright 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Event, Lock
from uuid import uuid4
import six
from ncclient.xml_ import *
from ncclient.logging_ import SessionLoggerAdapter
from ncclient.transport import SessionListener
from ncclient.operations.errors import OperationError, TimeoutExpiredError, MissingCapabilityError
import logging
logger = logging.getLogger("ncclient.operations.rpc")
class RPCError(OperationError):
    "Represents an `rpc-error`. It is a type of :exc:`OperationError` and can be raised as such."

    # Maps each qualified rpc-error child element to the instance attribute
    # that stores its value.
    tag_to_attr = {
        qualify("error-type"): "_type",
        qualify("error-tag"): "_tag",
        qualify("error-severity"): "_severity",
        qualify("error-info"): "_info",
        qualify("error-path"): "_path",
        qualify("error-message"): "_message"
    }

    def __init__(self, raw, errs=None):
        """*raw* is the `rpc-error` element (or the whole reply when multiple
        errors are aggregated).

        *errs* is a list of :class:`RPCError` objects for the multi-error
        case, or `None` for a single error.
        """
        self._raw = raw
        if errs is None:
            # Single RPCError: extract each known child element into the
            # matching attribute; error-info is kept as an XML string.
            for attr in six.itervalues(RPCError.tag_to_attr):
                setattr(self, attr, None)
            for subele in raw:
                attr = RPCError.tag_to_attr.get(subele.tag, None)
                if attr is not None:
                    setattr(self, attr, subele.text if attr != "_info" else to_xml(subele))
            if self.message is not None:
                OperationError.__init__(self, self.message)
            else:
                OperationError.__init__(self, self.to_dict())
        else:
            # Multiple errors returned. Errors is a list of RPCError objs
            errlist = []
            for err in errs:
                if err.severity:
                    errsev = err.severity
                else:
                    errsev = 'undefined'
                if err.message:
                    errmsg = err.message
                else:
                    errmsg = 'not an error message in the reply. Enable debug'
                errlist.append({"severity": errsev, "message": errmsg})
            # We are interested in the severity and the message.
            self._severity = 'warning'
            self._message = "\n".join(["%s: %s" % (err['severity'].strip(), err['message'].strip()) for err in errlist])
            self.errors = errs
            # Bug fix: the original used `has_error = filter(...)`, but under
            # Python 3 filter() returns a lazy object that is truthy even when
            # it matches nothing, so severity was promoted to 'error'
            # unconditionally. any() short-circuits and evaluates correctly.
            if any(e['severity'] == 'error' for e in errlist):
                self._severity = 'error'
            OperationError.__init__(self, self.message)

    def to_dict(self):
        """Return all extracted error fields as a dict keyed by field name
        (attribute name minus the leading underscore)."""
        return dict([(attr[1:], getattr(self, attr)) for attr in six.itervalues(RPCError.tag_to_attr)])

    @property
    def xml(self):
        "The `rpc-error` element as returned in XML. \
        Multiple errors are returned as list of RPC errors"
        return self._raw

    @property
    def type(self):
        "The contents of the `error-type` element."
        return self._type

    @property
    def tag(self):
        "The contents of the `error-tag` element."
        return self._tag

    @property
    def severity(self):
        "The contents of the `error-severity` element."
        return self._severity

    @property
    def path(self):
        "The contents of the `error-path` element if present or `None`."
        return self._path

    @property
    def message(self):
        "The contents of the `error-message` element if present or `None`."
        return self._message

    @property
    def info(self):
        "XML string or `None`; representing the `error-info` element."
        return self._info
class RPCReply(object):
    """Represents an *rpc-reply*. Only concerns itself with whether the operation was successful.

    .. note::
        If the reply has not yet been parsed there is an implicit, one-time parsing overhead to
        accessing some of the attributes defined by this class.
    """

    ERROR_CLS = RPCError
    "Subclasses can specify a different error class, but it should be a subclass of `RPCError`."

    def __init__(self, raw):
        self._raw = raw        # raw reply text as received from the session
        self._parsed = False   # lazy-parse flag; parse() is idempotent
        self._root = None
        self._errors = []

    def __repr__(self):
        return self._raw

    def parse(self):
        "Parses the *rpc-reply*."
        if self._parsed: return
        # HPE workaround: some devices answer a successful save with CLI-style
        # text ("Please wait... Save successfully") that is not valid NETCONF;
        # substitute a canned <ok/> reply so downstream XML parsing succeeds.
        if 'Please wait...' in str(self._raw) and 'diagnostic-information' in str(self._raw) and 'Save successfully' in str(self._raw):
            str1 = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<rpc-reply xmlns:config=\"http://www.hp.com/netconf/config:1.0\" xmlns:data=\"http://www.hp.com/netconf/data:1.0\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\" message-id=\"urn:uuid:db6c046f-db5d-40fb-b431-7c1057feddbb\">
<ok/>
</rpc-reply>\n
'''
            self._raw = str1
        root = self._root = to_ele(self._raw)  # The <rpc-reply> element
        # Per RFC 4741 an <ok/> tag is sent when there are no errors or warnings
        ok = root.find(qualify("ok"))
        if ok is None:
            # Create RPCError objects from <rpc-error> elements
            error = root.find('.//' + qualify('rpc-error'))
            if error is not None:
                # Bug fix: Element.getiterator() was removed from
                # xml.etree.ElementTree in Python 3.9; iter() is the
                # long-standing equivalent.
                for err in root.iter(error.tag):
                    # Process a particular <rpc-error>
                    self._errors.append(self.ERROR_CLS(err))
        self._parsing_hook(root)
        self._parsed = True

    def _parsing_hook(self, root):
        "No-op by default. Gets passed the *root* element for the reply."
        pass

    @property
    def xml(self):
        "*rpc-reply* element as returned."
        return self._raw

    @property
    def ok(self):
        "Boolean value indicating if there were no errors."
        self.parse()
        return not self.errors  # empty list => false

    @property
    def error(self):
        "Returns the first :class:`RPCError` and `None` if there were no errors."
        self.parse()
        if self._errors:
            return self._errors[0]
        else:
            return None

    @property
    def errors(self):
        "List of `RPCError` objects. Will be empty if there were no *rpc-error* elements in reply."
        self.parse()
        return self._errors
class RPCReplyListener(SessionListener): # internal use
    """Session listener that routes each incoming *rpc-reply* to the RPC
    object that sent the matching request, keyed by message-id.
    """

    # Guards singleton creation below so concurrent constructors on the same
    # session cannot each install a listener.
    creation_lock = Lock()

    # one instance per session -- maybe there is a better way??
    def __new__(cls, session, device_handler):
        # Return the session's existing listener if one is registered;
        # otherwise build, initialize, and register a fresh instance.
        with RPCReplyListener.creation_lock:
            instance = session.get_listener_instance(cls)
            if instance is None:
                instance = object.__new__(cls)
                instance._lock = Lock()      # guards _id2rpc
                instance._id2rpc = {}        # message-id -> pending RPC
                instance._device_handler = device_handler
                #instance._pipelined = session.can_pipeline
                session.add_listener(instance)
                instance.logger = SessionLoggerAdapter(logger,
                                                       {'session': session})
            return instance

    def register(self, id, rpc):
        """Record *rpc* as the pending request for message-id *id*."""
        with self._lock:
            self._id2rpc[id] = rpc

    def callback(self, root, raw):
        """Deliver a parsed reply (*root* is a (tag, attrs) pair, *raw* the
        reply text) to the RPC registered under its message-id.

        Raises :exc:`OperationError` when the message-id attribute is missing
        or unknown.
        """
        tag, attrs = root
        if self._device_handler.perform_qualify_check():
            if tag != qualify("rpc-reply"):
                return
        if "message-id" not in attrs:
            # required attribute so raise OperationError
            raise OperationError("Could not find 'message-id' attribute in <rpc-reply>")
        else:
            id = attrs["message-id"] # get the msgid
            with self._lock:
                try:
                    rpc = self._id2rpc[id] # the corresponding rpc
                    self.logger.debug("Delivering to %r", rpc)
                    rpc.deliver_reply(raw)
                except KeyError:
                    raise OperationError("Unknown 'message-id': %s" % id)
                # no catching other exceptions, fail loudly if must
                else:
                    # if no error delivering, can del the reference to the RPC
                    del self._id2rpc[id]

    def errback(self, err):
        """Propagate a transport-level error to every pending RPC, then drop
        all registrations (no reply can arrive after a session error)."""
        try:
            for rpc in six.itervalues(self._id2rpc):
                rpc.deliver_error(err)
        finally:
            self._id2rpc.clear()
class RaiseMode(object):
    """
    Define how errors indicated by RPC should be handled.
    Note that any error_filters defined in the device handler will still be
    applied, even if ERRORS or ALL is defined: If the filter matches, an exception
    will NOT be raised.
    """
    # Never turn an `rpc-error` into an :exc:`RPCError` exception.
    NONE = 0
    # Raise only when the `error-severity` indicates a genuine error.
    ERRORS = 1
    # Raise unconditionally, without inspecting the error severity.
    ALL = 2
class RPC(object):
    """Base class for all operations, directly corresponding to *rpc* requests. Handles making the request, and taking delivery of the reply."""

    DEPENDS = []
    """Subclasses can specify their dependencies on capabilities as a list of URI's or abbreviated names, e.g. ':writable-running'. These are verified at the time of instantiation. If the capability is not available, :exc:`MissingCapabilityError` is raised."""

    REPLY_CLS = RPCReply
    "By default :class:`RPCReply`. Subclasses can specify a :class:`RPCReply` subclass."

    def __init__(self, session, device_handler, async_mode=False, timeout=30, raise_mode=RaiseMode.NONE):
        """
        *session* is the :class:`~ncclient.transport.Session` instance

        *device_handler* is the :class:`~ncclient.devices.*.*DeviceHandler` instance

        *async_mode* specifies whether the request is to be made asynchronously, see :attr:`is_async`

        *timeout* is the timeout for a synchronous request, see :attr:`timeout`

        *raise_mode* specifies the exception raising mode, see :attr:`raise_mode`
        """
        self._session = session
        try:
            for cap in self.DEPENDS:
                self._assert(cap)
        except AttributeError:
            pass
        self._async = async_mode
        self._timeout = timeout
        self._raise_mode = raise_mode
        self._id = uuid4().urn # Keeps things simple instead of having a class attr with running ID that has to be locked
        self._listener = RPCReplyListener(session, device_handler)
        self._listener.register(self._id, self)
        self._reply = None
        self._error = None
        self._event = Event()  # set once a reply or a delivery error arrives
        self._device_handler = device_handler
        self.logger = SessionLoggerAdapter(logger, {'session': session})

    def _wrap(self, subele):
        # internal use: wrap *subele* in an <rpc> element carrying this
        # request's message-id and serialize to XML text.
        ele = new_ele("rpc", {"message-id": self._id},
                      **self._device_handler.get_xml_extra_prefix_kwargs())
        ele.append(subele)
        return to_xml(ele)

    def _request(self, op):
        """Implementations of :meth:`request` call this method to send the request and process the reply.

        In synchronous mode, blocks until the reply is received and returns :class:`RPCReply`. Depending on the :attr:`raise_mode` a `rpc-error` element in the reply may lead to an :exc:`RPCError` exception.

        In asynchronous mode, returns immediately, returning `self`. The :attr:`event` attribute will be set when the reply has been received (see :attr:`reply`) or an error occured (see :attr:`error`).

        *op* is the operation to be requested as an :class:`~xml.etree.ElementTree.Element`
        """
        self.logger.info('Requesting %r', self.__class__.__name__)
        req = self._wrap(op)
        self._session.send(req)
        if self._async:
            self.logger.debug('Async request, returning %r', self)
            return self
        else:
            self.logger.debug('Sync request, will wait for timeout=%r', self._timeout)
            self._event.wait(self._timeout)
            # is_set(): the camelCase isSet() alias is deprecated (and removed
            # from recent Python releases).
            if self._event.is_set():
                if self._error:
                    # Error that prevented reply delivery
                    raise self._error
                self._reply.parse()
                if self._reply.error is not None and not self._device_handler.is_rpc_error_exempt(self._reply.error.message):
                    # <rpc-error>'s [ RPCError ]
                    if self._raise_mode == RaiseMode.ALL or (self._raise_mode == RaiseMode.ERRORS and self._reply.error.severity == "error"):
                        errors = self._reply.errors
                        if len(errors) > 1:
                            # Aggregate multiple <rpc-error> elements into one exception.
                            raise RPCError(to_ele(self._reply._raw), errs=errors)
                        else:
                            raise self._reply.error
                if self._device_handler.transform_reply():
                    return NCElement(self._reply, self._device_handler.transform_reply())
                else:
                    return self._reply
            else:
                raise TimeoutExpiredError('ncclient timed out while waiting for an rpc reply.')

    def request(self):
        """Subclasses must implement this method. Typically only the request needs to be built as an
        :class:`~xml.etree.ElementTree.Element` and everything else can be handed off to
        :meth:`_request`."""
        pass

    def _assert(self, capability):
        """Subclasses can use this method to verify that a capability is available with the NETCONF
        server, before making a request that requires it. A :exc:`MissingCapabilityError` will be
        raised if the capability is not available."""
        if capability not in self._session.server_capabilities:
            raise MissingCapabilityError('Server does not support [%s]' % capability)

    def deliver_reply(self, raw):
        # internal use: called by the listener thread with the raw reply text
        self._reply = self.REPLY_CLS(raw)
        self._event.set()

    def deliver_error(self, err):
        # internal use: called by the listener thread on a transport error
        self._error = err
        self._event.set()

    @property
    def reply(self):
        ":class:`RPCReply` element if reply has been received or `None`"
        return self._reply

    @property
    def error(self):
        """:exc:`Exception` type if an error occured or `None`.

        .. note::
            This represents an error which prevented a reply from being received. An *rpc-error*
            does not fall in that category -- see `RPCReply` for that.
        """
        return self._error

    @property
    def id(self):
        "The *message-id* for this RPC."
        return self._id

    @property
    def session(self):
        "The `~ncclient.transport.Session` object associated with this RPC."
        return self._session

    @property
    def event(self):
        """:class:`~threading.Event` that is set when reply has been received or when an error preventing
        delivery of the reply occurs.
        """
        return self._event

    def __set_async(self, async_mode=True):
        self._async = async_mode
        if async_mode and not self._session.can_pipeline:
            raise UserWarning('Asynchronous mode not supported for this device/session')

    def __set_raise_mode(self, mode):
        assert(mode in (RaiseMode.NONE, RaiseMode.ERRORS, RaiseMode.ALL))
        self._raise_mode = mode

    def __set_timeout(self, timeout):
        self._timeout = timeout

    raise_mode = property(fget=lambda self: self._raise_mode, fset=__set_raise_mode)
    """Depending on this exception raising mode, an `rpc-error` in the reply may be raised as an :exc:`RPCError` exception. Valid values are the constants defined in :class:`RaiseMode`. """

    is_async = property(fget=lambda self: self._async, fset=__set_async)
    """Specifies whether this RPC will be / was requested asynchronously. By default RPC's are synchronous."""

    timeout = property(fget=lambda self: self._timeout, fset=__set_timeout)
    """Timeout in seconds for synchronous waiting defining how long the RPC request will block on a reply before raising :exc:`TimeoutExpiredError`.

    Irrelevant for asynchronous usage.
    """
|
# Copyright 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Event, Lock
from uuid import uuid4
import six
from ncclient.xml_ import *
from ncclient.logging_ import SessionLoggerAdapter
from ncclient.transport import SessionListener
from ncclient.operations.errors import OperationError, TimeoutExpiredError, MissingCapabilityError
import logging
logger = logging.getLogger("ncclient.operations.rpc")
class RPCError(OperationError):
"Represents an `rpc-error`. It is a type of :exc:`OperationError` and can be raised as such."
tag_to_attr = {
qualify("error-type"): "_type",
qualify("error-tag"): "_tag",
qualify("error-severity"): "_severity",
qualify("error-info"): "_info",
qualify("error-path"): "_path",
qualify("error-message"): "_message"
}
def __init__(self, raw, errs=None):
self._raw = raw
if errs is None:
# Single RPCError
for attr in six.itervalues(RPCError.tag_to_attr):
setattr(self, attr, None)
for subele in raw:
attr = RPCError.tag_to_attr.get(subele.tag, None)
if attr is not None:
setattr(self, attr, subele.text if attr != "_info" else to_xml(subele) )
if self.message is not None:
OperationError.__init__(self, self.message)
else:
OperationError.__init__(self, self.to_dict())
else:
# Multiple errors returned. Errors is a list of RPCError objs
errlist = []
for err in errs:
if err.severity:
errsev = err.severity
else:
errsev = 'undefined'
if err.message:
errmsg = err.message
else:
errmsg = 'not an error message in the reply. Enable debug'
errordict = {"severity": errsev, "message":errmsg}
errlist.append(errordict)
# We are interested in the severity and the message
self._severity = 'warning'
self._message = "\n".join(["%s: %s" %(err['severity'].strip(), err['message'].strip()) for err in errlist])
self.errors = errs
has_error = filter(lambda higherr: higherr['severity'] == 'error', errlist)
if has_error:
self._severity = 'error'
OperationError.__init__(self, self.message)
def to_dict(self):
return dict([ (attr[1:], getattr(self, attr)) for attr in six.itervalues(RPCError.tag_to_attr) ])
@property
def xml(self):
"The `rpc-error` element as returned in XML. \
Multiple errors are returned as list of RPC errors"
return self._raw
@property
def type(self):
"The contents of the `error-type` element."
return self._type
@property
def tag(self):
"The contents of the `error-tag` element."
return self._tag
@property
def severity(self):
"The contents of the `error-severity` element."
return self._severity
@property
def path(self):
"The contents of the `error-path` element if present or `None`."
return self._path
@property
def message(self):
"The contents of the `error-message` element if present or `None`."
return self._message
@property
def info(self):
"XML string or `None`; representing the `error-info` element."
return self._info
class RPCReply(object):
"""Represents an *rpc-reply*. Only concerns itself with whether the operation was successful.
.. note::
If the reply has not yet been parsed there is an implicit, one-time parsing overhead to
accessing some of the attributes defined by this class.
"""
ERROR_CLS = RPCError
"Subclasses can specify a different error class, but it should be a subclass of `RPCError`."
def __init__(self, raw):
self._raw = raw
self._parsed = False
self._root = None
self._errors = []
def __repr__(self):
return self._raw
def parse(self):
"Parses the *rpc-reply*."
if self._parsed: return
if 'Please wait...' in str(self._raw) and 'diagnostic-information' in str(self._raw) and 'Save successfully' in str(self._raw):
# print(str(self._reply))
# res = re.sub(r"<CLI>.*</CLI>", "<ok/>", str(self._raw))
str1 = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<rpc-reply xmlns:config=\"http://www.hp.com/netconf/config:1.0\" xmlns:data=\"http://www.hp.com/netconf/data:1.0\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\" message-id=\"urn:uuid:db6c046f-db5d-40fb-b431-7c1057feddbb\">
<ok/>
</rpc-reply>\n
'''
# res = re.sub(str(self._reply), "<ok/>", str(self._reply))
self._raw = str1
root = self._root = to_ele(self._raw) # The <rpc-reply> element
# Per RFC 4741 an <ok/> tag is sent when there are no errors or warnings
ok = root.find(qualify("ok"))
if ok is None:
# Create RPCError objects from <rpc-error> elements
error = root.find('.//'+qualify('rpc-error'))
if error is not None:
for err in root.getiterator(error.tag):
# Process a particular <rpc-error>
self._errors.append(self.ERROR_CLS(err))
self._parsing_hook(root)
self._parsed = True
def _parsing_hook(self, root):
"No-op by default. Gets passed the *root* element for the reply."
pass
@property
def xml(self):
"*rpc-reply* element as returned."
return self._raw
@property
def ok(self):
"Boolean value indicating if there were no errors."
self.parse()
return not self.errors # empty list => false
@property
def error(self):
"Returns the first :class:`RPCError` and `None` if there were no errors."
self.parse()
if self._errors:
return self._errors[0]
else:
return None
@property
def errors(self):
"List of `RPCError` objects. Will be empty if there were no *rpc-error* elements in reply."
self.parse()
return self._errors
class RPCReplyListener(SessionListener): # internal use
creation_lock = Lock()
# one instance per session -- maybe there is a better way??
def __new__(cls, session, device_handler):
with RPCReplyListener.creation_lock:
instance = session.get_listener_instance(cls)
if instance is None:
instance = object.__new__(cls)
instance._lock = Lock()
instance._id2rpc = {}
instance._device_handler = device_handler
#instance._pipelined = session.can_pipeline
session.add_listener(instance)
instance.logger = SessionLoggerAdapter(logger,
{'session': session})
return instance
def register(self, id, rpc):
with self._lock:
self._id2rpc[id] = rpc
def callback(self, root, raw):
tag, attrs = root
if self._device_handler.perform_qualify_check():
if tag != qualify("rpc-reply"):
return
if "message-id" not in attrs:
# required attribute so raise OperationError
raise OperationError("Could not find 'message-id' attribute in <rpc-reply>")
else:
id = attrs["message-id"] # get the msgid
with self._lock:
try:
rpc = self._id2rpc[id] # the corresponding rpc
self.logger.debug("Delivering to %r", rpc)
rpc.deliver_reply(raw)
except KeyError:
raise OperationError("Unknown 'message-id': %s" % id)
# no catching other exceptions, fail loudly if must
else:
# if no error delivering, can del the reference to the RPC
del self._id2rpc[id]
def errback(self, err):
try:
for rpc in six.itervalues(self._id2rpc):
rpc.deliver_error(err)
finally:
self._id2rpc.clear()
class RaiseMode(object):
"""
Define how errors indicated by RPC should be handled.
Note that any error_filters defined in the device handler will still be
applied, even if ERRORS or ALL is defined: If the filter matches, an exception
will NOT be raised.
"""
NONE = 0
"Don't attempt to raise any type of `rpc-error` as :exc:`RPCError`."
ERRORS = 1
"Raise only when the `error-type` indicates it is an honest-to-god error."
ALL = 2
"Don't look at the `error-type`, always raise."
class RPC(object):
"""Base class for all operations, directly corresponding to *rpc* requests. Handles making the request, and taking delivery of the reply."""
DEPENDS = []
"""Subclasses can specify their dependencies on capabilities as a list of URI's or abbreviated names, e.g. ':writable-running'. These are verified at the time of instantiation. If the capability is not available, :exc:`MissingCapabilityError` is raised."""
REPLY_CLS = RPCReply
"By default :class:`RPCReply`. Subclasses can specify a :class:`RPCReply` subclass."
def __init__(self, session, device_handler, async_mode=False, timeout=30, raise_mode=RaiseMode.NONE):
"""
*session* is the :class:`~ncclient.transport.Session` instance
*device_handler" is the :class:`~ncclient.devices.*.*DeviceHandler` instance
*async* specifies whether the request is to be made asynchronously, see :attr:`is_async`
*timeout* is the timeout for a synchronous request, see :attr:`timeout`
*raise_mode* specifies the exception raising mode, see :attr:`raise_mode`
"""
self._session = session
try:
for cap in self.DEPENDS:
self._assert(cap)
except AttributeError:
pass
self._async = async_mode
self._timeout = timeout
self._raise_mode = raise_mode
self._id = uuid4().urn # Keeps things simple instead of having a class attr with running ID that has to be locked
self._listener = RPCReplyListener(session, device_handler)
self._listener.register(self._id, self)
self._reply = None
self._error = None
self._event = Event()
self._device_handler = device_handler
self.logger = SessionLoggerAdapter(logger, {'session': session})
def _wrap(self, subele):
# internal use
ele = new_ele("rpc", {"message-id": self._id},
**self._device_handler.get_xml_extra_prefix_kwargs())
ele.append(subele)
#print to_xml(ele)
return to_xml(ele)
def _request(self, op):
"""Implementations of :meth:`request` call this method to send the request and process the reply.
In synchronous mode, blocks until the reply is received and returns :class:`RPCReply`. Depending on the :attr:`raise_mode` a `rpc-error` element in the reply may lead to an :exc:`RPCError` exception.
In asynchronous mode, returns immediately, returning `self`. The :attr:`event` attribute will be set when the reply has been received (see :attr:`reply`) or an error occured (see :attr:`error`).
*op* is the operation to be requested as an :class:`~xml.etree.ElementTree.Element`
"""
self.logger.info('Requesting %r', self.__class__.__name__)
req = self._wrap(op)
self._session.send(req)
if self._async:
self.logger.debug('Async request, returning %r', self)
return self
else:
self.logger.debug('Sync request, will wait for timeout=%r', self._timeout)
self._event.wait(self._timeout)
if self._event.isSet():
if self._error:
# Error that prevented reply delivery
raise self._error
self._reply.parse()
if self._reply.error is not None and not self._device_handler.is_rpc_error_exempt(self._reply.error.message):
# <rpc-error>'s [ RPCError ]
if self._raise_mode == RaiseMode.ALL or (self._raise_mode == RaiseMode.ERRORS and self._reply.error.severity == "error"):
errlist = []
errors = self._reply.errors
if len(errors) > 1:
raise RPCError(to_ele(self._reply._raw), errs=errors)
else:
raise self._reply.error
if self._device_handler.transform_reply():
return NCElement(self._reply, self._device_handler.transform_reply())
else:
return self._reply
else:
raise TimeoutExpiredError('ncclient timed out while waiting for an rpc reply.')
def request(self):
"""Subclasses must implement this method. Typically only the request needs to be built as an
:class:`~xml.etree.ElementTree.Element` and everything else can be handed off to
:meth:`_request`."""
pass
def _assert(self, capability):
"""Subclasses can use this method to verify that a capability is available with the NETCONF
server, before making a request that requires it. A :exc:`MissingCapabilityError` will be
raised if the capability is not available."""
if capability not in self._session.server_capabilities:
raise MissingCapabilityError('Server does not support [%s]' % capability)
def deliver_reply(self, raw):
# internal use
self._reply = self.REPLY_CLS(raw)
self._event.set()
def deliver_error(self, err):
# internal use
self._error = err
self._event.set()
@property
def reply(self):
":class:`RPCReply` element if reply has been received or `None`"
return self._reply
@property
def error(self):
""":exc:`Exception` type if an error occured or `None`.
.. note::
This represents an error which prevented a reply from being received. An *rpc-error*
does not fall in that category -- see `RPCReply` for that.
"""
return self._error
@property
def id(self):
"The *message-id* for this RPC."
return self._id
@property
def session(self):
"The `~ncclient.transport.Session` object associated with this RPC."
return self._session
@property
def event(self):
""":class:`~threading.Event` that is set when reply has been received or when an error preventing
delivery of the reply occurs.
"""
return self._event
def __set_async(self, async_mode=True):
self._async = async_mode
if async_mode and not self._session.can_pipeline:
raise UserWarning('Asynchronous mode not supported for this device/session')
def __set_raise_mode(self, mode):
assert(mode in (RaiseMode.NONE, RaiseMode.ERRORS, RaiseMode.ALL))
self._raise_mode = mode
def __set_timeout(self, timeout):
    # setter behind the `timeout` property
    self._timeout = timeout
# Public property interface; the name-mangled setters above perform validation.
raise_mode = property(fget=lambda self: self._raise_mode, fset=__set_raise_mode)
"""Depending on this exception raising mode, an `rpc-error` in the reply may be raised as an :exc:`RPCError` exception. Valid values are the constants defined in :class:`RaiseMode`. """
is_async = property(fget=lambda self: self._async, fset=__set_async)
"""Specifies whether this RPC will be / was requested asynchronously. By default RPC's are synchronous."""
timeout = property(fget=lambda self: self._timeout, fset=__set_timeout)
"""Timeout in seconds for synchronous waiting defining how long the RPC request will block on a reply before raising :exc:`TimeoutExpiredError`.
Irrelevant for asynchronous usage.
"""
|
en
| 0.805149
|
# Copyright 2009 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Single RPCError # Multiple errors returned. Errors is a list of RPCError objs # We are interested in the severity and the message Represents an *rpc-reply*. Only concerns itself with whether the operation was successful. .. note:: If the reply has not yet been parsed there is an implicit, one-time parsing overhead to accessing some of the attributes defined by this class. # print(str(self._reply)) # res = re.sub(r"<CLI>.*</CLI>", "<ok/>", str(self._raw)) <?xml version=\"1.0\" encoding=\"UTF-8\"?> <rpc-reply xmlns:config=\"http://www.hp.com/netconf/config:1.0\" xmlns:data=\"http://www.hp.com/netconf/data:1.0\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\" message-id=\"urn:uuid:db6c046f-db5d-40fb-b431-7c1057feddbb\"> <ok/> </rpc-reply>\n # res = re.sub(str(self._reply), "<ok/>", str(self._reply)) # The <rpc-reply> element # Per RFC 4741 an <ok/> tag is sent when there are no errors or warnings # Create RPCError objects from <rpc-error> elements # Process a particular <rpc-error> # empty list => false # internal use # one instance per session -- maybe there is a better way?? #instance._pipelined = session.can_pipeline # required attribute so raise OperationError # get the msgid # the corresponding rpc # no catching other exceptions, fail loudly if must # if no error delivering, can del the reference to the RPC Define how errors indicated by RPC should be handled. 
Note that any error_filters defined in the device handler will still be applied, even if ERRORS or ALL is defined: If the filter matches, an exception will NOT be raised. Base class for all operations, directly corresponding to *rpc* requests. Handles making the request, and taking delivery of the reply. Subclasses can specify their dependencies on capabilities as a list of URI's or abbreviated names, e.g. ':writable-running'. These are verified at the time of instantiation. If the capability is not available, :exc:`MissingCapabilityError` is raised. *session* is the :class:`~ncclient.transport.Session` instance *device_handler" is the :class:`~ncclient.devices.*.*DeviceHandler` instance *async* specifies whether the request is to be made asynchronously, see :attr:`is_async` *timeout* is the timeout for a synchronous request, see :attr:`timeout` *raise_mode* specifies the exception raising mode, see :attr:`raise_mode` # Keeps things simple instead of having a class attr with running ID that has to be locked # internal use #print to_xml(ele) Implementations of :meth:`request` call this method to send the request and process the reply. In synchronous mode, blocks until the reply is received and returns :class:`RPCReply`. Depending on the :attr:`raise_mode` a `rpc-error` element in the reply may lead to an :exc:`RPCError` exception. In asynchronous mode, returns immediately, returning `self`. The :attr:`event` attribute will be set when the reply has been received (see :attr:`reply`) or an error occured (see :attr:`error`). *op* is the operation to be requested as an :class:`~xml.etree.ElementTree.Element` # Error that prevented reply delivery # <rpc-error>'s [ RPCError ] Subclasses must implement this method. Typically only the request needs to be built as an :class:`~xml.etree.ElementTree.Element` and everything else can be handed off to :meth:`_request`. 
Subclasses can use this method to verify that a capability is available with the NETCONF server, before making a request that requires it. A :exc:`MissingCapabilityError` will be raised if the capability is not available. # internal use # internal use :exc:`Exception` type if an error occured or `None`. .. note:: This represents an error which prevented a reply from being received. An *rpc-error* does not fall in that category -- see `RPCReply` for that. :class:`~threading.Event` that is set when reply has been received or when an error preventing delivery of the reply occurs. Depending on this exception raising mode, an `rpc-error` in the reply may be raised as an :exc:`RPCError` exception. Valid values are the constants defined in :class:`RaiseMode`. Specifies whether this RPC will be / was requested asynchronously. By default RPC's are synchronous. Timeout in seconds for synchronous waiting defining how long the RPC request will block on a reply before raising :exc:`TimeoutExpiredError`. Irrelevant for asynchronous usage.
| 1.876153
| 2
|
compound_poisson/forecast/time_series.py
|
shermanlo77/cptimeseries
| 3
|
6625381
|
"""Forecaster implementation for TimeSeries. Contain the classes
compound_poisson.forecast.time_series.Forecaster
compound_poisson.forecast.time_series.SelfForecaster
compound_poisson.forecast.forecast_abstract.Forecaster
<- compound_poisson.forecast.time_series.Forecaster
<- compound_poisson.forecast.time_series.SelfForecaster
"""
import numpy as np
from scipy import stats
from compound_poisson.forecast import distribution_compare
from compound_poisson.forecast import forecast_abstract
from compound_poisson.forecast import roc
class Forecaster(forecast_abstract.Forecaster):
    """Contain Monte Carlo forecasts

    Used by the methods TimeSeries.forecast() and TimeSeries.forecast_self().

    Attributes:
        time_series: pointer to parent TimeSeries object
        model_field: stored model fields of the test set
        forecast_array: memmap of forecasts
            dim 0: for each simulation
            dim 1: for each time point
        forecast: expectation of all forecasts, array
        forecast_median: median of all forecasts
        forecast_sigma: dictionary of sigma errors (quantiles) of all
            forecasts, keys are [-3, -2, -1, 0, 1, 2, 3] which correspond to
            the sigma level
        forecast_quartile: 3 array, each containing array of 25%, 50%, 75%
            quantile forecasts
    """

    def __init__(self, time_series, memmap_dir):
        self.time_series = time_series
        self.model_field = None
        self.forecast = None
        self.forecast_median = None
        self.forecast_sigma = {}
        self.forecast_quartile = [[], [], []]
        super().__init__(memmap_dir)

    # override
    def make_memmap_path(self):
        # include the TimeSeries subclass name in the memmap file name
        super().make_memmap_path(type(self.time_series).__name__)

    # override
    # additional parameter model_field to store the test set
    def start_forecast(self, n_simulation, model_field=None):
        self.model_field = model_field
        # model_field is None when forecasting the training set itself
        if model_field is None:
            self.n_time = len(self.time_series)
        else:
            self.n_time = len(model_field)
        super().start_forecast(n_simulation)

    # implemented
    def copy_to_memmap(self, memmap_to_copy):
        # copy an existing (possibly shorter) forecast array into this memmap
        self.forecast_array[0:len(memmap_to_copy)] = memmap_to_copy[:]

    # implemented
    def simulate_forecasts(self, index_range, is_print=True):
        # one Monte Carlo simulation of the whole series per requested index
        for i in index_range:
            forecast_i = self.get_simulated_forecast()
            self.forecast_array[i] = forecast_i.y_array
            if is_print:
                print("Predictive sample", i)
        # all simulations share the same time axis; keep the last one
        self.time_array = forecast_i.time_array
        self.get_forecast()

    # implemented
    def get_prob_rain(self, rain):
        """Get the probability if it will rain at least of a certain amount

        Args:
            rain: scalar, amount of rain to evaluate the probability

        Return:
            vector, a probability for each day
        """
        # Monte Carlo estimate: fraction of simulations exceeding `rain`
        p_rain = np.mean(self.forecast_array > rain, 0)
        return p_rain

    # override
    # shape for memmap provided
    def load_memmap(self, mode, memmap_shape=None):
        """Load the memmap file for forecast_array

        Args:
            mode: how to read the memmap file, eg "w+", "r+", "r"
            memmap_shape: the shape of forecast_array. Provide None if
                forecasting the training set
        """
        if memmap_shape is None:
            super().load_memmap(mode, (self.n_simulation, self.n_time))
        else:
            super().load_memmap(mode, memmap_shape)

    def get_simulated_forecast(self):
        """Return a TimeSeries object with simulated values
        """
        forecast_i = self.time_series.instantiate_forecast(self.model_field)
        forecast_i.simulate()
        return forecast_i

    def get_forecast(self):
        """Calculate statistics over all the provided forecasts
        """
        self.forecast = np.mean(self.forecast_array, 0)
        sigma_array = range(-3, 4)
        # work out quantiles for forecast_sigma and forecast_quartile together
        quantiles = np.concatenate((stats.norm.cdf(sigma_array), [0.25, 0.75]))
        forecast_quantile = np.quantile(self.forecast_array, quantiles, 0)
        for i in range(len(sigma_array)):
            self.forecast_sigma[sigma_array[i]] = forecast_quantile[i]
        # the 0-sigma quantile is the median by definition
        self.forecast_median = self.forecast_sigma[0]
        # last two quantiles requested above are the 25% and 75% quartiles
        self.forecast_quartile[0] = forecast_quantile[len(sigma_array)]
        self.forecast_quartile[1] = self.forecast_median
        self.forecast_quartile[2] = forecast_quantile[len(sigma_array)+1]

    # implemented
    def get_roc_curve(self, rain_warning, rain_true, time_index=None):
        """Return a ROC curve

        Args:
            rain_warning: the amount of precipitation to be detected
            rain_true: observed precipitation, array, for each time point
            time_index: slice object, which time points to consider

        Return:
            roc.Roc object, otherwise None is returned if rain larger than
                rain_warning was never observed
        """
        # a ROC curve needs at least one positive observation
        if np.any(rain_true > rain_warning):
            p_rain_warning = self.get_prob_rain(rain_warning)
            if time_index is not None:
                p_rain_warning = p_rain_warning[time_index]
                rain_true = rain_true[time_index]
            roc_curve = roc.Roc(rain_warning, p_rain_warning, rain_true)
        else:
            roc_curve = None
        return roc_curve

    # implemented
    def get_roc_curve_array(
            self, rain_warning_array, rain_observed, time_index=None):
        """Get array of ROC curves

        Evaluate the ROC curve for different amounts of precipitation

        Args:
            rain_warning_array: array of amount of precipitation to be detected
            rain_observed: observed precipitation, array, for each time point
            time_index: optional, a pointer (eg slice or array of indices) for
                time points to take ROC curve of

        Return:
            array of roc.Roc objects which can be None if a value of
                precipitation in rain_warning_array was never observed
        """
        roc_array = []
        for rain_warning in rain_warning_array:
            roc_curve = self.get_roc_curve(
                rain_warning, rain_observed, time_index)
            roc_array.append(roc_curve)
        return roc_array

    # implemented
    def compare_dist_with_observed(
            self, observed_rain, n_linspace=500):
        """Return an object from distribution_compare, used to compare the
        distribution of the precipitation of the forecast and the observed

        Args:
            observed_rain: numpy array of observed precipitation
            n_linspace: number of points to evaluate between 0 mm and max
                observed rain

        Return: distribution_compare.TimeSeries object
        """
        comparer = distribution_compare.TimeSeries()
        comparer.compare(self, observed_rain, n_linspace)
        return comparer

    def bootstrap(self, rng):
        """Return a bootstrapped forecast array of itself

        Should be used for plotting or sensitivity analysis only.
        sample_array may not guarantee to be a deep copy of memmap, investigate
        further if you require modifying the bootstrapped object.
        """
        bootstrap = Forecaster(self.time_series, self.memmap_dir)
        bootstrap.time_array = self.time_array
        # resample simulations with replacement
        bootstrap.forecast_array = self.forecast_array[
            rng.randint(self.n_simulation, size=self.n_simulation), :]
        bootstrap.get_forecast()
        return bootstrap

    def __getitem__(self, index):
        """
        Args:
            index: slice object
        """
        # only to be used for plotting purposes
        # does not copy model fields
        slice_copy = Forecaster(self.time_series, self.memmap_dir)
        slice_copy.time_array = self.time_array[index]
        if self.forecast_array is None:
            slice_copy.forecast_array = None
        else:
            slice_copy.forecast_array = self.forecast_array[:, index]
        slice_copy.forecast = self.forecast[index]
        slice_copy.forecast_median = self.forecast_median[index]
        slice_copy.forecast_sigma = {}
        for key, forecast_sigma_i in self.forecast_sigma.items():
            slice_copy.forecast_sigma[key] = forecast_sigma_i[index]
        slice_copy.forecast_quartile = []
        for quartile in self.forecast_quartile:
            slice_copy.forecast_quartile.append(quartile[index])
        slice_copy.n_time = len(slice_copy.time_array)
        slice_copy.n_simulation = self.n_simulation
        slice_copy.memmap_path = self.memmap_path
        return slice_copy
class SelfForecaster(Forecaster):
    """For forecasting the training set

    Different as the z were estimated in MCMC
    """

    def __init__(self, time_series, memmap_dir):
        super().__init__(time_series, memmap_dir)

    # override
    def start_forecast(self, n_simulation):
        # implemented in such a way it passes no model fields, so the parent
        # falls back to the training set length
        super().start_forecast(n_simulation)

    # override
    def get_simulated_forecast(self):
        """Return a TimeSeries object with simulated values, with z known
        """
        forecast_i = self.time_series.instantiate_forecast_self()
        forecast_i.simulate_given_z()
        return forecast_i
|
"""Forecaster implementation for TimeSeries. Contain the classes
compound_poisson.forecast.time_series.Forecaster
compound_poisson.forecast.time_series.SelfForecaster
compound_poisson.forecast.forecast_abstract.Forecaster
<- compound_poisson.forecast.time_series.Forecaster
<- compound_poisson.forecast.time_series.SelfForecaster
"""
import numpy as np
from scipy import stats
from compound_poisson.forecast import distribution_compare
from compound_poisson.forecast import forecast_abstract
from compound_poisson.forecast import roc
class Forecaster(forecast_abstract.Forecaster):
    """Contain Monte Carlo forecasts

    Used by the methods TimeSeries.forecast() and TimeSeries.forecast_self().

    Attributes:
        time_series: pointer to parent TimeSeries object
        model_field: stored model fields of the test set
        forecast_array: memmap of forecasts
            dim 0: for each simulation
            dim 1: for each time point
        forecast: expectation of all forecasts, array
        forecast_median: median of all forecasts
        forecast_sigma: dictionary of sigma errors (quantiles) of all
            forecasts, keys are [-3, -2, -1, 0, 1, 2, 3] which correspond to
            the sigma level
        forecast_quartile: 3 array, each containing array of 25%, 50%, 75%
            quantile forecasts
    """

    def __init__(self, time_series, memmap_dir):
        self.time_series = time_series
        self.model_field = None
        self.forecast = None
        self.forecast_median = None
        self.forecast_sigma = {}
        self.forecast_quartile = [[], [], []]
        super().__init__(memmap_dir)

    # override
    def make_memmap_path(self):
        # include the TimeSeries subclass name in the memmap file name
        super().make_memmap_path(type(self.time_series).__name__)

    # override
    # additional parameter model_field to store the test set
    def start_forecast(self, n_simulation, model_field=None):
        self.model_field = model_field
        # model_field is None when forecasting the training set itself
        if model_field is None:
            self.n_time = len(self.time_series)
        else:
            self.n_time = len(model_field)
        super().start_forecast(n_simulation)

    # implemented
    def copy_to_memmap(self, memmap_to_copy):
        # copy an existing (possibly shorter) forecast array into this memmap
        self.forecast_array[0:len(memmap_to_copy)] = memmap_to_copy[:]

    # implemented
    def simulate_forecasts(self, index_range, is_print=True):
        # one Monte Carlo simulation of the whole series per requested index
        for i in index_range:
            forecast_i = self.get_simulated_forecast()
            self.forecast_array[i] = forecast_i.y_array
            if is_print:
                print("Predictive sample", i)
        # all simulations share the same time axis; keep the last one
        self.time_array = forecast_i.time_array
        self.get_forecast()

    # implemented
    def get_prob_rain(self, rain):
        """Get the probability if it will rain at least of a certain amount

        Args:
            rain: scalar, amount of rain to evaluate the probability

        Return:
            vector, a probability for each day
        """
        # Monte Carlo estimate: fraction of simulations exceeding `rain`
        p_rain = np.mean(self.forecast_array > rain, 0)
        return p_rain

    # override
    # shape for memmap provided
    def load_memmap(self, mode, memmap_shape=None):
        """Load the memmap file for forecast_array

        Args:
            mode: how to read the memmap file, eg "w+", "r+", "r"
            memmap_shape: the shape of forecast_array. Provide None if
                forecasting the training set
        """
        if memmap_shape is None:
            super().load_memmap(mode, (self.n_simulation, self.n_time))
        else:
            super().load_memmap(mode, memmap_shape)

    def get_simulated_forecast(self):
        """Return a TimeSeries object with simulated values
        """
        forecast_i = self.time_series.instantiate_forecast(self.model_field)
        forecast_i.simulate()
        return forecast_i

    def get_forecast(self):
        """Calculate statistics over all the provided forecasts
        """
        self.forecast = np.mean(self.forecast_array, 0)
        sigma_array = range(-3, 4)
        # work out quantiles for forecast_sigma and forecast_quartile together
        quantiles = np.concatenate((stats.norm.cdf(sigma_array), [0.25, 0.75]))
        forecast_quantile = np.quantile(self.forecast_array, quantiles, 0)
        for i in range(len(sigma_array)):
            self.forecast_sigma[sigma_array[i]] = forecast_quantile[i]
        # the 0-sigma quantile is the median by definition
        self.forecast_median = self.forecast_sigma[0]
        # last two quantiles requested above are the 25% and 75% quartiles
        self.forecast_quartile[0] = forecast_quantile[len(sigma_array)]
        self.forecast_quartile[1] = self.forecast_median
        self.forecast_quartile[2] = forecast_quantile[len(sigma_array)+1]

    # implemented
    def get_roc_curve(self, rain_warning, rain_true, time_index=None):
        """Return a ROC curve

        Args:
            rain_warning: the amount of precipitation to be detected
            rain_true: observed precipitation, array, for each time point
            time_index: slice object, which time points to consider

        Return:
            roc.Roc object, otherwise None is returned if rain larger than
                rain_warning was never observed
        """
        # a ROC curve needs at least one positive observation
        if np.any(rain_true > rain_warning):
            p_rain_warning = self.get_prob_rain(rain_warning)
            if time_index is not None:
                p_rain_warning = p_rain_warning[time_index]
                rain_true = rain_true[time_index]
            roc_curve = roc.Roc(rain_warning, p_rain_warning, rain_true)
        else:
            roc_curve = None
        return roc_curve

    # implemented
    def get_roc_curve_array(
            self, rain_warning_array, rain_observed, time_index=None):
        """Get array of ROC curves

        Evaluate the ROC curve for different amounts of precipitation

        Args:
            rain_warning_array: array of amount of precipitation to be detected
            rain_observed: observed precipitation, array, for each time point
            time_index: optional, a pointer (eg slice or array of indices) for
                time points to take ROC curve of

        Return:
            array of roc.Roc objects which can be None if a value of
                precipitation in rain_warning_array was never observed
        """
        roc_array = []
        for rain_warning in rain_warning_array:
            roc_curve = self.get_roc_curve(
                rain_warning, rain_observed, time_index)
            roc_array.append(roc_curve)
        return roc_array

    # implemented
    def compare_dist_with_observed(
            self, observed_rain, n_linspace=500):
        """Return an object from distribution_compare, used to compare the
        distribution of the precipitation of the forecast and the observed

        Args:
            observed_rain: numpy array of observed precipitation
            n_linspace: number of points to evaluate between 0 mm and max
                observed rain

        Return: distribution_compare.TimeSeries object
        """
        comparer = distribution_compare.TimeSeries()
        comparer.compare(self, observed_rain, n_linspace)
        return comparer

    def bootstrap(self, rng):
        """Return a bootstrapped forecast array of itself

        Should be used for plotting or sensitivity analysis only.
        sample_array may not guarantee to be a deep copy of memmap, investigate
        further if you require modifying the bootstrapped object.
        """
        bootstrap = Forecaster(self.time_series, self.memmap_dir)
        bootstrap.time_array = self.time_array
        # resample simulations with replacement
        bootstrap.forecast_array = self.forecast_array[
            rng.randint(self.n_simulation, size=self.n_simulation), :]
        bootstrap.get_forecast()
        return bootstrap

    def __getitem__(self, index):
        """
        Args:
            index: slice object
        """
        # only to be used for plotting purposes
        # does not copy model fields
        slice_copy = Forecaster(self.time_series, self.memmap_dir)
        slice_copy.time_array = self.time_array[index]
        if self.forecast_array is None:
            slice_copy.forecast_array = None
        else:
            slice_copy.forecast_array = self.forecast_array[:, index]
        slice_copy.forecast = self.forecast[index]
        slice_copy.forecast_median = self.forecast_median[index]
        slice_copy.forecast_sigma = {}
        for key, forecast_sigma_i in self.forecast_sigma.items():
            slice_copy.forecast_sigma[key] = forecast_sigma_i[index]
        slice_copy.forecast_quartile = []
        for quartile in self.forecast_quartile:
            slice_copy.forecast_quartile.append(quartile[index])
        slice_copy.n_time = len(slice_copy.time_array)
        slice_copy.n_simulation = self.n_simulation
        slice_copy.memmap_path = self.memmap_path
        return slice_copy
class SelfForecaster(Forecaster):
    """For forecasting the training set

    Different as the z were estimated in MCMC
    """

    def __init__(self, time_series, memmap_dir):
        super().__init__(time_series, memmap_dir)

    # override
    def start_forecast(self, n_simulation):
        # implemented in such a way it passes no model fields, so the parent
        # falls back to the training set length
        super().start_forecast(n_simulation)

    # override
    def get_simulated_forecast(self):
        """Return a TimeSeries object with simulated values, with z known
        """
        forecast_i = self.time_series.instantiate_forecast_self()
        forecast_i.simulate_given_z()
        return forecast_i
|
en
| 0.783927
|
Forecaster implementation for TimeSeries. Contain the classes compound_poisson.forecast.time_series.Forecaster compound_poisson.forecast.time_series.SelfForecaster compound_poisson.forecast.forecast_abstract.Forecaster <- compound_poisson.forecast.time_series.Forecaster <- compound_poisson.forecast.time_series.SelfForecaster Contain Monte Carlo forecasts Used by the methods TimeSeries.forecast() and TimeSeries.forecast_self(). Attributes: time_series: pointer to parent TimeSeries object model_field: stored model fields of the test set forecast_array: memmap of forecasts dim 0: for each simulation dim 1: for each time point forecast: expectation of all forecasts, array forecast_median: median of all forecasts forecast_sigma: dictionary of sigma errors (quantiles) of all forecasts, keys are [-3, -2, -1, 0, 1, 2, 3] which correspond to the sigma level forecast_quartile: 3 array, each containing array of 25%, 50%, 75% quantile forecasts # override # override # additional parameter model_field to store the test set # implemented # implemented # implemented Get the probability if it will rain at least of a certian amount Args: rain: scalar, amount of rain to evaluate the probability Return: vector, a probability for each day # override # shape for memmap provided Load the memmap file for forecast_array Args: mode: how to read the memmap file, eg "w+", "r+", "r" memmap_shape: the shape of forecast_array. 
Provide None is forecasting the training set Return a TimeSeries object with simulated values Calculate statistics over all the provided forecasts # work out quantiles for forecast_sigma and forecast_quartile together # implemented Return a ROC curve Args: rain_warning: the amount of precipitation to be detected rain_true: observed precipitation, array, for each time point time_index: slice object, which time points to consider Return: roc.Roc object, other None is returned if rain larger than rain_warning was never observed # implemented Get array of ROC curves Evaluate the ROC curve for different amounts of precipitation Args: rain_warning_array: array of amount of precipitation to be detected rain_observed: observed precipitation, array, for each time point time_index: optional, a pointer (eg slice or array of indices) for time points to take ROC curve of Return: array of roc.Roc objects which can be None if a value of precipitation in rain_warning_array was never observed # implemented Return an object from distribution_compare, used to compare the distribution of the precipitation of the forecast and the observed Args: observed_rain: numpy array of observed precipitation n_linspace: number of points to evaluate between 0 mm and max observed rain Return: distribution_compare.TimeSeries object Return a bootstrapped forecast array of itself Should be used for plotting or sensitivity analysis only sample_array may not guarantee to be a deep copy of memmap, investigate further if you require modifying the bootstrapped object. Args: index: slice object # only to be used for plotting purposes # does not copy model fields For forecasting the training set Different as the z were estimated in MCMC # override # implemented in such a way it passes no model fields # override Return a TimeSeries object with simulated values, with z known
| 2.522815
| 3
|
codonPython/tests/suppression_test.py
|
wthirskgaskill/codonPython
| 1
|
6625382
|
from codonPython.suppression import suppress_value
import pytest
@pytest.mark.parametrize("to_suppress, expected", [
    (0, "0"),
    (2, "*"),
    (5, "*"),
    (8, "10"),
    (16, "15"),
    (57, "55"),
    (10023, "10025"),
])
def test_suppress_value_BAU(to_suppress, expected):
    # business-as-usual cases: small counts suppressed, others rounded to 5
    assert suppress_value(to_suppress) == expected
@pytest.mark.parametrize("to_suppress", [-1, 4.2, 100000001])
def test_suppress_value_valueErrors(to_suppress):
    # negative, non-integer and out-of-range inputs must be rejected
    with pytest.raises(ValueError):
        suppress_value(to_suppress)
|
from codonPython.suppression import suppress_value
import pytest
@pytest.mark.parametrize("to_suppress, expected", [
    (0, "0"),
    (2, "*"),
    (5, "*"),
    (8, "10"),
    (16, "15"),
    (57, "55"),
    (10023, "10025"),
])
def test_suppress_value_BAU(to_suppress, expected):
    # business-as-usual cases: small counts suppressed, others rounded to 5
    assert suppress_value(to_suppress) == expected
@pytest.mark.parametrize("to_suppress", [-1, 4.2, 100000001])
def test_suppress_value_valueErrors(to_suppress):
    # negative, non-integer and out-of-range inputs must be rejected
    with pytest.raises(ValueError):
        suppress_value(to_suppress)
|
none
| 1
| 2.441235
| 2
|
|
peering/models/models.py
|
routerfixer/peering-manager
| 0
|
6625383
|
<gh_stars>0
import ipaddress
import logging
import napalm
from cacheops import cached_as
from django.conf import settings
from django.db import models, transaction
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from netfields import InetAddressField
from net.models import Connection
from netbox.api import NetBox
from peering import call_irr_as_set_resolver, parse_irr_as_set
from peering.enums import (
BGPRelationship,
BGPState,
CommunityType,
DeviceState,
IPFamily,
RoutingPolicyType,
)
from peering.fields import ASNField, CommunityField
from peeringdb.functions import get_shared_internet_exchanges
from peeringdb.models import IXLanPrefix, Network, NetworkContact, NetworkIXLan
from utils.models import ChangeLoggedModel, TaggableModel
from .abstracts import AbstractGroup, BGPSession
from .mixins import PolicyMixin
class AutonomousSystem(ChangeLoggedModel, TaggableModel, PolicyMixin):
# Core identity
asn = ASNField(unique=True, verbose_name="ASN")
name = models.CharField(max_length=128)
# *_peeringdb_sync flags control whether synchronize_with_peeringdb()
# overwrites the corresponding local field from the PeeringDB record
name_peeringdb_sync = models.BooleanField(default=True)
# Optional contact details
contact_name = models.CharField(max_length=50, blank=True)
contact_phone = models.CharField(max_length=20, blank=True)
contact_email = models.EmailField(blank=True, verbose_name="Contact e-mail")
comments = models.TextField(blank=True)
irr_as_set = models.CharField(
    max_length=255, blank=True, null=True, verbose_name="IRR AS-SET"
)
irr_as_set_peeringdb_sync = models.BooleanField(default=True)
ipv6_max_prefixes = models.PositiveIntegerField(
    blank=True, default=0, verbose_name="IPv6 max prefix"
)
ipv6_max_prefixes_peeringdb_sync = models.BooleanField(default=True)
ipv4_max_prefixes = models.PositiveIntegerField(
    blank=True, default=0, verbose_name="IPv4 max prefix"
)
ipv4_max_prefixes_peeringdb_sync = models.BooleanField(default=True)
import_routing_policies = models.ManyToManyField(
    "RoutingPolicy", blank=True, related_name="%(class)s_import_routing_policies"
)
export_routing_policies = models.ManyToManyField(
    "RoutingPolicy", blank=True, related_name="%(class)s_export_routing_policies"
)
communities = models.ManyToManyField("Community", blank=True)
# cached prefix lists (not user-editable); presumably filled from
# retrieve_irr_as_set_prefixes() -- confirm against callers
prefixes = models.JSONField(blank=True, null=True, editable=False)
# presumably marks an AS managed locally rather than a remote peer -- verify
affiliated = models.BooleanField(default=False)

logger = logging.getLogger("peering.manager.peering")
class Meta:
    # default queryset ordering and the custom e-mail permission
    ordering = ["asn", "affiliated"]
    permissions = [("send_email", "Can send e-mails to AS contact")]
@property
def peeringdb_network(self):
    # EAFP lookup of the local PeeringDB Network record matching this ASN;
    # None when no record has been synchronized. Each access runs a query.
    try:
        return Network.objects.get(asn=self.asn)
    except Network.DoesNotExist:
        return None
@property
def general_policy(self):
    """The general peering policy recorded in PeeringDB, or None when this
    AS has no PeeringDB record."""
    # cache the property: peeringdb_network runs a DB query on every access,
    # so the original double access issued two identical queries
    network = self.peeringdb_network
    if network:
        return network.policy_general
    else:
        return None
@property
def peeringdb_contacts(self):
    """Contacts attached to this AS in PeeringDB; empty list when the AS
    has no PeeringDB record."""
    # cache the property: peeringdb_network runs a DB query on every access,
    # so the original double access issued two identical queries
    network = self.peeringdb_network
    if network:
        return NetworkContact.objects.filter(net=network)
    return []
@property
def can_receive_email(self):
    # truthy when a direct contact e-mail is set, or when PeeringDB lists
    # contacts for this AS (may return the contact queryset itself)
    return self.contact_email != "" or self.peeringdb_contacts
@staticmethod
def create_from_peeringdb(asn):
    # Create (or fetch) an AutonomousSystem from the local PeeringDB cache.
    # Returns None when the ASN is unknown to PeeringDB.
    try:
        network = Network.objects.get(asn=asn)
    except Network.DoesNotExist:
        return None

    # idempotent: an existing AS with this ASN is returned unchanged
    autonomous_system, _ = AutonomousSystem.objects.get_or_create(
        asn=network.asn,
        defaults={
            "name": network.name,
            "irr_as_set": network.irr_as_set,
            "ipv6_max_prefixes": network.info_prefixes6,
            "ipv4_max_prefixes": network.info_prefixes4,
        },
    )

    return autonomous_system
def __str__(self):
    # human-readable label, e.g. "AS65000 - Example Net"
    return f"AS{self.asn} - {self.name}"
def export_policies(self):
    # PolicyMixin hook: routing policies applied on export towards this AS
    return self.export_routing_policies.all()
def import_policies(self):
    # PolicyMixin hook: routing policies applied on import from this AS
    return self.import_routing_policies.all()
def get_absolute_url(self):
    # canonical details view for this AS
    return reverse("peering:autonomoussystem_details", args=[self.pk])
def get_internet_exchange_peering_sessions_list_url(self):
    # list view of this AS's IXP peering sessions
    return reverse(
        "peering:autonomoussystem_internet_exchange_peering_sessions",
        args=[self.pk],
    )
def get_direct_peering_sessions_list_url(self):
    # list view of this AS's direct (non-IXP) peering sessions
    return reverse(
        "peering:autonomoussystem_direct_peering_sessions", args=[self.pk]
    )
def get_direct_peering_sessions(self, bgp_group=None):
    """
    Returns all direct peering sessions with this AS.
    """
    # NOTE(review): the bgp_group parameter is accepted but never used; all
    # sessions are returned regardless -- confirm whether filtering by group
    # was intended here
    return DirectPeeringSession.objects.filter(autonomous_system=self)
def get_ixp_peering_sessions(self, internet_exchange_point=None):
    """
    Returns all IXP peering sessions with this AS.
    """
    # NOTE(review): the internet_exchange_point parameter is accepted but
    # never used; all sessions are returned regardless -- confirm intent
    return InternetExchangePeeringSession.objects.filter(autonomous_system=self)
def get_internet_exchange_points(self, other):
    """
    Returns all IXPs this AS is peering on (with us).

    `other` is presumably the local (affiliated) autonomous system --
    confirm against callers.
    """
    # walk sessions -> connections -> IXPs, restricted to IXPs owned by
    # `other`
    return InternetExchange.objects.filter(
        pk__in=Connection.objects.filter(
            pk__in=self.get_ixp_peering_sessions().values_list(
                "ixp_connection", flat=True
            )
        ).values_list("internet_exchange_point", flat=True),
        local_autonomous_system=other,
    )
def get_shared_internet_exchange_points(self, other):
    """
    Returns all IXPs this AS has with the other one.
    """
    # shared IXPs are discovered through PeeringDB ixlan records, then mapped
    # back onto local InternetExchange objects owned by `other`
    return InternetExchange.objects.filter(
        peeringdb_ixlan__id__in=get_shared_internet_exchanges(
            self, other
        ).values_list("id", flat=True),
        local_autonomous_system=other,
    )
def get_missing_peering_sessions(self, other, internet_exchange_point=None):
    """
    Returns all missing peering sessions between this AS and the other one on
    a given IXP. As we are relying on PeeringDB to discover sessions there
    are no points in doing so if the IXP is not linked to a PeeringDB record.

    If the IXP is not specified then missing peering sessions will be
    returned for all shared IXPs between this and the other AS.
    """
    if self == other:
        return NetworkIXLan.objects.none()

    # renamed from `filter` to avoid shadowing the builtin
    session_filters = {"autonomous_system": self}
    if internet_exchange_point:
        session_filters["ixp_connection__id__in"] = Connection.objects.filter(
            internet_exchange_point=internet_exchange_point
        ).values_list("id", flat=True)

    # IP addresses of sessions already configured with this AS
    ip_sessions = InternetExchangePeeringSession.objects.filter(
        **session_filters
    ).values_list("ip_address", flat=True)

    # candidate PeeringDB netixlan entries for this ASN where at least one of
    # the IPv6/IPv4 addresses has no configured session yet
    qs_filter = Q(asn=self.asn) & (
        ~Q(ipaddr6__in=ip_sessions) | ~Q(ipaddr4__in=ip_sessions)
    )
    if internet_exchange_point:
        qs_filter &= Q(ixlan=internet_exchange_point.peeringdb_ixlan)
    else:
        qs_filter &= Q(
            ixlan__in=self.get_shared_internet_exchange_points(other).values_list(
                "peeringdb_ixlan", flat=True
            )
        )
    return NetworkIXLan.objects.filter(qs_filter)
def synchronize_with_peeringdb(self):
    """
    Copies selected AS properties from the cached PeeringDB record onto this
    object, honoring the per-field sync flags.

    Returns True when a PeeringDB record exists and saving succeeds, False
    otherwise.
    """
    network = self.peeringdb_network
    if not network:
        return False

    # (sync-enabled flag, local field, PeeringDB field) for each synced value.
    field_mappings = (
        ("name_peeringdb_sync", "name", "name"),
        ("irr_as_set_peeringdb_sync", "irr_as_set", "irr_as_set"),
        ("ipv6_max_prefixes_peeringdb_sync", "ipv6_max_prefixes", "info_prefixes6"),
        ("ipv4_max_prefixes_peeringdb_sync", "ipv4_max_prefixes", "info_prefixes4"),
    )
    for flag, local_field, remote_field in field_mappings:
        if getattr(self, flag):
            setattr(self, local_field, getattr(network, remote_field))

    try:
        self.save()
    except Exception:
        # Saving is best-effort; report failure instead of raising.
        return False
    return True
def retrieve_irr_as_set_prefixes(self):
    """
    Returns a prefix list for this AS' IRR AS-SET. If none is provided the
    function will try to look for a prefix list based on the AS number.

    This function performs network operations against IRR sources; expect it
    to be slow, depending on the amount of data to process.
    """
    prefixes = {"ipv6": [], "ipv4": []}
    use_asn_lookup = False
    as_sets = parse_irr_as_set(self.asn, self.irr_as_set)

    try:
        # Resolve each AS-SET for both address families.
        for as_set in as_sets:
            for family in (6, 4):
                prefixes[f"ipv{family}"].extend(
                    call_irr_as_set_resolver(as_set, address_family=family)
                )
    except ValueError:
        # Resolver rejected the AS-SET; fall back to an ASN-based lookup.
        use_asn_lookup = True

    # Note the precedence: fallback OR (no v6 AND no v4 prefixes found).
    if use_asn_lookup or (not prefixes["ipv6"] and not prefixes["ipv4"]):
        self.logger.debug(
            f"falling back to AS number lookup to search for {self.asn} prefixes"
        )
        for family in (6, 4):
            prefixes[f"ipv{family}"].extend(
                call_irr_as_set_resolver(f"AS{self.asn}", address_family=family)
            )
    return prefixes
def get_irr_as_set_prefixes(self, address_family=0):
    """
    Returns the prefix list for this AS' IRR AS-SET, using the stored
    database value when available and resolving it otherwise.

    When address_family is 6 or 4 only that family's list is returned; any
    other value returns the whole {"ipv6": [...], "ipv4": [...]} dict.
    """
    prefixes = self.prefixes or self.retrieve_irr_as_set_prefixes()
    if address_family == 6:
        return prefixes["ipv6"]
    if address_family == 4:
        return prefixes["ipv4"]
    return prefixes
def get_contact_email_addresses(self):
    """
    Returns (address, display) tuples for all known contacts of this AS.
    The returned list can be used in form choice fields.
    """
    def as_choice(address, name):
        # Display "Name - address" when a name is known, bare address otherwise.
        return (address, f"{name} - {address}" if name else address)

    addresses = []
    # The manually configured contact comes first, when set.
    if self.contact_email:
        addresses.append(as_choice(self.contact_email, self.contact_name))
    # Then PeeringDB contacts, skipping duplicates of collected addresses.
    for contact in self.peeringdb_contacts:
        already_known = [a[0] for a in addresses]
        if contact.email and contact.email not in already_known:
            addresses.append(as_choice(contact.email, contact.name))
    return addresses
def get_cc_email_contacts(self):
    """
    Returns the user-defined CC contacts from the settings as (address,
    display) tuples.
    """
    addresses = []
    for entry in settings.EMAIL_CC_CONTACTS:
        if isinstance(entry, tuple):
            # (address, name) tuples render as "name - address".
            address = entry[0]
            display = f"{entry[1]} - {entry[0]}" if len(entry) > 1 else entry[0]
        else:
            # Bare strings are used as-is for both values.
            address = display = entry
        addresses.append((address, display))
    return addresses
def get_email_context(self):
    """
    Returns a dict, usable in a Jinja2 environment, holding enough data to
    help in creating an e-mail from a template.
    """
    return {
        "affiliated_autonomous_systems": AutonomousSystem.objects.filter(
            affiliated=True
        ),
        "autonomous_system": self,
    }
def generate_email(self, email):
    """
    Renders an e-mail template with this AS' e-mail context.
    """
    context = self.get_email_context()
    return email.render(context)
class BGPGroup(AbstractGroup):
    """A named group of direct peering sessions."""

    # Shared module logger for all BGP groups.
    logger = logging.getLogger("peering.manager.peering")

    class Meta(AbstractGroup.Meta):
        ordering = ["name", "slug"]
        verbose_name = "BGP group"

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Detail view URL for this group.
        return reverse("peering:bgpgroup_details", args=[self.pk])

    def get_peering_sessions_list_url(self):
        # URL listing this group's peering sessions.
        return reverse("peering:bgpgroup_peering_sessions", args=[self.pk])

    def get_peering_sessions(self):
        # All direct peering sessions attached to this group.
        return DirectPeeringSession.objects.filter(bgp_group=self)

    def poll_peering_sessions(self):
        """
        Refreshes the BGP state and prefix counters of all of this group's
        sessions from their routers, then records the poll timestamp.

        Returns True when sessions were polled and saved, False when polling
        is disabled or nothing could be fetched from the routers.
        """
        if not self.check_bgp_session_states:
            self.logger.debug(
                'ignoring session states for %s, reason: "check disabled"',
                self.name.lower(),
            )
            return False
        peering_sessions = DirectPeeringSession.objects.prefetch_related(
            "router"
        ).filter(bgp_group=self)
        if not peering_sessions:
            # Empty result no need to go further
            return False
        # Get BGP neighbors details from router, but only get them once
        # NOTE(review): sessions without an attached router (router is
        # nullable on DirectPeeringSession) would raise here — confirm.
        bgp_neighbors_detail = {}
        for session in peering_sessions:
            if session.router not in bgp_neighbors_detail:
                detail = session.router.get_bgp_neighbors_detail()
                bgp_neighbors_detail.update(
                    {
                        session.router: session.router.bgp_neighbors_detail_as_list(
                            detail
                        )
                    }
                )
        if not bgp_neighbors_detail:
            # Empty result no need to go further
            return False
        # All session updates are committed as one transaction.
        with transaction.atomic():
            for router, detail in bgp_neighbors_detail.items():
                for session in detail:
                    ip_address = session["remote_address"]
                    self.logger.debug(
                        "looking for session %s in %s", ip_address, self.name.lower()
                    )
                    try:
                        peering_session = DirectPeeringSession.objects.get(
                            ip_address=ip_address, bgp_group=self, router=router
                        )
                        # Get info that we are actually looking for
                        state = session["connection_state"].lower()
                        received = session["received_prefix_count"]
                        advertised = session["advertised_prefix_count"]
                        self.logger.debug(
                            "found session %s in %s with state %s",
                            ip_address,
                            self.name.lower(),
                            state,
                        )
                        # Update fields; negative counters are clamped to 0
                        peering_session.bgp_state = state
                        peering_session.received_prefix_count = (
                            0 if received < 0 else received
                        )
                        peering_session.advertised_prefix_count = (
                            0 if advertised < 0 else advertised
                        )
                        # Update the BGP state of the session
                        if peering_session.bgp_state == BGPState.ESTABLISHED:
                            peering_session.last_established_state = timezone.now()
                        peering_session.save()
                    except DirectPeeringSession.DoesNotExist:
                        # Router reported a neighbor we do not track; skip it.
                        self.logger.debug(
                            "session %s in %s not found", ip_address, self.name.lower()
                        )
        # Save last session states update
        self.bgp_session_states_update = timezone.now()
        self.save()
        return True
class Community(ChangeLoggedModel, TaggableModel):
    """A BGP community with a type used for rendering and classification."""

    name = models.CharField(max_length=128)
    slug = models.SlugField(unique=True, max_length=255)
    value = CommunityField(max_length=50)
    type = models.CharField(
        max_length=50, choices=CommunityType.choices, default=CommunityType.INGRESS
    )
    comments = models.TextField(blank=True)

    class Meta:
        verbose_name_plural = "communities"
        ordering = ["value", "name"]

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Detail view URL for this community.
        return reverse("peering:community_details", args=[self.pk])

    def get_type_html(self):
        """Returns a safe HTML badge reflecting the community type."""
        # Default to an "Unknown" badge for unexpected type values.
        badge_type, text = "badge-secondary", "Unknown"
        if self.type == CommunityType.EGRESS:
            badge_type, text = "badge-primary", self.get_type_display()
        elif self.type == CommunityType.INGRESS:
            badge_type, text = "badge-info", self.get_type_display()
        return mark_safe(f'<span class="badge {badge_type}">{text}</span>')
class DirectPeeringSession(BGPSession):
    """A BGP session configured directly with a peer (not over an IXP connection)."""

    # AS on our side of the session.
    local_autonomous_system = models.ForeignKey(
        "AutonomousSystem",
        on_delete=models.CASCADE,
        related_name="%(class)s_local_autonomous_system",
        null=True,
    )
    local_ip_address = InetAddressField(
        store_prefix_length=False,
        blank=True,
        null=True,
        verbose_name="Local IP address",
    )
    # Optional group this session belongs to; kept on group deletion.
    bgp_group = models.ForeignKey(
        "BGPGroup",
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        verbose_name="BGP group",
    )
    relationship = models.CharField(
        max_length=50,
        choices=BGPRelationship.choices,
        help_text="Relationship with the remote peer.",
    )
    # Router the session is configured on; kept on router deletion.
    router = models.ForeignKey(
        "Router", blank=True, null=True, on_delete=models.SET_NULL
    )

    class Meta(BGPSession.Meta):
        ordering = [
            "service_reference",
            "local_autonomous_system",
            "autonomous_system",
            "ip_address",
        ]

    def __str__(self):
        return f"{self.get_relationship_display()} - AS{self.autonomous_system.asn} - IP {self.ip_address}"

    def get_absolute_url(self):
        # Detail view URL for this session.
        return reverse("peering:directpeeringsession_details", args=[self.pk])

    def poll(self):
        """
        Refreshes this session's BGP state and prefix counters from the
        attached router. Returns True when the session was updated and saved,
        False otherwise.
        """
        # Check if we are able to get BGP details
        log = 'ignoring session states on {}, reason: "{}"'
        if not self.router or not self.router.platform:
            log = log.format(str(self.ip_address).lower(), "no usable router attached")
        elif self.bgp_group and not self.bgp_group.check_bgp_session_states:
            # NOTE(review): no "name" field is visible on this model in this
            # file; this line may raise AttributeError when reached — confirm
            # whether self.bgp_group.name was intended.
            log = log.format(self.name.lower(), "check disabled")
        else:
            log = None
        # If we cannot check for BGP details, don't do anything
        if log:
            self.logger.debug(log)
            return False
        # Get BGP session detail
        bgp_neighbor_detail = self.router.get_bgp_neighbors_detail(
            ip_address=self.ip_address
        )
        if bgp_neighbor_detail:
            received = bgp_neighbor_detail["received_prefix_count"]
            advertised = bgp_neighbor_detail["advertised_prefix_count"]
            # Update fields; negative counters are clamped to zero
            self.bgp_state = bgp_neighbor_detail["connection_state"].lower()
            self.received_prefix_count = received if received > 0 else 0
            self.advertised_prefix_count = advertised if advertised > 0 else 0
            if self.bgp_state == BGPState.ESTABLISHED:
                self.last_established_state = timezone.now()
            self.save()
            return True
        return False

    def get_relationship_html(self):
        # Bootstrap badge markup reflecting the peering relationship.
        if self.relationship == BGPRelationship.CUSTOMER:
            badge_type = "badge-danger"
        elif self.relationship == BGPRelationship.PRIVATE_PEERING:
            badge_type = "badge-success"
        elif self.relationship == BGPRelationship.TRANSIT_PROVIDER:
            badge_type = "badge-primary"
        else:
            badge_type = "badge-secondary"
        return mark_safe(
            f'<span class="badge {badge_type}">{self.get_relationship_display()}</span>'
        )
class InternetExchange(AbstractGroup):
    """An Internet Exchange Point (IXP) the local AS is connected to."""

    # PeeringDB LAN record backing this IXP; kept null when unlinked.
    peeringdb_ixlan = models.ForeignKey(
        "peeringdb.IXLan", on_delete=models.SET_NULL, blank=True, null=True
    )
    local_autonomous_system = models.ForeignKey(
        "AutonomousSystem", on_delete=models.CASCADE, null=True
    )

    logger = logging.getLogger("peering.manager.peering")

    class Meta(AbstractGroup.Meta):
        ordering = ["local_autonomous_system", "name", "slug"]

    @property
    def linked_to_peeringdb(self):
        """
        Tells if the PeeringDB object for this IX still exists.
        """
        return self.peeringdb_ixlan is not None

    @property
    def has_connected_routers(self):
        # True when at least one connection to this IXP has a router attached.
        return (
            Connection.objects.filter(
                internet_exchange_point=self, router__isnull=False
            ).count()
            > 0
        )

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("peering:internetexchange_details", args=[self.pk])

    def get_peering_sessions_list_url(self):
        return reverse("peering:internetexchange_peering_sessions", args=[self.pk])

    def get_peer_list_url(self):
        return reverse("peering:internet_exchange_peers", args=[self.pk])

    def merged_export_policies(self, reverse=False):
        # Get own policies
        policies = [p for p in self.export_policies()]
        return list(reversed(policies)) if reverse else policies

    def merged_import_policies(self, reverse=False):
        # Get own policies
        policies = [p for p in self.import_policies()]
        return list(reversed(policies)) if reverse else policies

    def link_to_peeringdb(self):
        """
        Retrieves the PeeringDB IDs for this IX based on connections.

        The PeeringDB records will be returned on success. In any other cases
        `None` will be returned. The value will also be saved in model's field.
        """
        peeringdb_ixlan = None
        for connection in Connection.objects.filter(internet_exchange_point=self):
            # For each connection, try to see if a valid PeeringDB record exists
            # and make sure that they all point towards the same IX
            if connection.linked_to_peeringdb:
                if peeringdb_ixlan is None:
                    peeringdb_ixlan = connection.peeringdb_netixlan.ixlan
                else:
                    if peeringdb_ixlan != connection.peeringdb_netixlan.ixlan:
                        # Connections not belonging to the same IX
                        return None

        if peeringdb_ixlan is not None:
            self.peeringdb_ixlan = peeringdb_ixlan
            self.save()

        return peeringdb_ixlan

    def get_prefixes(self):
        """
        Returns all prefixes found (in PeeringDB) for this IXP.
        """
        if not self.linked_to_peeringdb:
            return IXLanPrefix.objects.none()
        return IXLanPrefix.objects.filter(ixlan=self.peeringdb_ixlan)

    def get_connections(self):
        """
        Returns all connections to this IXP.
        """
        return Connection.objects.filter(internet_exchange_point=self)

    def get_routers(self):
        # Routers reachable through this IXP's connections.
        return Router.objects.filter(
            pk__in=self.get_connections().values_list("router", flat=True)
        )

    def get_peering_sessions(self):
        """
        Returns all peering sessions setup over this IXP.
        """
        return InternetExchangePeeringSession.objects.filter(
            ixp_connection__in=self.get_connections()
        )

    def get_autonomous_systems(self):
        """
        Returns all autonomous systems with setup peering sessions over this IXP.
        """
        return AutonomousSystem.objects.filter(
            pk__in=self.get_peering_sessions().values_list(
                "autonomous_system", flat=True
            )
        )

    def get_available_peers(self):
        """
        Finds available peers for the AS connected to this IX.
        """
        # Not linked to PeeringDB, cannot determine peers
        if not self.linked_to_peeringdb:
            return NetworkIXLan.objects.none()

        # Get all peering sessions currently existing
        existing_sessions = self.get_peering_sessions()
        ipv6_sessions = []
        ipv4_sessions = []
        for session in existing_sessions:
            ip = ipaddress.ip_address(session.ip_address)
            if ip.version == 6:
                ipv6_sessions.append(ip)
            elif ip.version == 4:
                ipv4_sessions.append(ip)
            else:
                self.logger.debug(f"peering session with strange ip: {ip}")

        # Candidates on the same ixlan, excluding ourselves and addresses
        # already covered by a session.
        return NetworkIXLan.objects.filter(
            ~Q(asn=self.local_autonomous_system.asn)
            & Q(ixlan=self.peeringdb_ixlan)
            & (~Q(ipaddr6__in=ipv6_sessions) | ~Q(ipaddr4__in=ipv4_sessions))
        ).order_by("asn")

    @transaction.atomic
    def poll_peering_sessions(self):
        """
        Refreshes the BGP state and prefix counters of this IXP's sessions
        from the connected routers. Returns True on success, False when
        polling is disabled, no router is connected or fetching failed.
        """
        # Get connected routers to this IXP
        connected_routers = self.get_routers()
        # Check if we are able to get BGP details
        log = 'ignoring session states on {}, reason: "{}"'
        # Fixed: the previous check used `connected_routers.count() < 0`,
        # which can never be true, so an IXP with no routers silently
        # "succeeded" instead of bailing out here.
        if not connected_routers:
            log = log.format(self.name.lower(), "no routers connected")
        elif not self.check_bgp_session_states:
            log = log.format(self.name.lower(), "check disabled")
        else:
            log = None
        # If we cannot check for BGP details, don't do anything
        if log:
            self.logger.debug(log)
            return False

        for router in connected_routers:
            # Get all BGP sessions detail
            bgp_neighbors_detail = router.get_bgp_neighbors_detail()
            # An error occurred, probably
            if not bgp_neighbors_detail:
                return False

            for _, as_details in bgp_neighbors_detail.items():
                for _, sessions in as_details.items():
                    # Check BGP sessions found
                    for session in sessions:
                        ip_address = session["remote_address"]
                        self.logger.debug(
                            f"looking for session {ip_address} in {self.name.lower()}"
                        )
                        # Check if the BGP session is on this IX
                        try:
                            ip = ipaddress.ip_address(ip_address)
                            lookup = {"ip_address": ip_address}
                            for connection in router.get_connections():
                                # Limit scope to address in connection's subnets
                                if (
                                    ip.version == 4
                                    and ip in connection.ipv4_address.network
                                ) or (
                                    ip.version == 6
                                    and ip in connection.ipv6_address.network
                                ):
                                    lookup["ixp_connection"] = connection
                                    break
                            ixp_session = InternetExchangePeeringSession.objects.get(
                                **lookup
                            )
                            # Get the BGP state for the session
                            state = session["connection_state"].lower()
                            received = session["received_prefix_count"]
                            advertised = session["advertised_prefix_count"]
                            self.logger.debug(
                                f"found session {ip_address} in {self.name.lower()} with state {state}"
                            )
                            # Update fields; negative counters are clamped to 0
                            ixp_session.bgp_state = state
                            ixp_session.received_prefix_count = (
                                0 if received < 0 else received
                            )
                            ixp_session.advertised_prefix_count = (
                                0 if advertised < 0 else advertised
                            )
                            # Update the BGP state of the session
                            if ixp_session.bgp_state == BGPState.ESTABLISHED:
                                ixp_session.last_established_state = timezone.now()
                            ixp_session.save()
                        except InternetExchangePeeringSession.DoesNotExist:
                            self.logger.debug(
                                f"session {ip_address} in {self.name.lower()} not found"
                            )

        # Save last session states update
        self.bgp_session_states_update = timezone.now()
        self.save()

        return True

    @transaction.atomic
    def import_sessions(self, connection):
        """
        Imports sessions setup on a connected router.

        Returns a (created sessions, created autonomous systems) tuple.
        """
        session_number, asn_number = 0, 0
        ignored_autonomous_systems = []

        allowed_prefixes = self.get_prefixes()
        sessions = connection.router.get_bgp_neighbors()

        def is_valid(ip_address):
            # An address is only considered when it fits one of the IXP's
            # PeeringDB prefixes of the same address family.
            for p in allowed_prefixes:
                if p.prefix.version == ip_address.version:
                    if ip_address in p.prefix:
                        return True
            return False

        for session in sessions:
            ip = ipaddress.ip_address(session["ip_address"])
            if not is_valid(ip):
                self.logger.debug(
                    f"ignoring ixp session, {str(ip)} does not fit in any prefixes"
                )
                continue

            self.logger.debug(f"processing ixp session {str(ip)}")
            remote_asn = session["remote_asn"]

            try:
                InternetExchangePeeringSession.objects.get(
                    ixp_connection=connection, ip_address=ip
                )
                self.logger.debug(
                    f"ixp session {str(ip)} with as{remote_asn} already exists"
                )
                continue
            except InternetExchangePeeringSession.DoesNotExist:
                self.logger.debug(
                    f"ixp session {str(ip)} with as{remote_asn} does not exist"
                )

            # Get the AS, create it if needed
            autonomous_system = AutonomousSystem.create_from_peeringdb(remote_asn)

            # Do not count the AS if it does not have a PeeringDB record
            if autonomous_system:
                self.logger.debug(f"as{remote_asn} created")
                asn_number += 1
            else:
                if remote_asn not in ignored_autonomous_systems:
                    ignored_autonomous_systems.append(remote_asn)
                    self.logger.debug(
                        f"could not create as{remote_asn}, session {str(ip)} ignored"
                    )

            # Only add a session if we can use the AS it is linked to
            if autonomous_system:
                self.logger.debug(f"creating session {str(ip)}")
                InternetExchangePeeringSession.objects.create(
                    autonomous_system=autonomous_system,
                    ixp_connection=connection,
                    ip_address=ip,
                )
                session_number += 1
                self.logger.debug(f"session {str(ip)} created")

        return session_number, asn_number
class InternetExchangePeeringSession(BGPSession):
    """A BGP session established with a peer over an IXP connection."""

    # Connection carrying the session; the session dies with it.
    ixp_connection = models.ForeignKey(
        "net.Connection",
        on_delete=models.CASCADE,
        null=True,
        verbose_name="IXP connection",
    )
    is_route_server = models.BooleanField(
        blank=True, default=False, verbose_name="Route server"
    )

    class Meta(BGPSession.Meta):
        ordering = [
            "service_reference",
            "autonomous_system",
            "ixp_connection",
            "ip_address",
        ]

    @staticmethod
    def create_from_peeringdb(affiliated, internet_exchange, netixlan):
        """
        Builds (unsaved) sessions for the given PeeringDB NetworkIXLan, one per
        connection and address family. Returns the list of candidate sessions.
        """
        results = []

        if not netixlan:
            return results

        # If the IXP is not given, e.g. we are in the provisioning section, try
        # to guess the IXP from the PeeringDB record
        if not internet_exchange:
            internet_exchange = InternetExchange.objects.filter(
                local_autonomous_system=affiliated, peeringdb_ixlan=netixlan.ixlan
            ).first()
        available_connections = Connection.objects.filter(
            internet_exchange_point=internet_exchange
        )

        for connection in available_connections:
            for version in (6, 4):
                ip_address = getattr(netixlan, f"ipaddr{version}", None)
                if not ip_address:
                    continue

                params = {
                    "autonomous_system": AutonomousSystem.create_from_peeringdb(
                        netixlan.asn
                    ),
                    "ixp_connection": connection,
                    "ip_address": ip_address.ip,
                }

                try:
                    # Try to get the session, in case it already exists
                    InternetExchangePeeringSession.objects.get(**params)
                except InternetExchangePeeringSession.DoesNotExist:
                    # Only keep sessions that do not exist yet.
                    results.append(InternetExchangePeeringSession(**params))

        return results

    def __str__(self):
        if not self.ixp_connection:
            return f"AS{self.autonomous_system.asn} - IP {self.ip_address}"
        return f"{self.ixp_connection.internet_exchange_point.name} - AS{self.autonomous_system.asn} - IP {self.ip_address}"

    def get_absolute_url(self):
        # Detail view URL for this session.
        return reverse("peering:internetexchangepeeringsession_details", args=[self.pk])

    def poll(self):
        """
        Refreshes this session's BGP state and prefix counters from the
        connection's router. Returns True when the session was updated and
        saved, False otherwise.
        """
        # Check if we are able to get BGP details
        log = 'ignoring session states on {}, reason: "{}"'
        if not self.ixp_connection.router or not self.ixp_connection.router.platform:
            log = log.format(str(self.ip_address).lower(), "no usable router attached")
        else:
            log = None
        # If we cannot check for BGP details, don't do anything
        if log:
            self.logger.debug(log)
            return False

        # Get BGP session detail
        bgp_neighbor_detail = self.ixp_connection.router.get_bgp_neighbors_detail(
            ip_address=self.ip_address
        )
        if bgp_neighbor_detail:
            received = bgp_neighbor_detail["received_prefix_count"]
            advertised = bgp_neighbor_detail["advertised_prefix_count"]

            # Update fields; negative counters are clamped to zero
            self.bgp_state = bgp_neighbor_detail["connection_state"].lower()
            self.received_prefix_count = received if received > 0 else 0
            self.advertised_prefix_count = advertised if advertised > 0 else 0
            if self.bgp_state == BGPState.ESTABLISHED:
                self.last_established_state = timezone.now()
            self.save()
            return True

        return False

    def exists_in_peeringdb(self):
        """
        Returns `True` if a NetworkIXLan exists for this session's IP.
        """
        # ip_address may be stored as a str or an ipaddress object.
        if isinstance(self.ip_address, str):
            ip_version = ipaddress.ip_address(self.ip_address).version
        else:
            ip_version = self.ip_address.version
        try:
            NetworkIXLan.objects.get(**{f"ipaddr{ip_version}": str(self.ip_address)})
            return True
        except NetworkIXLan.DoesNotExist:
            pass
        return False

    def is_abandoned(self):
        """
        Returns True if a session is considered as abandoned. Returns False
        otherwise.

        A session is *not* considered as abandoned if it matches one of the
        following criteria:
          * The Internet Exchange is not linked to a PeeringDB record
          * User does not poll peering session states
          * The peer AS has no cached PeeringDB record
          * The peer AS has a cached PeeringDB record with the session IP address
          * The BGP state for the session is not idle or active
        """
        if (
            not self.ixp_connection.linked_to_peeringdb
            or not self.ixp_connection.internet_exchange_point.check_bgp_session_states
            or not self.autonomous_system.peeringdb_network
            or self.exists_in_peeringdb()
            or self.bgp_state not in [BGPState.IDLE, BGPState.ACTIVE]
        ):
            return False
        return True
class Router(ChangeLoggedModel, TaggableModel):
# Router model fields (class body; the class statement is above this chunk).
local_autonomous_system = models.ForeignKey(
    "AutonomousSystem", on_delete=models.CASCADE, null=True
)
name = models.CharField(max_length=128)
hostname = models.CharField(max_length=256)
platform = models.ForeignKey(
    "devices.Platform",
    on_delete=models.PROTECT,
    blank=True,
    null=True,
    help_text="The router platform, used to interact with it",
)
encrypt_passwords = models.BooleanField(
    blank=True,
    default=False,
    help_text="Try to encrypt passwords for peering sessions",
)
configuration_template = models.ForeignKey(
    "Configuration", blank=True, null=True, on_delete=models.SET_NULL
)
device_state = models.CharField(
    max_length=20,
    choices=DeviceState.choices,
    default=DeviceState.ENABLED,
    blank=True,
    help_text="State of the device for configuration pushes",
)
# 0 means "not backed by a NetBox device" (see is_netbox_device).
netbox_device_id = models.PositiveIntegerField(
    blank=True, default=0, verbose_name="NetBox device"
)
use_netbox = models.BooleanField(
    blank=True,
    default=False,
    help_text="Use NetBox to communicate instead of NAPALM",
)
config_context = models.JSONField(blank=True, null=True)
# Per-router NAPALM overrides; falsy values fall back to the global settings
# (see get_napalm_device).
napalm_username = models.CharField(blank=True, null=True, max_length=256)
napalm_password = models.CharField(blank=True, null=True, max_length=256)
napalm_timeout = models.PositiveIntegerField(blank=True, default=0)
napalm_args = models.JSONField(blank=True, null=True)
comments = models.TextField(blank=True)

# Shared module logger for NAPALM interactions.
logger = logging.getLogger("peering.manager.napalm")

class Meta:
    ordering = ["local_autonomous_system", "name"]
    permissions = [
        ("view_router_configuration", "Can view router's configuration"),
        ("deploy_router_configuration", "Can deploy router's configuration"),
    ]
def __str__(self):
    # Human-readable identification: the router's configured name.
    return self.name

def get_absolute_url(self):
    # Detail view URL for this router.
    return reverse("peering:router_details", args=[self.pk])

def get_direct_peering_sessions_list_url(self):
    # URL listing direct peering sessions attached to this router.
    return reverse("peering:router_direct_peering_sessions", args=[self.pk])

def is_netbox_device(self):
    # A NetBox-backed router stores a non-zero NetBox device ID.
    return self.netbox_device_id != 0
def is_usable_for_task(self, job_result=None, logger=None):
    """
    Performs pre-flight checks to understand if a router is suited for
    background task processing.
    """
    if logger is None:
        logger = self.logger

    def fail(message):
        # Record the failure on the job result (when given) and reject.
        if job_result:
            job_result.mark_errored(message, obj=self, logger=logger)
            job_result.save()
        return False

    # Ensure device is not in disabled state
    if self.device_state == DeviceState.DISABLED:
        return fail("Router is not enabled.")
    # Check if the router runs on a supported platform
    if not self.platform:
        return fail("Router has no assigned platform.")
    if not self.platform.napalm_driver:
        return fail("Router's platform has no NAPALM driver.")

    return True
def get_bgp_groups(self):
    """
    Returns BGP groups that can be deployed on this router.

    A group is deployable on a router when direct peering sessions in the
    group are attached to the router.
    """
    group_ids = DirectPeeringSession.objects.filter(router=self).values_list(
        "bgp_group", flat=True
    )
    return BGPGroup.objects.filter(pk__in=group_ids)
def get_connections(self, internet_exchange_point=None):
    """
    Returns connections attached to this router, optionally limited to a
    single IXP.
    """
    lookup = {"router": self}
    if internet_exchange_point:
        lookup["internet_exchange_point"] = internet_exchange_point
    return Connection.objects.filter(**lookup)
def get_internet_exchange_points(self):
    """
    Returns IXPs that this router is connected to.
    """
    ixp_ids = self.get_connections().values_list(
        "internet_exchange_point", flat=True
    )
    return InternetExchange.objects.filter(pk__in=ixp_ids)
def get_direct_autonomous_systems(self, bgp_group=None):
    """
    Returns autonomous systems that are directly peered with this router,
    optionally limited to a BGP group.
    """
    lookup = {"router": self}
    if bgp_group:
        lookup["bgp_group"] = bgp_group
    as_ids = DirectPeeringSession.objects.filter(**lookup).values_list(
        "autonomous_system", flat=True
    )
    return AutonomousSystem.objects.filter(pk__in=as_ids)
def get_ixp_autonomous_systems(self, internet_exchange_point=None):
    """
    Returns autonomous systems with which this router peers over IXPs.
    """
    connections = self.get_connections(
        internet_exchange_point=internet_exchange_point
    )
    as_ids = InternetExchangePeeringSession.objects.filter(
        ixp_connection__in=connections
    ).values_list("autonomous_system", flat=True)
    return AutonomousSystem.objects.filter(pk__in=as_ids)
def get_autonomous_systems(self):
    """
    Returns all autonomous systems with which this router peers, directly or
    over IXPs.
    """
    direct = self.get_direct_autonomous_systems()
    over_ixps = self.get_ixp_autonomous_systems()
    return direct.union(over_ixps)
def get_direct_peering_sessions(self, bgp_group=None):
    """
    Returns all direct peering sessions setup on this router, optionally
    limited to a BGP group.
    """
    lookup = {"router": self}
    if bgp_group:
        lookup["bgp_group"] = bgp_group
    return DirectPeeringSession.objects.filter(**lookup)
def get_ixp_peering_sessions(self, internet_exchange_point=None):
    """
    Returns all IXP peering sessions setup on this router.
    """
    connections = self.get_connections(
        internet_exchange_point=internet_exchange_point
    )
    return InternetExchangePeeringSession.objects.filter(
        ixp_connection__in=connections
    )
def get_configuration_context(self):
    """
    Returns a dict, to be used in a Jinja2 environment, that holds enough
    data to help in creating a configuration from a template.
    """
    context = {
        "router": self,
        "local_as": self.local_autonomous_system,
        "autonomous_systems": self.get_autonomous_systems(),
        "bgp_groups": self.get_bgp_groups(),
        "internet_exchange_points": self.get_internet_exchange_points(),
        "communities": Community.objects.all(),
        "routing_policies": RoutingPolicy.objects.all(),
    }
    return context
def generate_configuration(self):
    """
    Returns this router's configuration rendered from its template, or an
    empty string when no template is set.
    """
    if not self.configuration_template:
        return ""
    return self.configuration_template.render(self.get_configuration_context())
def get_napalm_device(self):
    """
    Returns an instance of the NAPALM driver to connect to this router, or
    None when no platform/driver is set or the driver cannot be imported.
    """
    if not self.platform or not self.platform.napalm_driver:
        self.logger.debug("no napalm driver defined")
        return None

    self.logger.debug(f"looking for napalm driver '{self.platform.napalm_driver}'")
    try:
        # Driver found, instantiate it
        driver = napalm.get_network_driver(self.platform.napalm_driver)
        self.logger.debug(f"found napalm driver '{self.platform.napalm_driver}'")

        # Merge NAPALM args: first global, then platform's, finish with
        # router's. Copy the settings dict first — updating it in place
        # mutated the shared settings.NAPALM_ARGS across routers.
        args = dict(settings.NAPALM_ARGS)
        if self.platform.napalm_args:
            args.update(self.platform.napalm_args)
        if self.napalm_args:
            args.update(self.napalm_args)

        return driver(
            hostname=self.hostname,
            username=self.napalm_username or settings.NAPALM_USERNAME,
            # Restored from a redacted placeholder: per-router password with
            # a fall back to the global NAPALM password.
            password=self.napalm_password or settings.NAPALM_PASSWORD,
            timeout=self.napalm_timeout or settings.NAPALM_TIMEOUT,
            optional_args=args,
        )
    except napalm.base.exceptions.ModuleImportError:
        # Unable to import proper driver from napalm,
        # most probably due to a broken install
        self.logger.error(
            f"no napalm driver: '{self.platform.napalm_driver}' for platform: '{self.platform}' found (not installed or does not exist)"
        )
        return None
def open_napalm_device(self, device):
    """
    Opens a connection with a device using NAPALM.

    Returns True if the connection is properly opened, False in any other
    case (including when no device is given). Connection errors are caught
    and logged rather than propagated.

    It is a wrapper method mostly used for logging purpose.
    """
    if not device:
        return False

    success = False
    try:
        self.logger.debug(f"connecting to {self.hostname}")
        device.open()
    except napalm.base.exceptions.ConnectionException as e:
        self.logger.error(
            f'error while trying to connect to {self.hostname} reason "{e}"'
        )
    except Exception:
        self.logger.error(f"error while trying to connect to {self.hostname}")
    else:
        self.logger.debug(f"successfully connected to {self.hostname}")
        success = True
    # The previous `finally: return success` silently swallowed any
    # exception (including KeyboardInterrupt) raised during handling;
    # returning after the try block has the same result without that trap.
    return success
def close_napalm_device(self, device):
    """
    Closes a connection with a device using NAPALM.

    Returns True when a device was given and its connection closed, False
    when no valid device is provided.

    It is a wrapper method mostly used for logging purpose.
    """
    if not device:
        return False
    device.close()
    self.logger.debug(f"closing connection with {self.hostname}")
    return True
def test_napalm_connection(self):
    """
    Opens, probes and closes a NAPALM connection to check that the device
    can be interacted with.

    Returns True only when opening, the liveness check and closing are all
    successful.
    """
    device = self.get_napalm_device()

    # Open and close the test_napalm_connection
    self.logger.debug(f"testing connection with {self.hostname}")
    opened = self.open_napalm_device(device)
    alive = device.is_alive() if opened else False
    closed = self.close_napalm_device(device) if alive else False

    # Issue while opening or closing the connection
    if not (opened and alive and closed):
        self.logger.error(
            f"cannot connect to {self.hostname}, napalm functions won't work"
        )

    return opened and closed and alive
def set_napalm_configuration(self, config, commit=False):
    """
    Tries to merge a given configuration on a device using NAPALM.

    This methods returns the changes applied to the configuration if the
    merge was successful. It will return None in any other cases.

    The optional named argument 'commit' is a boolean which is used to
    know if the changes must be commited or discarded. The default value is
    False which means that the changes will be discarded.
    """
    # Returned as an (error, changes) pair throughout.
    error, changes = None, None

    # Ensure device is enabled, we allow maintenance mode to force a config push
    if self.device_state == DeviceState.DISABLED:
        self.logger.debug(f"device: {self.name} is disabled, exiting config push")
        return "device is disabled, cannot deploy config", changes

    # Make sure there actually a configuration to merge
    if config is None or not isinstance(config, str) or not config.strip():
        self.logger.debug(f"no configuration to merge: {config}")
        error = "no configuration found to be merged"
        return error, changes

    device = self.get_napalm_device()
    opened = self.open_napalm_device(device)

    if opened:
        try:
            # Load the config
            self.logger.debug(f"merging configuration on {self.hostname}")
            device.load_merge_candidate(config=config)
            self.logger.debug(f"merged configuration\n{config}")

            # Get the config diff
            self.logger.debug(
                f"checking for configuration changes on {self.hostname}"
            )
            changes = device.compare_config()
            self.logger.debug(f"raw napalm output\n{changes}")

            # Commit the config if required
            if commit:
                self.logger.debug(f"commiting configuration on {self.hostname}")
                device.commit_config()
            else:
                self.logger.debug(f"discarding configuration on {self.hostname}")
                device.discard_config()
        except napalm.base.exceptions.MergeConfigException as e:
            error = f'unable to merge configuration on {self.hostname} reason "{e}"'
            changes = None
            self.logger.debug(error)
        except Exception as e:
            # Any other failure is reported the same way as a merge error.
            error = f'unable to merge configuration on {self.hostname} reason "{e}"'
            changes = None
            self.logger.debug(error)
        else:
            self.logger.debug(
                f"successfully merged configuration on {self.hostname}"
            )
        finally:
            # Always attempt to close the connection, success or not.
            closed = self.close_napalm_device(device)
            if not closed:
                self.logger.debug(
                    f"error while closing connection with {self.hostname}"
                )
    else:
        error = f"unable to connect to {self.hostname}"

    return error, changes
def _napalm_bgp_neighbors_to_peer_list(self, napalm_dict):
bgp_peers = []
if not napalm_dict:
return bgp_peers
# For each VRF
for vrf in napalm_dict:
# Get peers inside it
peers = napalm_dict[vrf]["peers"]
self.logger.debug(
"found %s bgp neighbors in %s vrf on %s", len(peers), vrf, self.hostname
)
# For each peer handle its IP address and the needed details
for ip, details in peers.items():
if "remote_as" not in details:
self.logger.debug(
"ignored bgp neighbor %s in %s vrf on %s",
ip,
vrf,
self.hostname,
)
elif ip in [str(i["ip_address"]) for i in bgp_peers]:
self.logger.debug(
"duplicate bgp neighbor %s on %s", ip, self.hostname
)
else:
try:
# Save the BGP session (IP and remote ASN)
bgp_peers.append(
{
"ip_address": ipaddress.ip_address(ip),
"remote_asn": details["remote_as"],
}
)
except ValueError as e:
# Error while parsing the IP address
self.logger.error(
'ignored bgp neighbor %s in %s vrf on %s reason "%s"',
ip,
vrf,
self.hostname,
e,
)
# Force next iteration
continue
return bgp_peers
def get_napalm_bgp_neighbors(self):
"""
Returns a list of dictionaries listing all BGP neighbors found on the
router using NAPALM.
Each dictionary contains two keys 'ip_address' and 'remote_asn'.
If an error occurs or no BGP neighbors can be found, the returned list
will be empty.
"""
bgp_sessions = []
device = self.get_napalm_device()
opened = self.open_napalm_device(device)
if opened:
# Get all BGP neighbors on the router
self.logger.debug("getting bgp neighbors on %s", self.hostname)
bgp_neighbors = device.get_bgp_neighbors()
self.logger.debug("raw napalm output %s", bgp_neighbors)
self.logger.debug(
"found %s vrfs with bgp neighbors on %s",
len(bgp_neighbors),
self.hostname,
)
bgp_sessions = self._napalm_bgp_neighbors_to_peer_list(bgp_neighbors)
self.logger.debug(
"found %s bgp neighbors on %s", len(bgp_sessions), self.hostname
)
# Close connection to the device
closed = self.close_napalm_device(device)
if not closed:
self.logger.debug(
"error while closing connection with %s", self.hostname
)
return bgp_sessions
def get_netbox_bgp_neighbors(self):
"""
Returns a list of dictionaries listing all BGP neighbors found on the
router using NetBox.
Each dictionary contains two keys 'ip_address' and 'remote_asn'.
If an error occurs or no BGP neighbors can be found, the returned list
will be empty.
"""
bgp_sessions = []
self.logger.debug("getting bgp neighbors on %s", self.hostname)
bgp_neighbors = NetBox().napalm(self.netbox_device_id, "get_bgp_neighbors")
self.logger.debug("raw napalm output %s", bgp_neighbors)
self.logger.debug(
"found %s vrfs with bgp neighbors on %s", len(bgp_neighbors), self.hostname
)
bgp_sessions = self._napalm_bgp_neighbors_to_peer_list(bgp_neighbors)
self.logger.debug(
"found %s bgp neighbors on %s", len(bgp_sessions), self.hostname
)
return bgp_sessions
def get_bgp_neighbors(self):
"""
Returns a list of dictionaries listing all BGP neighbors found on the
router using either NAPALM or NetBox based on the use_netbox flag.
Each dictionary contains two keys 'ip_address' and 'remote_asn'.
If an error occurs or no BGP neighbors can be found, the returned list
will be empty.
"""
if self.use_netbox:
return self.get_netbox_bgp_neighbors()
else:
return self.get_napalm_bgp_neighbors()
def find_bgp_neighbor_detail(self, bgp_neighbors, ip_address):
"""
Finds and returns a single BGP neighbor amongst others.
"""
# NAPALM dict expected
if not isinstance(bgp_neighbors, dict):
return None
# Make sure to use an IP object
if isinstance(ip_address, str):
ip_address = ipaddress.ip_address(ip_address)
for _, asn in bgp_neighbors.items():
for _, neighbors in asn.items():
for neighbor in neighbors:
neighbor_ip_address = ipaddress.ip_address(
neighbor["remote_address"]
)
if ip_address == neighbor_ip_address:
return neighbor
return None
    def get_napalm_bgp_neighbors_detail(self, ip_address=None):
        """
        Returns a list of dictionaries listing all BGP neighbors found on the
        router using NAPALM and their respective detail.

        If the `ip_address` named parameter is not `None`, only the neighbor
        with this IP address will be returned (or `None` if not found).

        If an error occurs or no BGP neighbors can be found, the returned list
        will be empty.
        """
        bgp_neighbors_detail = []
        device = self.get_napalm_device()
        opened = self.open_napalm_device(device)
        if opened:
            # Get all BGP neighbors on the router
            self.logger.debug("getting bgp neighbors detail on %s", self.hostname)
            bgp_neighbors_detail = device.get_bgp_neighbors_detail()
            self.logger.debug("raw napalm output %s", bgp_neighbors_detail)
            self.logger.debug(
                "found %s vrfs with bgp neighbors on %s",
                len(bgp_neighbors_detail),
                self.hostname,
            )
            # Close connection to the device
            closed = self.close_napalm_device(device)
            if not closed:
                self.logger.debug(
                    "error while closing connection with %s", self.hostname
                )
        # Either the raw per-VRF structure, or a single neighbor when an IP
        # address was requested
        return (
            bgp_neighbors_detail
            if not ip_address
            else self.find_bgp_neighbor_detail(bgp_neighbors_detail, ip_address)
        )
def get_netbox_bgp_neighbors_detail(self, ip_address=None):
"""
Returns a list of dictionaries listing all BGP neighbors found on the
router using NetBox and their respective detail.
If an error occurs or no BGP neighbors can be found, the returned list
will be empty.
"""
bgp_neighbors_detail = []
self.logger.debug("getting bgp neighbors detail on %s", self.hostname)
bgp_neighbors_detail = NetBox().napalm(
self.netbox_device_id, "get_bgp_neighbors_detail"
)
self.logger.debug("raw napalm output %s", bgp_neighbors_detail)
self.logger.debug(
"found %s vrfs with bgp neighbors on %s",
len(bgp_neighbors_detail),
self.hostname,
)
return (
bgp_neighbors_detail
if not ip_address
else self.find_bgp_neighbor_detail(bgp_neighbors_detail, ip_address)
)
def get_bgp_neighbors_detail(self, ip_address=None):
"""
Returns a list of dictionaries listing all BGP neighbors found on the router
using either NAPALM or NetBox depending on the use_netbox flag and their
respective detail.
If the `ip_address` named parameter is not `None`, only the neighbor with this
IP address will be returned
If an error occurs or no BGP neighbors can be found, the returned list
will be empty.
"""
@cached_as(self, timeout=settings.CACHE_BGP_DETAIL_TIMEOUT)
def _get_bgp_neighbors_detail():
if self.use_netbox:
return self.get_netbox_bgp_neighbors_detail(ip_address=ip_address)
else:
return self.get_napalm_bgp_neighbors_detail(ip_address=ip_address)
return _get_bgp_neighbors_detail()
def bgp_neighbors_detail_as_list(self, bgp_neighbors_detail):
"""
Returns a list based on the dict returned by calling
get_napalm_bgp_neighbors_detail.
"""
flattened = []
if not bgp_neighbors_detail:
return flattened
for vrf in bgp_neighbors_detail:
for asn in bgp_neighbors_detail[vrf]:
flattened.extend(bgp_neighbors_detail[vrf][asn])
return flattened
class RoutingPolicy(ChangeLoggedModel, TaggableModel):
    """
    A named routing policy that can be attached for import and/or export,
    optionally restricted to one address family, and ordered by weight.
    """

    name = models.CharField(max_length=128)
    slug = models.SlugField(unique=True, max_length=255)
    type = models.CharField(
        max_length=50,
        choices=RoutingPolicyType.choices,
        default=RoutingPolicyType.IMPORT,
    )
    weight = models.PositiveSmallIntegerField(
        default=0, help_text="The higher the number, the higher the priority"
    )
    address_family = models.PositiveSmallIntegerField(
        default=IPFamily.ALL, choices=IPFamily.choices
    )
    config_context = models.JSONField(blank=True, null=True)
    comments = models.TextField(blank=True)

    class Meta:
        verbose_name_plural = "routing policies"
        ordering = ["-weight", "name"]

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("peering:routingpolicy_details", args=[self.pk])

    def get_type_html(self, display_name=False):
        """
        Renders the policy type as a Bootstrap badge; when `display_name` is
        True the badge text is the policy name instead of the type label.
        """
        # Badge class per known policy type; anything else is "Unknown"
        badge_classes = {
            RoutingPolicyType.EXPORT: "badge-primary",
            RoutingPolicyType.IMPORT: "badge-info",
            RoutingPolicyType.IMPORT_EXPORT: "badge-dark",
        }
        if self.type in badge_classes:
            badge_type = badge_classes[self.type]
            text = self.get_type_display()
        else:
            badge_type = "badge-secondary"
            text = "Unknown"
        if display_name:
            text = self.name
        return mark_safe(f'<span class="badge {badge_type}">{text}</span>')
|
import ipaddress
import logging
import napalm
from cacheops import cached_as
from django.conf import settings
from django.db import models, transaction
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from netfields import InetAddressField
from net.models import Connection
from netbox.api import NetBox
from peering import call_irr_as_set_resolver, parse_irr_as_set
from peering.enums import (
BGPRelationship,
BGPState,
CommunityType,
DeviceState,
IPFamily,
RoutingPolicyType,
)
from peering.fields import ASNField, CommunityField
from peeringdb.functions import get_shared_internet_exchanges
from peeringdb.models import IXLanPrefix, Network, NetworkContact, NetworkIXLan
from utils.models import ChangeLoggedModel, TaggableModel
from .abstracts import AbstractGroup, BGPSession
from .mixins import PolicyMixin
class AutonomousSystem(ChangeLoggedModel, TaggableModel, PolicyMixin):
    """
    An autonomous system (AS) we may peer with, optionally synchronized with
    its PeeringDB record (name, IRR AS-SET, max prefixes).
    """

    asn = ASNField(unique=True, verbose_name="ASN")
    name = models.CharField(max_length=128)
    name_peeringdb_sync = models.BooleanField(default=True)
    contact_name = models.CharField(max_length=50, blank=True)
    contact_phone = models.CharField(max_length=20, blank=True)
    contact_email = models.EmailField(blank=True, verbose_name="Contact e-mail")
    comments = models.TextField(blank=True)
    irr_as_set = models.CharField(
        max_length=255, blank=True, null=True, verbose_name="IRR AS-SET"
    )
    irr_as_set_peeringdb_sync = models.BooleanField(default=True)
    ipv6_max_prefixes = models.PositiveIntegerField(
        blank=True, default=0, verbose_name="IPv6 max prefix"
    )
    ipv6_max_prefixes_peeringdb_sync = models.BooleanField(default=True)
    ipv4_max_prefixes = models.PositiveIntegerField(
        blank=True, default=0, verbose_name="IPv4 max prefix"
    )
    ipv4_max_prefixes_peeringdb_sync = models.BooleanField(default=True)
    import_routing_policies = models.ManyToManyField(
        "RoutingPolicy", blank=True, related_name="%(class)s_import_routing_policies"
    )
    export_routing_policies = models.ManyToManyField(
        "RoutingPolicy", blank=True, related_name="%(class)s_export_routing_policies"
    )
    communities = models.ManyToManyField("Community", blank=True)
    # Cached IRR prefix lists, populated by retrieve_irr_as_set_prefixes()
    prefixes = models.JSONField(blank=True, null=True, editable=False)
    affiliated = models.BooleanField(default=False)
    logger = logging.getLogger("peering.manager.peering")

    class Meta:
        ordering = ["asn", "affiliated"]
        permissions = [("send_email", "Can send e-mails to AS contact")]

    @property
    def peeringdb_network(self):
        """Returns the local PeeringDB Network record or `None`."""
        try:
            return Network.objects.get(asn=self.asn)
        except Network.DoesNotExist:
            return None

    @property
    def general_policy(self):
        """Returns the PeeringDB general peering policy or `None`."""
        if self.peeringdb_network:
            return self.peeringdb_network.policy_general
        else:
            return None

    @property
    def peeringdb_contacts(self):
        """Returns PeeringDB contacts for this network (empty list if none)."""
        if self.peeringdb_network:
            return NetworkContact.objects.filter(net=self.peeringdb_network)
        return []

    @property
    def can_receive_email(self):
        """True when a manual or PeeringDB e-mail contact is available."""
        # Wrap in bool() so the property always yields a boolean instead of
        # sometimes leaking the contacts queryset to callers
        return bool(self.contact_email or self.peeringdb_contacts)

    @staticmethod
    def create_from_peeringdb(asn):
        """
        Creates (or fetches) an AutonomousSystem from its PeeringDB record.
        Returns `None` when the ASN has no PeeringDB record.
        """
        try:
            network = Network.objects.get(asn=asn)
        except Network.DoesNotExist:
            return None
        autonomous_system, _ = AutonomousSystem.objects.get_or_create(
            asn=network.asn,
            defaults={
                "name": network.name,
                "irr_as_set": network.irr_as_set,
                "ipv6_max_prefixes": network.info_prefixes6,
                "ipv4_max_prefixes": network.info_prefixes4,
            },
        )
        return autonomous_system

    def __str__(self):
        return f"AS{self.asn} - {self.name}"

    def export_policies(self):
        return self.export_routing_policies.all()

    def import_policies(self):
        return self.import_routing_policies.all()

    def get_absolute_url(self):
        return reverse("peering:autonomoussystem_details", args=[self.pk])

    def get_internet_exchange_peering_sessions_list_url(self):
        return reverse(
            "peering:autonomoussystem_internet_exchange_peering_sessions",
            args=[self.pk],
        )

    def get_direct_peering_sessions_list_url(self):
        return reverse(
            "peering:autonomoussystem_direct_peering_sessions", args=[self.pk]
        )

    def get_direct_peering_sessions(self, bgp_group=None):
        """
        Returns all direct peering sessions with this AS, optionally
        restricted to a given BGP group.
        """
        sessions = DirectPeeringSession.objects.filter(autonomous_system=self)
        # FIX: the bgp_group parameter was previously accepted but ignored
        if bgp_group:
            sessions = sessions.filter(bgp_group=bgp_group)
        return sessions

    def get_ixp_peering_sessions(self, internet_exchange_point=None):
        """
        Returns all IXP peering sessions with this AS, optionally restricted
        to a given Internet exchange point.
        """
        sessions = InternetExchangePeeringSession.objects.filter(
            autonomous_system=self
        )
        # FIX: the internet_exchange_point parameter was previously accepted
        # but ignored
        if internet_exchange_point:
            sessions = sessions.filter(
                ixp_connection__internet_exchange_point=internet_exchange_point
            )
        return sessions

    def get_internet_exchange_points(self, other):
        """
        Returns all IXPs this AS is peering on (with us).
        """
        return InternetExchange.objects.filter(
            pk__in=Connection.objects.filter(
                pk__in=self.get_ixp_peering_sessions().values_list(
                    "ixp_connection", flat=True
                )
            ).values_list("internet_exchange_point", flat=True),
            local_autonomous_system=other,
        )

    def get_shared_internet_exchange_points(self, other):
        """
        Returns all IXPs this AS has with the other one.
        """
        return InternetExchange.objects.filter(
            peeringdb_ixlan__id__in=get_shared_internet_exchanges(
                self, other
            ).values_list("id", flat=True),
            local_autonomous_system=other,
        )

    def get_missing_peering_sessions(self, other, internet_exchange_point=None):
        """
        Returns all missing peering sessions between this AS and the other one on a
        given IXP. As we are relying on PeeringDB to discover sessions there are no
        points in doing so if the IXP is not linked to a PeeringDB record.

        If the IXP is not specified then missing peering sessions will be returned for
        all shared IXPs between this and the other AS.
        """
        if self == other:
            return NetworkIXLan.objects.none()
        filter = {"autonomous_system": self}
        if internet_exchange_point:
            filter["ixp_connection__id__in"] = Connection.objects.filter(
                internet_exchange_point=internet_exchange_point
            ).values_list("id", flat=True)
        ip_sessions = InternetExchangePeeringSession.objects.filter(
            **filter
        ).values_list("ip_address", flat=True)
        # A NetworkIXLan is "missing" if either of its addresses has no
        # recorded session
        qs_filter = Q(asn=self.asn) & (
            ~Q(ipaddr6__in=ip_sessions) | ~Q(ipaddr4__in=ip_sessions)
        )
        if internet_exchange_point:
            qs_filter &= Q(ixlan=internet_exchange_point.peeringdb_ixlan)
        else:
            qs_filter &= Q(
                ixlan__in=self.get_shared_internet_exchange_points(other).values_list(
                    "peeringdb_ixlan", flat=True
                )
            )
        return NetworkIXLan.objects.filter(qs_filter)

    def synchronize_with_peeringdb(self):
        """
        Synchronizes AS properties with those found in PeeringDB.

        Returns True on success, False when there is no PeeringDB record or
        saving fails.
        """
        network = self.peeringdb_network
        if not network:
            return False
        # Only overwrite fields whose per-field sync flag is enabled
        if self.name_peeringdb_sync:
            self.name = network.name
        if self.irr_as_set_peeringdb_sync:
            self.irr_as_set = network.irr_as_set
        if self.ipv6_max_prefixes_peeringdb_sync:
            self.ipv6_max_prefixes = network.info_prefixes6
        if self.ipv4_max_prefixes_peeringdb_sync:
            self.ipv4_max_prefixes = network.info_prefixes4
        try:
            self.save()
            return True
        except Exception:
            # Best-effort sync: swallow save errors and report failure
            return False

    def retrieve_irr_as_set_prefixes(self):
        """
        Returns a prefix list for this AS' IRR AS-SET. If none is provided the
        function will try to look for a prefix list based on the AS number.

        This function will actually retrieve prefixes from IRR online sources. It is
        expected to be slow due to network operations and depending on the size of the
        data to process.
        """
        fallback = False
        as_sets = parse_irr_as_set(self.asn, self.irr_as_set)
        prefixes = {"ipv6": [], "ipv4": []}
        try:
            # For each AS-SET try getting IPv6 and IPv4 prefixes
            for as_set in as_sets:
                prefixes["ipv6"].extend(
                    call_irr_as_set_resolver(as_set, address_family=6)
                )
                prefixes["ipv4"].extend(
                    call_irr_as_set_resolver(as_set, address_family=4)
                )
        except ValueError:
            # Error parsing AS-SETs
            fallback = True
        # If fallback is triggered or no prefixes were found at all, try a
        # prefix lookup by AS number (parentheses make the intent explicit)
        if fallback or (not prefixes["ipv6"] and not prefixes["ipv4"]):
            self.logger.debug(
                f"falling back to AS number lookup to search for {self.asn} prefixes"
            )
            prefixes["ipv6"].extend(
                call_irr_as_set_resolver(f"AS{self.asn}", address_family=6)
            )
            prefixes["ipv4"].extend(
                call_irr_as_set_resolver(f"AS{self.asn}", address_family=4)
            )
        return prefixes

    def get_irr_as_set_prefixes(self, address_family=0):
        """
        Returns a prefix list for this AS' IRR AS-SET. If none is provided the list
        will be empty.

        If specified, only a list of the prefixes for the given address family will be
        returned. 6 for IPv6, 4 for IPv4, both for all other values.

        The stored database value will be used if it exists.
        """
        prefixes = (
            self.prefixes if self.prefixes else self.retrieve_irr_as_set_prefixes()
        )
        if address_family == 6:
            return prefixes["ipv6"]
        elif address_family == 4:
            return prefixes["ipv4"]
        else:
            return prefixes

    def get_contact_email_addresses(self):
        """
        Returns a list of all contacts with their respective e-mail addresses.
        The returned list can be used in form choice fields.
        """
        addresses = []
        # Append the contact set by the user if one has been set
        if self.contact_email:
            addresses.append(
                (
                    self.contact_email,
                    f"{self.contact_name} - {self.contact_email}"
                    if self.contact_name
                    else self.contact_email,
                )
            )
        # Append the contacts found in PeeringDB, avoid re-adding a contact if the
        # email address is the same as the one set by the user manually
        for contact in self.peeringdb_contacts:
            if contact.email and contact.email not in [a[0] for a in addresses]:
                addresses.append(
                    (
                        contact.email,
                        f"{contact.name} - {contact.email}"
                        if contact.name
                        else contact.email,
                    )
                )
        return addresses

    def get_cc_email_contacts(self):
        """
        Returns a list of user defined CC contacts from settings.
        The returned list can be used in form choice fields.
        """
        addresses = []
        # Extract user defined addresses; entries can be (email, label)
        # tuples or bare e-mail strings
        for email in settings.EMAIL_CC_CONTACTS:
            if isinstance(email, tuple):
                addresses.append(
                    (
                        email[0],
                        f"{email[1]} - {email[0]}" if len(email) > 1 else email[0],
                    )
                )
            else:
                addresses.append((email, email))
        return addresses

    def get_email_context(self):
        """
        Returns a dict, to be used in a Jinja2 environment, that holds enough data to
        help in creating an e-mail from a template.
        """
        affiliated = AutonomousSystem.objects.filter(affiliated=True)
        return {"affiliated_autonomous_systems": affiliated, "autonomous_system": self}

    def generate_email(self, email):
        """
        Renders an e-mail from a template.
        """
        return email.render(self.get_email_context())
class BGPGroup(AbstractGroup):
    """
    A named group of direct peering sessions whose BGP states can be polled
    from the routers terminating them.
    """

    logger = logging.getLogger("peering.manager.peering")

    class Meta(AbstractGroup.Meta):
        ordering = ["name", "slug"]
        verbose_name = "BGP group"

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("peering:bgpgroup_details", args=[self.pk])

    def get_peering_sessions_list_url(self):
        return reverse("peering:bgpgroup_peering_sessions", args=[self.pk])

    def get_peering_sessions(self):
        # All direct sessions attached to this group
        return DirectPeeringSession.objects.filter(bgp_group=self)

    def poll_peering_sessions(self):
        """
        Polls the routers of this group's sessions and updates each session's
        BGP state and prefix counters. Returns True when an update pass was
        performed, False otherwise.
        """
        if not self.check_bgp_session_states:
            self.logger.debug(
                'ignoring session states for %s, reason: "check disabled"',
                self.name.lower(),
            )
            return False
        peering_sessions = DirectPeeringSession.objects.prefetch_related(
            "router"
        ).filter(bgp_group=self)
        if not peering_sessions:
            # Empty result no need to go further
            return False
        # Get BGP neighbors details from router, but only get them once per
        # router even when several sessions share it
        bgp_neighbors_detail = {}
        for session in peering_sessions:
            if session.router not in bgp_neighbors_detail:
                detail = session.router.get_bgp_neighbors_detail()
                bgp_neighbors_detail.update(
                    {
                        session.router: session.router.bgp_neighbors_detail_as_list(
                            detail
                        )
                    }
                )
        if not bgp_neighbors_detail:
            # Empty result no need to go further
            return False
        # Apply all session updates atomically
        with transaction.atomic():
            for router, detail in bgp_neighbors_detail.items():
                for session in detail:
                    ip_address = session["remote_address"]
                    self.logger.debug(
                        "looking for session %s in %s", ip_address, self.name.lower()
                    )
                    try:
                        peering_session = DirectPeeringSession.objects.get(
                            ip_address=ip_address, bgp_group=self, router=router
                        )
                        # Get info that we are actually looking for
                        state = session["connection_state"].lower()
                        received = session["received_prefix_count"]
                        advertised = session["advertised_prefix_count"]
                        self.logger.debug(
                            "found session %s in %s with state %s",
                            ip_address,
                            self.name.lower(),
                            state,
                        )
                        # Update fields; clamp negative counters to zero
                        peering_session.bgp_state = state
                        peering_session.received_prefix_count = (
                            0 if received < 0 else received
                        )
                        peering_session.advertised_prefix_count = (
                            0 if advertised < 0 else advertised
                        )
                        # Record when the session was last seen established
                        if peering_session.bgp_state == BGPState.ESTABLISHED:
                            peering_session.last_established_state = timezone.now()
                        peering_session.save()
                    except DirectPeeringSession.DoesNotExist:
                        # Router reported a neighbor we do not track in this group
                        self.logger.debug(
                            "session %s in %s not found", ip_address, self.name.lower()
                        )
        # Save last session states update
        self.bgp_session_states_update = timezone.now()
        self.save()
        return True
class Community(ChangeLoggedModel, TaggableModel):
    """
    A BGP community that can be attached on ingress or egress.
    """

    name = models.CharField(max_length=128)
    slug = models.SlugField(unique=True, max_length=255)
    value = CommunityField(max_length=50)
    type = models.CharField(
        max_length=50, choices=CommunityType.choices, default=CommunityType.INGRESS
    )
    comments = models.TextField(blank=True)

    class Meta:
        verbose_name_plural = "communities"
        ordering = ["value", "name"]

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("peering:community_details", args=[self.pk])

    def get_type_html(self):
        """Renders the community type as a Bootstrap badge."""
        # Badge class per known community type; anything else is "Unknown"
        badge_classes = {
            CommunityType.EGRESS: "badge-primary",
            CommunityType.INGRESS: "badge-info",
        }
        if self.type in badge_classes:
            badge_type = badge_classes[self.type]
            text = self.get_type_display()
        else:
            badge_type = "badge-secondary"
            text = "Unknown"
        return mark_safe(f'<span class="badge {badge_type}">{text}</span>')
class DirectPeeringSession(BGPSession):
    """
    A BGP session established directly with a peer (transit, private peering
    or customer), optionally attached to a BGP group and a router.
    """

    local_autonomous_system = models.ForeignKey(
        "AutonomousSystem",
        on_delete=models.CASCADE,
        related_name="%(class)s_local_autonomous_system",
        null=True,
    )
    local_ip_address = InetAddressField(
        store_prefix_length=False,
        blank=True,
        null=True,
        verbose_name="Local IP address",
    )
    bgp_group = models.ForeignKey(
        "BGPGroup",
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        verbose_name="BGP group",
    )
    relationship = models.CharField(
        max_length=50,
        choices=BGPRelationship.choices,
        help_text="Relationship with the remote peer.",
    )
    router = models.ForeignKey(
        "Router", blank=True, null=True, on_delete=models.SET_NULL
    )

    class Meta(BGPSession.Meta):
        ordering = [
            "service_reference",
            "local_autonomous_system",
            "autonomous_system",
            "ip_address",
        ]

    def __str__(self):
        return f"{self.get_relationship_display()} - AS{self.autonomous_system.asn} - IP {self.ip_address}"

    def get_absolute_url(self):
        return reverse("peering:directpeeringsession_details", args=[self.pk])

    def poll(self):
        """
        Polls the attached router and refreshes this session's BGP state and
        prefix counters. Returns True on success, False otherwise.
        """
        # Check if we are able to get BGP details
        log = 'ignoring session states on {}, reason: "{}"'
        if not self.router or not self.router.platform:
            log = log.format(str(self.ip_address).lower(), "no usable router attached")
        elif self.bgp_group and not self.bgp_group.check_bgp_session_states:
            # NOTE(review): self.name is referenced here but does not appear
            # to be a field of this model — confirm BGPSession defines it,
            # otherwise this branch raises AttributeError
            log = log.format(self.name.lower(), "check disabled")
        else:
            log = None
        # If we cannot check for BGP details, don't do anything
        if log:
            self.logger.debug(log)
            return False
        # Get BGP session detail for this specific neighbor
        bgp_neighbor_detail = self.router.get_bgp_neighbors_detail(
            ip_address=self.ip_address
        )
        if bgp_neighbor_detail:
            received = bgp_neighbor_detail["received_prefix_count"]
            advertised = bgp_neighbor_detail["advertised_prefix_count"]
            # Update fields; clamp negative counters to zero
            self.bgp_state = bgp_neighbor_detail["connection_state"].lower()
            self.received_prefix_count = received if received > 0 else 0
            self.advertised_prefix_count = advertised if advertised > 0 else 0
            # Record when the session was last seen established
            if self.bgp_state == BGPState.ESTABLISHED:
                self.last_established_state = timezone.now()
            self.save()
            return True
        return False

    def get_relationship_html(self):
        # Renders the relationship as a Bootstrap badge
        if self.relationship == BGPRelationship.CUSTOMER:
            badge_type = "badge-danger"
        elif self.relationship == BGPRelationship.PRIVATE_PEERING:
            badge_type = "badge-success"
        elif self.relationship == BGPRelationship.TRANSIT_PROVIDER:
            badge_type = "badge-primary"
        else:
            badge_type = "badge-secondary"
        return mark_safe(
            f'<span class="badge {badge_type}">{self.get_relationship_display()}</span>'
        )
class InternetExchange(AbstractGroup):
    """
    An Internet exchange point (IXP) on which the local AS peers, optionally
    linked to a PeeringDB IXLan record for peer discovery.
    """

    peeringdb_ixlan = models.ForeignKey(
        "peeringdb.IXLan", on_delete=models.SET_NULL, blank=True, null=True
    )
    local_autonomous_system = models.ForeignKey(
        "AutonomousSystem", on_delete=models.CASCADE, null=True
    )
    logger = logging.getLogger("peering.manager.peering")

    class Meta(AbstractGroup.Meta):
        ordering = ["local_autonomous_system", "name", "slug"]

    @property
    def linked_to_peeringdb(self):
        """
        Tells if the PeeringDB object for this IX still exists.
        """
        return self.peeringdb_ixlan is not None

    @property
    def has_connected_routers(self):
        """True when at least one connection on this IXP has a router set."""
        # exists() avoids counting every row just to test emptiness
        return Connection.objects.filter(
            internet_exchange_point=self, router__isnull=False
        ).exists()

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse("peering:internetexchange_details", args=[self.pk])

    def get_peering_sessions_list_url(self):
        return reverse("peering:internetexchange_peering_sessions", args=[self.pk])

    def get_peer_list_url(self):
        return reverse("peering:internet_exchange_peers", args=[self.pk])

    def merged_export_policies(self, reverse=False):
        # Get own policies
        policies = [p for p in self.export_policies()]
        return list(reversed(policies)) if reverse else policies

    def merged_import_policies(self, reverse=False):
        # Get own policies
        policies = [p for p in self.import_policies()]
        return list(reversed(policies)) if reverse else policies

    def link_to_peeringdb(self):
        """
        Retrieves the PeeringDB IDs for this IX based on connections.

        The PeeringDB records will be returned on success. In any other cases `None`
        will be returned. The value will also be saved in model's field.
        """
        peeringdb_ixlan = None
        for connection in Connection.objects.filter(internet_exchange_point=self):
            # For each connection, try to see if a valid PeeringDB record exists and
            # make sure that they all point towards the same IX
            if connection.linked_to_peeringdb:
                if peeringdb_ixlan is None:
                    peeringdb_ixlan = connection.peeringdb_netixlan.ixlan
                else:
                    if peeringdb_ixlan != connection.peeringdb_netixlan.ixlan:
                        # Connections not belonging to the same IX
                        return None
        if peeringdb_ixlan is not None:
            self.peeringdb_ixlan = peeringdb_ixlan
            self.save()
        return peeringdb_ixlan

    def get_prefixes(self):
        """
        Returns all prefixes found (in PeeringDB) for this IXP.
        """
        if not self.linked_to_peeringdb:
            return IXLanPrefix.objects.none()
        return IXLanPrefix.objects.filter(ixlan=self.peeringdb_ixlan)

    def get_connections(self):
        """
        Returns all connections to this IXP.
        """
        return Connection.objects.filter(internet_exchange_point=self)

    def get_routers(self):
        # Routers attached to any connection on this IXP
        return Router.objects.filter(
            pk__in=self.get_connections().values_list("router", flat=True)
        )

    def get_peering_sessions(self):
        """
        Returns all peering sessions setup over this IXP.
        """
        return InternetExchangePeeringSession.objects.filter(
            ixp_connection__in=self.get_connections()
        )

    def get_autonomous_systems(self):
        """
        Returns all autonomous systems with setup peering sessions over this IXP.
        """
        return AutonomousSystem.objects.filter(
            pk__in=self.get_peering_sessions().values_list(
                "autonomous_system", flat=True
            )
        )

    def get_available_peers(self):
        """
        Finds available peers for the AS connected to this IX.
        """
        # Not linked to PeeringDB, cannot determine peers
        if not self.linked_to_peeringdb:
            return NetworkIXLan.objects.none()
        # Get all peering sessions currently existing
        existing_sessions = self.get_peering_sessions()
        ipv6_sessions = []
        ipv4_sessions = []
        for session in existing_sessions:
            ip = ipaddress.ip_address(session.ip_address)
            if ip.version == 6:
                ipv6_sessions.append(ip)
            elif ip.version == 4:
                ipv4_sessions.append(ip)
            else:
                self.logger.debug(f"peering session with strange ip: {ip}")
        # Candidates are other ASes on the same IXLan with at least one
        # address we do not have a session with yet
        return NetworkIXLan.objects.filter(
            ~Q(asn=self.local_autonomous_system.asn)
            & Q(ixlan=self.peeringdb_ixlan)
            & (~Q(ipaddr6__in=ipv6_sessions) | ~Q(ipaddr4__in=ipv4_sessions))
        ).order_by("asn")

    @transaction.atomic
    def poll_peering_sessions(self):
        """
        Polls the routers connected to this IXP and refreshes each known
        session's BGP state and prefix counters. Returns True when an update
        pass was performed, False otherwise.
        """
        # Get connected routers to this IXP
        connected_routers = self.get_routers()
        # Check if we are able to get BGP details
        log = 'ignoring session states on {}, reason: "{}"'
        # FIX: was `connected_routers.count() < 0`, which is never true since
        # counts are non-negative — the guard never fired
        if not connected_routers.exists():
            log = log.format(self.name.lower(), "no routers connected")
        elif not self.check_bgp_session_states:
            log = log.format(self.name.lower(), "check disabled")
        else:
            log = None
        # If we cannot check for BGP details, don't do anything
        if log:
            self.logger.debug(log)
            return False
        for router in connected_routers:
            # Get all BGP sessions detail
            bgp_neighbors_detail = router.get_bgp_neighbors_detail()
            # An error occurred, probably
            if not bgp_neighbors_detail:
                return False
            for _, as_details in bgp_neighbors_detail.items():
                for _, sessions in as_details.items():
                    # Check BGP sessions found
                    for session in sessions:
                        ip_address = session["remote_address"]
                        self.logger.debug(
                            f"looking for session {ip_address} in {self.name.lower()}"
                        )
                        # Check if the BGP session is on this IX
                        try:
                            ip = ipaddress.ip_address(ip_address)
                            lookup = {"ip_address": ip_address}
                            for connection in router.get_connections():
                                # Limit scope to address in connection's subnets
                                if (
                                    ip.version == 4
                                    and ip in connection.ipv4_address.network
                                ) or (
                                    ip.version == 6
                                    and ip in connection.ipv6_address.network
                                ):
                                    lookup["ixp_connection"] = connection
                                    break
                            ixp_session = InternetExchangePeeringSession.objects.get(
                                **lookup
                            )
                            # Get the BGP state for the session
                            state = session["connection_state"].lower()
                            received = session["received_prefix_count"]
                            advertised = session["advertised_prefix_count"]
                            self.logger.debug(
                                f"found session {ip_address} in {self.name.lower()} with state {state}"
                            )
                            # Update fields; clamp negative counters to zero
                            ixp_session.bgp_state = state
                            ixp_session.received_prefix_count = (
                                0 if received < 0 else received
                            )
                            ixp_session.advertised_prefix_count = (
                                0 if advertised < 0 else advertised
                            )
                            # Record when the session was last seen established
                            if ixp_session.bgp_state == BGPState.ESTABLISHED:
                                ixp_session.last_established_state = timezone.now()
                            ixp_session.save()
                        except InternetExchangePeeringSession.DoesNotExist:
                            self.logger.debug(
                                f"session {ip_address} in {self.name.lower()} not found"
                            )
        # Save last session states update
        self.bgp_session_states_update = timezone.now()
        self.save()
        return True

    @transaction.atomic
    def import_sessions(self, connection):
        """
        Imports sessions setup on a connected router.

        Returns a (created_sessions, created_asns) tuple of counters.
        """
        session_number, asn_number = 0, 0
        ignored_autonomous_systems = []
        allowed_prefixes = self.get_prefixes()
        sessions = connection.router.get_bgp_neighbors()

        def is_valid(ip_address):
            # An address is importable only if it falls inside one of the
            # IXP's PeeringDB prefixes of the same address family
            for p in allowed_prefixes:
                if p.prefix.version == ip_address.version:
                    if ip_address in p.prefix:
                        return True
            return False

        for session in sessions:
            ip = ipaddress.ip_address(session["ip_address"])
            if not is_valid(ip):
                self.logger.debug(
                    f"ignoring ixp session, {str(ip)} does not fit in any prefixes"
                )
                continue
            self.logger.debug(f"processing ixp session {str(ip)}")
            remote_asn = session["remote_asn"]
            try:
                InternetExchangePeeringSession.objects.get(
                    ixp_connection=connection, ip_address=ip
                )
                self.logger.debug(
                    f"ixp session {str(ip)} with as{remote_asn} already exists"
                )
                continue
            except InternetExchangePeeringSession.DoesNotExist:
                self.logger.debug(
                    f"ixp session {str(ip)} with as{remote_asn} does not exist"
                )
            # Get the AS, create it if needed
            autonomous_system = AutonomousSystem.create_from_peeringdb(remote_asn)
            # Do not count the AS if it does not have a PeeringDB record
            if autonomous_system:
                self.logger.debug(f"as{remote_asn} created")
                asn_number += 1
            else:
                if remote_asn not in ignored_autonomous_systems:
                    ignored_autonomous_systems.append(remote_asn)
                self.logger.debug(
                    f"could not create as{remote_asn}, session {str(ip)} ignored"
                )
            # Only add a session if we can use the AS it is linked to
            if autonomous_system:
                self.logger.debug(f"creating session {str(ip)}")
                InternetExchangePeeringSession.objects.create(
                    autonomous_system=autonomous_system,
                    ixp_connection=connection,
                    ip_address=ip,
                )
                session_number += 1
                self.logger.debug(f"session {str(ip)} created")
        return session_number, asn_number
class InternetExchangePeeringSession(BGPSession):
    """
    BGP session established with a remote autonomous system over a connection
    to an Internet Exchange Point, possibly via the IXP's route server.
    """

    # Connection used to reach the IXP fabric; nullable so a session can be
    # recorded before being bound to a connection
    ixp_connection = models.ForeignKey(
        "net.Connection",
        on_delete=models.CASCADE,
        null=True,
        verbose_name="IXP connection",
    )
    # True when the remote peer is the IXP's route server
    is_route_server = models.BooleanField(
        blank=True, default=False, verbose_name="Route server"
    )

    class Meta(BGPSession.Meta):
        ordering = [
            "service_reference",
            "autonomous_system",
            "ixp_connection",
            "ip_address",
        ]

    @staticmethod
    def create_from_peeringdb(affiliated, internet_exchange, netixlan):
        """
        Builds a list of *unsaved* InternetExchangePeeringSession objects that
        could be created from a PeeringDB NetworkIXLan record — one per
        (connection, address family) pair for which no session exists yet.

        `internet_exchange` may be None; in that case it is looked up from the
        NetworkIXLan's ixlan and the `affiliated` local autonomous system.
        Returns an empty list when `netixlan` is falsy.
        """
        results = []
        if not netixlan:
            return results
        # If the IXP is not given, e.g. we are in the provisionning section, try to
        # guess the IXP from the PeeringDB record
        if not internet_exchange:
            internet_exchange = InternetExchange.objects.filter(
                local_autonomous_system=affiliated, peeringdb_ixlan=netixlan.ixlan
            ).first()
        available_connections = Connection.objects.filter(
            internet_exchange_point=internet_exchange
        )
        for connection in available_connections:
            # IPv6 is considered before IPv4
            for version in (6, 4):
                ip_address = getattr(netixlan, f"ipaddr{version}", None)
                if not ip_address:
                    continue
                params = {
                    "autonomous_system": AutonomousSystem.create_from_peeringdb(
                        netixlan.asn
                    ),
                    "ixp_connection": connection,
                    "ip_address": ip_address.ip,
                }
                try:
                    # Try to get the session, in case it already exists
                    InternetExchangePeeringSession.objects.get(**params)
                except InternetExchangePeeringSession.DoesNotExist:
                    # Not found: remember a new, not-yet-saved session object
                    results.append(InternetExchangePeeringSession(**params))
        return results

    def __str__(self):
        if not self.ixp_connection:
            return f"AS{self.autonomous_system.asn} - IP {self.ip_address}"
        return f"{self.ixp_connection.internet_exchange_point.name} - AS{self.autonomous_system.asn} - IP {self.ip_address}"

    def get_absolute_url(self):
        """Returns the URL of the details view for this session."""
        return reverse("peering:internetexchangepeeringsession_details", args=[self.pk])

    def poll(self):
        """
        Refreshes this session's BGP state and prefix counters by querying the
        router attached to its IXP connection.

        Returns True if the session was updated and saved, False when no
        usable router is attached or no BGP details could be fetched.
        """
        # Check if we are able to get BGP details
        log = 'ignoring session states on {}, reason: "{}"'
        if not self.ixp_connection.router or not self.ixp_connection.router.platform:
            log = log.format(str(self.ip_address).lower(), "no usable router attached")
        else:
            log = None
        # If we cannot check for BGP details, don't do anything
        if log:
            self.logger.debug(log)
            return False
        # Get BGP session detail
        bgp_neighbor_detail = self.ixp_connection.router.get_bgp_neighbors_detail(
            ip_address=self.ip_address
        )
        if bgp_neighbor_detail:
            received = bgp_neighbor_detail["received_prefix_count"]
            advertised = bgp_neighbor_detail["advertised_prefix_count"]
            # Update fields; negative counters are clamped to 0
            self.bgp_state = bgp_neighbor_detail["connection_state"].lower()
            self.received_prefix_count = received if received > 0 else 0
            self.advertised_prefix_count = advertised if advertised > 0 else 0
            # Record when the session was last seen established
            if self.bgp_state == BGPState.ESTABLISHED:
                self.last_established_state = timezone.now()
            self.save()
            return True
        return False

    def exists_in_peeringdb(self):
        """
        Returns `True` if a NetworkIXLan exists for this session's IP.
        """
        # The IP may be stored as a string or as an ipaddress object
        if isinstance(self.ip_address, str):
            ip_version = ipaddress.ip_address(self.ip_address).version
        else:
            ip_version = self.ip_address.version
        try:
            NetworkIXLan.objects.get(**{f"ipaddr{ip_version}": str(self.ip_address)})
            return True
        except NetworkIXLan.DoesNotExist:
            pass
        return False

    def is_abandoned(self):
        """
        Returns True if a session is considered as abandoned. Returns False otherwise.
        A session is *not* considered as abandoned if it matches one of the following
        criteria:
        * The Internet Exchange is not linked to a PeeringDB record
        * User does not poll peering session states
        * The peer AS has no cached PeeringDB record
        * The peer AS has a cached PeeringDB record with the session IP address
        * The BGP state for the session is not idle or active
        """
        if (
            not self.ixp_connection.linked_to_peeringdb
            or not self.ixp_connection.internet_exchange_point.check_bgp_session_states
            or not self.autonomous_system.peeringdb_network
            or self.exists_in_peeringdb()
            or self.bgp_state not in [BGPState.IDLE, BGPState.ACTIVE]
        ):
            return False
        return True
class Router(ChangeLoggedModel, TaggableModel):
    """
    A network device on which BGP sessions are configured.

    Interaction with the device happens either directly through NAPALM or via
    NetBox's NAPALM proxy (when `use_netbox` is set). Configuration is rendered
    from an optional Jinja2-based `configuration_template`.
    """

    local_autonomous_system = models.ForeignKey(
        "AutonomousSystem", on_delete=models.CASCADE, null=True
    )
    name = models.CharField(max_length=128)
    hostname = models.CharField(max_length=256)
    platform = models.ForeignKey(
        "devices.Platform",
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        help_text="The router platform, used to interact with it",
    )
    encrypt_passwords = models.BooleanField(
        blank=True,
        default=False,
        help_text="Try to encrypt passwords for peering sessions",
    )
    configuration_template = models.ForeignKey(
        "Configuration", blank=True, null=True, on_delete=models.SET_NULL
    )
    device_state = models.CharField(
        max_length=20,
        choices=DeviceState.choices,
        default=DeviceState.ENABLED,
        blank=True,
        help_text="State of the device for configuration pushes",
    )
    # 0 means "not linked to a NetBox device" (see is_netbox_device)
    netbox_device_id = models.PositiveIntegerField(
        blank=True, default=0, verbose_name="NetBox device"
    )
    use_netbox = models.BooleanField(
        blank=True,
        default=False,
        help_text="Use NetBox to communicate instead of NAPALM",
    )
    config_context = models.JSONField(blank=True, null=True)
    # Per-router NAPALM overrides; global defaults come from settings.NAPALM_*
    napalm_username = models.CharField(blank=True, null=True, max_length=256)
    napalm_password = models.CharField(blank=True, null=True, max_length=256)
    napalm_timeout = models.PositiveIntegerField(blank=True, default=0)
    napalm_args = models.JSONField(blank=True, null=True)
    comments = models.TextField(blank=True)

    logger = logging.getLogger("peering.manager.napalm")

    class Meta:
        ordering = ["local_autonomous_system", "name"]
        permissions = [
            ("view_router_configuration", "Can view router's configuration"),
            ("deploy_router_configuration", "Can deploy router's configuration"),
        ]

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """Returns the URL of the details view for this router."""
        return reverse("peering:router_details", args=[self.pk])

    def get_direct_peering_sessions_list_url(self):
        """Returns the URL listing direct peering sessions of this router."""
        return reverse("peering:router_direct_peering_sessions", args=[self.pk])

    def is_netbox_device(self):
        """Tells whether this router is linked to a NetBox device record."""
        return self.netbox_device_id != 0

    def is_usable_for_task(self, job_result=None, logger=None):
        """
        Performs pre-flight checks to understand if a router is suited for background
        task processing.
        """
        if logger is None:
            logger = self.logger

        # Ensure device is not in disabled state
        if self.device_state == DeviceState.DISABLED:
            if job_result:
                job_result.mark_errored(
                    "Router is not enabled.", obj=self, logger=logger
                )
                job_result.save()
            return False

        # Check if the router runs on a supported platform
        if not self.platform:
            if job_result:
                job_result.mark_errored(
                    "Router has no assigned platform.", obj=self, logger=logger
                )
                job_result.save()
            return False
        if not self.platform.napalm_driver:
            if job_result:
                job_result.mark_errored(
                    "Router's platform has no NAPALM driver.", obj=self, logger=logger
                )
                job_result.save()
            return False

        return True

    def get_bgp_groups(self):
        """
        Returns BGP groups that can be deployed on this router.
        A group is considered as deployable on a router if direct peering sessions in
        the group are also attached to the router.
        """
        return BGPGroup.objects.filter(
            pk__in=DirectPeeringSession.objects.filter(router=self).values_list(
                "bgp_group", flat=True
            )
        )

    def get_connections(self, internet_exchange_point=None):
        """
        Returns connections attached to this router, optionally restricted to
        a single IXP.
        """
        if internet_exchange_point:
            return Connection.objects.filter(
                internet_exchange_point=internet_exchange_point, router=self
            )
        else:
            return Connection.objects.filter(router=self)

    def get_internet_exchange_points(self):
        """
        Returns IXPs that this router is connected to.
        """
        return InternetExchange.objects.filter(
            pk__in=self.get_connections().values_list(
                "internet_exchange_point", flat=True
            )
        )

    def get_direct_autonomous_systems(self, bgp_group=None):
        """
        Returns autonomous systems that are directly peered with this router,
        optionally restricted to a single BGP group.
        """
        if bgp_group:
            sessions = DirectPeeringSession.objects.filter(
                bgp_group=bgp_group, router=self
            ).values_list("autonomous_system", flat=True)
        else:
            sessions = DirectPeeringSession.objects.filter(router=self).values_list(
                "autonomous_system", flat=True
            )
        return AutonomousSystem.objects.filter(pk__in=sessions)

    def get_ixp_autonomous_systems(self, internet_exchange_point=None):
        """
        Returns autonomous systems with which this router peers over IXPs.
        """
        return AutonomousSystem.objects.filter(
            pk__in=InternetExchangePeeringSession.objects.filter(
                ixp_connection__in=self.get_connections(
                    internet_exchange_point=internet_exchange_point
                )
            ).values_list("autonomous_system", flat=True)
        )

    def get_autonomous_systems(self):
        """
        Returns all autonomous systems with which this router peers.
        """
        return self.get_direct_autonomous_systems().union(
            self.get_ixp_autonomous_systems()
        )

    def get_direct_peering_sessions(self, bgp_group=None):
        """
        Returns all direct peering sessions setup on this router.
        """
        if bgp_group:
            return DirectPeeringSession.objects.filter(bgp_group=bgp_group, router=self)
        else:
            return DirectPeeringSession.objects.filter(router=self)

    def get_ixp_peering_sessions(self, internet_exchange_point=None):
        """
        Returns all IXP peering sessions setup on this router.
        """
        return InternetExchangePeeringSession.objects.filter(
            ixp_connection__in=self.get_connections(
                internet_exchange_point=internet_exchange_point
            )
        )

    def get_configuration_context(self):
        """
        Returns a dict, to be used in a Jinja2 environment, that holds enough data to
        help in creating a configuration from a template.
        """
        return {
            "autonomous_systems": self.get_autonomous_systems(),
            "bgp_groups": self.get_bgp_groups(),
            "communities": Community.objects.all(),
            "internet_exchange_points": self.get_internet_exchange_points(),
            "local_as": self.local_autonomous_system,
            "routing_policies": RoutingPolicy.objects.all(),
            "router": self,
        }

    def generate_configuration(self):
        """
        Returns the configuration of a router according to the template in use.
        If no template is used, an empty string is returned.
        """
        if self.configuration_template:
            context = self.get_configuration_context()
            return self.configuration_template.render(context)
        else:
            return ""

    def get_napalm_device(self):
        """
        Returns an instance of the NAPALM driver to connect to a router, or
        None if no usable driver is configured or it cannot be imported.
        """
        if not self.platform or not self.platform.napalm_driver:
            self.logger.debug("no napalm driver defined")
            return None

        self.logger.debug(f"looking for napalm driver '{self.platform.napalm_driver}'")
        try:
            # Driver found, instantiate it
            driver = napalm.get_network_driver(self.platform.napalm_driver)
            self.logger.debug(f"found napalm driver '{self.platform.napalm_driver}'")

            # Merge NAPALM args: first global, then platform's, finish with
            # router's; copy the global dict so updates do not leak into
            # settings.NAPALM_ARGS and pollute other routers
            args = dict(settings.NAPALM_ARGS)
            if self.platform.napalm_args:
                args.update(self.platform.napalm_args)
            if self.napalm_args:
                args.update(self.napalm_args)

            # Router-level credentials take precedence over global settings
            return driver(
                hostname=self.hostname,
                username=self.napalm_username or settings.NAPALM_USERNAME,
                password=self.napalm_password or settings.NAPALM_PASSWORD,
                timeout=self.napalm_timeout or settings.NAPALM_TIMEOUT,
                optional_args=args,
            )
        except napalm.base.exceptions.ModuleImportError:
            # Unable to import proper driver from napalm
            # Most probably due to a broken install
            self.logger.error(
                f"no napalm driver: '{self.platform.napalm_driver}' for platform: '{self.platform}' found (not installed or does not exist)"
            )
            return None

    def open_napalm_device(self, device):
        """
        Opens a connection with a device using NAPALM.
        This method returns True if the connection is properly opened or False
        in any other cases. It handles exceptions that can occur during the
        connection opening process by itself.
        It is a wrapper method mostly used for logging purpose.
        """
        success = False

        if not device:
            return success

        try:
            self.logger.debug(f"connecting to {self.hostname}")
            device.open()
        except napalm.base.exceptions.ConnectionException as e:
            self.logger.error(
                f'error while trying to connect to {self.hostname} reason "{e}"'
            )
        except Exception:
            self.logger.error(f"error while trying to connect to {self.hostname}")
        else:
            self.logger.debug(f"successfully connected to {self.hostname}")
            success = True
        finally:
            # NOTE: returning from finally intentionally swallows any exception
            # re-raised above; callers only ever see a boolean
            return success

    def close_napalm_device(self, device):
        """
        Closes a connection with a device using NAPALM.
        This method returns True if the connection is properly closed or False
        if the device is not valid.
        It is a wrapper method mostly used for logging purpose.
        """
        if not device:
            return False

        device.close()
        self.logger.debug(f"closing connection with {self.hostname}")
        return True

    def test_napalm_connection(self):
        """
        Opens and closes a connection with a device using NAPALM to see if it
        is possible to interact with it.
        This method returns True only if the connection opening and closing are
        both successful.
        """
        opened, alive, closed = False, False, False
        device = self.get_napalm_device()

        # Open and close the test_napalm_connection
        self.logger.debug(f"testing connection with {self.hostname}")
        opened = self.open_napalm_device(device)
        if opened:
            alive = device.is_alive()
            if alive:
                closed = self.close_napalm_device(device)

        # Issue while opening or closing the connection
        if not opened or not closed or not alive:
            self.logger.error(
                f"cannot connect to {self.hostname}, napalm functions won't work"
            )

        return opened and closed and alive

    def set_napalm_configuration(self, config, commit=False):
        """
        Tries to merge a given configuration on a device using NAPALM.
        This methods returns the changes applied to the configuration if the
        merge was successful. It will return None in any other cases.
        The optional named argument 'commit' is a boolean which is used to
        know if the changes must be commited or discarded. The default value is
        False which means that the changes will be discarded.
        """
        error, changes = None, None

        # Ensure device is enabled, we allow maintenance mode to force a config push
        if self.device_state == DeviceState.DISABLED:
            self.logger.debug(f"device: {self.name} is disabled, exiting config push")
            return "device is disabled, cannot deploy config", changes

        # Make sure there actually is a configuration to merge
        if config is None or not isinstance(config, str) or not config.strip():
            self.logger.debug(f"no configuration to merge: {config}")
            error = "no configuration found to be merged"
            return error, changes

        device = self.get_napalm_device()
        opened = self.open_napalm_device(device)
        if opened:
            try:
                # Load the config
                self.logger.debug(f"merging configuration on {self.hostname}")
                device.load_merge_candidate(config=config)
                self.logger.debug(f"merged configuration\n{config}")
                # Get the config diff
                self.logger.debug(
                    f"checking for configuration changes on {self.hostname}"
                )
                changes = device.compare_config()
                self.logger.debug(f"raw napalm output\n{changes}")
                # Commit the config if required
                if commit:
                    self.logger.debug(f"commiting configuration on {self.hostname}")
                    device.commit_config()
                else:
                    self.logger.debug(f"discarding configuration on {self.hostname}")
                    device.discard_config()
            except Exception as e:
                # MergeConfigException and any other error are handled the
                # same way: record the error and drop the diff
                error = f'unable to merge configuration on {self.hostname} reason "{e}"'
                changes = None
                self.logger.debug(error)
            else:
                self.logger.debug(
                    f"successfully merged configuration on {self.hostname}"
                )
            finally:
                closed = self.close_napalm_device(device)
                if not closed:
                    self.logger.debug(
                        f"error while closing connection with {self.hostname}"
                    )
        else:
            error = f"unable to connect to {self.hostname}"

        return error, changes

    def _napalm_bgp_neighbors_to_peer_list(self, napalm_dict):
        """
        Flattens a NAPALM get_bgp_neighbors() dict (keyed by VRF) into a list
        of {'ip_address', 'remote_asn'} dicts, skipping duplicates and
        malformed entries.
        """
        bgp_peers = []

        if not napalm_dict:
            return bgp_peers

        # For each VRF
        for vrf in napalm_dict:
            # Get peers inside it
            peers = napalm_dict[vrf]["peers"]
            self.logger.debug(
                "found %s bgp neighbors in %s vrf on %s", len(peers), vrf, self.hostname
            )

            # For each peer handle its IP address and the needed details
            for ip, details in peers.items():
                if "remote_as" not in details:
                    self.logger.debug(
                        "ignored bgp neighbor %s in %s vrf on %s",
                        ip,
                        vrf,
                        self.hostname,
                    )
                elif ip in [str(i["ip_address"]) for i in bgp_peers]:
                    self.logger.debug(
                        "duplicate bgp neighbor %s on %s", ip, self.hostname
                    )
                else:
                    try:
                        # Save the BGP session (IP and remote ASN)
                        bgp_peers.append(
                            {
                                "ip_address": ipaddress.ip_address(ip),
                                "remote_asn": details["remote_as"],
                            }
                        )
                    except ValueError as e:
                        # Error while parsing the IP address
                        self.logger.error(
                            'ignored bgp neighbor %s in %s vrf on %s reason "%s"',
                            ip,
                            vrf,
                            self.hostname,
                            e,
                        )
                        # Force next iteration
                        continue

        return bgp_peers

    def get_napalm_bgp_neighbors(self):
        """
        Returns a list of dictionaries listing all BGP neighbors found on the
        router using NAPALM.
        Each dictionary contains two keys 'ip_address' and 'remote_asn'.
        If an error occurs or no BGP neighbors can be found, the returned list
        will be empty.
        """
        bgp_sessions = []

        device = self.get_napalm_device()
        opened = self.open_napalm_device(device)
        if opened:
            # Get all BGP neighbors on the router
            self.logger.debug("getting bgp neighbors on %s", self.hostname)
            bgp_neighbors = device.get_bgp_neighbors()
            self.logger.debug("raw napalm output %s", bgp_neighbors)
            self.logger.debug(
                "found %s vrfs with bgp neighbors on %s",
                len(bgp_neighbors),
                self.hostname,
            )
            bgp_sessions = self._napalm_bgp_neighbors_to_peer_list(bgp_neighbors)
            self.logger.debug(
                "found %s bgp neighbors on %s", len(bgp_sessions), self.hostname
            )

            # Close connection to the device
            closed = self.close_napalm_device(device)
            if not closed:
                self.logger.debug(
                    "error while closing connection with %s", self.hostname
                )

        return bgp_sessions

    def get_netbox_bgp_neighbors(self):
        """
        Returns a list of dictionaries listing all BGP neighbors found on the
        router using NetBox.
        Each dictionary contains two keys 'ip_address' and 'remote_asn'.
        If an error occurs or no BGP neighbors can be found, the returned list
        will be empty.
        """
        bgp_sessions = []

        self.logger.debug("getting bgp neighbors on %s", self.hostname)
        bgp_neighbors = NetBox().napalm(self.netbox_device_id, "get_bgp_neighbors")
        self.logger.debug("raw napalm output %s", bgp_neighbors)
        self.logger.debug(
            "found %s vrfs with bgp neighbors on %s", len(bgp_neighbors), self.hostname
        )
        bgp_sessions = self._napalm_bgp_neighbors_to_peer_list(bgp_neighbors)
        self.logger.debug(
            "found %s bgp neighbors on %s", len(bgp_sessions), self.hostname
        )

        return bgp_sessions

    def get_bgp_neighbors(self):
        """
        Returns a list of dictionaries listing all BGP neighbors found on the
        router using either NAPALM or NetBox based on the use_netbox flag.
        Each dictionary contains two keys 'ip_address' and 'remote_asn'.
        If an error occurs or no BGP neighbors can be found, the returned list
        will be empty.
        """
        if self.use_netbox:
            return self.get_netbox_bgp_neighbors()
        else:
            return self.get_napalm_bgp_neighbors()

    def find_bgp_neighbor_detail(self, bgp_neighbors, ip_address):
        """
        Finds and returns a single BGP neighbor amongst others.
        """
        # NAPALM dict expected
        if not isinstance(bgp_neighbors, dict):
            return None

        # Make sure to use an IP object
        if isinstance(ip_address, str):
            ip_address = ipaddress.ip_address(ip_address)

        for _, asn in bgp_neighbors.items():
            for _, neighbors in asn.items():
                for neighbor in neighbors:
                    neighbor_ip_address = ipaddress.ip_address(
                        neighbor["remote_address"]
                    )
                    if ip_address == neighbor_ip_address:
                        return neighbor

        return None

    def get_napalm_bgp_neighbors_detail(self, ip_address=None):
        """
        Returns a list of dictionaries listing all BGP neighbors found on the
        router using NAPALM and their respective detail.
        If an error occurs or no BGP neighbors can be found, the returned list
        will be empty.
        """
        bgp_neighbors_detail = []

        device = self.get_napalm_device()
        opened = self.open_napalm_device(device)
        if opened:
            # Get all BGP neighbors on the router
            self.logger.debug("getting bgp neighbors detail on %s", self.hostname)
            bgp_neighbors_detail = device.get_bgp_neighbors_detail()
            self.logger.debug("raw napalm output %s", bgp_neighbors_detail)
            self.logger.debug(
                "found %s vrfs with bgp neighbors on %s",
                len(bgp_neighbors_detail),
                self.hostname,
            )

            # Close connection to the device
            closed = self.close_napalm_device(device)
            if not closed:
                self.logger.debug(
                    "error while closing connection with %s", self.hostname
                )

        return (
            bgp_neighbors_detail
            if not ip_address
            else self.find_bgp_neighbor_detail(bgp_neighbors_detail, ip_address)
        )

    def get_netbox_bgp_neighbors_detail(self, ip_address=None):
        """
        Returns a list of dictionaries listing all BGP neighbors found on the
        router using NetBox and their respective detail.
        If an error occurs or no BGP neighbors can be found, the returned list
        will be empty.
        """
        bgp_neighbors_detail = []

        self.logger.debug("getting bgp neighbors detail on %s", self.hostname)
        bgp_neighbors_detail = NetBox().napalm(
            self.netbox_device_id, "get_bgp_neighbors_detail"
        )
        self.logger.debug("raw napalm output %s", bgp_neighbors_detail)
        self.logger.debug(
            "found %s vrfs with bgp neighbors on %s",
            len(bgp_neighbors_detail),
            self.hostname,
        )

        return (
            bgp_neighbors_detail
            if not ip_address
            else self.find_bgp_neighbor_detail(bgp_neighbors_detail, ip_address)
        )

    def get_bgp_neighbors_detail(self, ip_address=None):
        """
        Returns a list of dictionaries listing all BGP neighbors found on the router
        using either NAPALM or NetBox depending on the use_netbox flag and their
        respective detail.
        If the `ip_address` named parameter is not `None`, only the neighbor with this
        IP address will be returned
        If an error occurs or no BGP neighbors can be found, the returned list
        will be empty.
        """

        # Cached to avoid hammering the device on repeated lookups
        @cached_as(self, timeout=settings.CACHE_BGP_DETAIL_TIMEOUT)
        def _get_bgp_neighbors_detail():
            if self.use_netbox:
                return self.get_netbox_bgp_neighbors_detail(ip_address=ip_address)
            else:
                return self.get_napalm_bgp_neighbors_detail(ip_address=ip_address)

        return _get_bgp_neighbors_detail()

    def bgp_neighbors_detail_as_list(self, bgp_neighbors_detail):
        """
        Returns a list based on the dict returned by calling
        get_napalm_bgp_neighbors_detail.
        """
        flattened = []

        if not bgp_neighbors_detail:
            return flattened

        for vrf in bgp_neighbors_detail:
            for asn in bgp_neighbors_detail[vrf]:
                flattened.extend(bgp_neighbors_detail[vrf][asn])

        return flattened
class RoutingPolicy(ChangeLoggedModel, TaggableModel):
    """
    A named policy applied on import and/or export for BGP sessions, ordered
    by weight (higher weight means higher priority).
    """

    name = models.CharField(max_length=128)
    slug = models.SlugField(unique=True, max_length=255)
    type = models.CharField(
        max_length=50,
        choices=RoutingPolicyType.choices,
        default=RoutingPolicyType.IMPORT,
    )
    weight = models.PositiveSmallIntegerField(
        default=0, help_text="The higher the number, the higher the priority"
    )
    address_family = models.PositiveSmallIntegerField(
        default=IPFamily.ALL, choices=IPFamily.choices
    )
    config_context = models.JSONField(blank=True, null=True)
    comments = models.TextField(blank=True)

    class Meta:
        verbose_name_plural = "routing policies"
        ordering = ["-weight", "name"]

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """Returns the URL of the details view for this routing policy."""
        return reverse("peering:routingpolicy_details", args=[self.pk])

    def get_type_html(self, display_name=False):
        """
        Renders the policy type as an HTML badge; when `display_name` is True
        the badge shows the policy name instead of the type label.
        """
        # Badge CSS class per known policy type
        known_badges = {
            RoutingPolicyType.EXPORT: "badge-primary",
            RoutingPolicyType.IMPORT: "badge-info",
            RoutingPolicyType.IMPORT_EXPORT: "badge-dark",
        }
        if self.type in known_badges:
            badge_type = known_badges[self.type]
            text = self.get_type_display()
        else:
            badge_type = "badge-secondary"
            text = "Unknown"
        if display_name:
            text = self.name
        return mark_safe(f'<span class="badge {badge_type}">{text}</span>')
|
en
| 0.854915
|
Returns all direct peering sessions with this AS. Returns all IXP peering sessions with this AS. Returns all IXPs this AS is peering on (with us). Returns all IXPs this AS has with the other one. Returns all missing peering sessions between this AS and the other one on a given IXP. As we are relying on PeeringDB to discover sessions there are no points in doing so if the IXP is not linked to a PeeringDB record. If the IXP is not specified then missing peering sessions will be returned for all shared IXPs between this and the other AS. Synchronizes AS properties with those found in PeeringDB. Returns a prefix list for this AS' IRR AS-SET. If none is provided the function will try to look for a prefix list based on the AS number. This function will actually retrieve prefixes from IRR online sources. It is expected to be slow due to network operations and depending on the size of the data to process. # For each AS-SET try getting IPv6 and IPv4 prefixes # Error parsing AS-SETs # If fallback is triggered or no prefixes found, try prefix lookup by ASN Returns a prefix list for this AS' IRR AS-SET. If none is provided the list will be empty. If specified, only a list of the prefixes for the given address family will be returned. 6 for IPv6, 4 for IPv4, both for all other values. The stored database value will be used if it exists. Returns a list of all contacts with their respective e-mails addresses. The returned list can be used in form choice fields. # Append the contact set by the user if one has been set # Append the contacts found in PeeringDB, avoid re-adding a contact if the # email address is the same as the one set by the user manually Returns a list of user defined CC contacts from settings # Extract user defined addresses Returns a dict, to be used in a Jinja2 environment, that holds enough data to help in creating an e-mail from a template. Renders an e-mail from a template. 
# Empty result no need to go further # Get BGP neighbors details from router, but only get them once # Empty result no need to go further # Get info that we are actually looking for # Update fields # Update the BGP state of the session # Save last session states update # Check if we are able to get BGP details # If we cannot check for BGP details, don't do anything # Get BGP session detail # Update fields Tells if the PeeringDB object for this IX still exists. # Get own policies # Get own policies Retrieves the PeeringDB IDs for this IX based on connections. The PeeringDB records will be returned on success. In any other cases `None` will be returned. The value will also be saved in model's field. # For each connection, try to see if a valid PeeringDB record exists and # make sure that they all point towards the same IX # Connections not belonging to the same IX Returns all prefixes found (in PeeringDB) for this IXP. Returns all connections to this IXP. Returns all peering sessions setup over this IXP. Returns all autonomous systems with setup peering sessions over this IXP. Finds available peers for the AS connected to this IX. # Not linked to PeeringDB, cannot determine peers # Get all peering sessions currently existing # Get connected routers to this IXP # Check if we are able to get BGP details # If we cannot check for BGP details, don't do anything # Get all BGP sessions detail # An error occured, probably # Check BGP sessions found # Check if the BGP session is on this IX # Limit scope to address in connection's subnets # Get the BGP state for the session # Update fields # Update the BGP state of the session # Save last session states update Imports sessions setup on a connected router. # Get the AS, create it if needed # Do not count the AS if it does not have a PeeringDB record # Only add a session if we can use the AS it is linked to # If the IXP is not given, e.g. 
we are in the provisionning section, try to # guess the IXP from the PeeringDB record # Try to get the session, in case it already exists # Check if we are able to get BGP details # If we cannot check for BGP details, don't do anything # Get BGP session detail # Update fields Returns `True` if a NetworkIXLan exists for this session's IP. Returns True if a session is considered as abandoned. Returns False otherwise. A session is *not* considered as abandoned if it matches one of the following criteria: * The Internet Exchange is not linked to a PeeringDB record * User does not poll peering session states * The peer AS has no cached PeeringDB record * The peer AS has a cached PeeringDB record with the session IP address * The BGP state for the session is not idle or active Performs pre-flight checks to understand if a router is suited for background task processing. # Ensure device is not in disabled state # Check if the router runs on a supported platform Returns BGP groups that can be deployed on this router. A group is considered as deployable on a router if direct peering sessions in the group are also attached to the router. Returns connections attached to this router. Returns IXPs that this router is connected to. Returns autonomous systems that are directly peered with this router. Returns autonomous systems with which this router peers over IXPs. Returns all autonomous systems with which this router peers. Returns all direct peering sessions setup on this router. Returns all IXP peering sessions setup on this router. Returns a dict, to be used in a Jinja2 environment, that holds enough data to help in creating a configuration from a template. Returns the configuration of a router according to the template in use. If no template is used, an empty string is returned. Returns an instance of the NAPALM driver to connect to a router. 
# Driver found, instanciate it # Merge NAPALM args: first global, then platform's, finish with router's # Unable to import proper driver from napalm # Most probably due to a broken install Opens a connection with a device using NAPALM. This method returns True if the connection is properly opened or False in any other cases. It handles exceptions that can occur during the connection opening process by itself. It is a wrapper method mostly used for logging purpose. Closes a connection with a device using NAPALM. This method returns True if the connection is properly closed or False if the device is not valid. It is a wrapper method mostly used for logging purpose. Opens and closes a connection with a device using NAPALM to see if it is possible to interact with it. This method returns True only if the connection opening and closing are both successful. # Open and close the test_napalm_connection # Issue while opening or closing the connection Tries to merge a given configuration on a device using NAPALM. This methods returns the changes applied to the configuration if the merge was successful. It will return None in any other cases. The optional named argument 'commit' is a boolean which is used to know if the changes must be commited or discarded. The default value is False which means that the changes will be discarded. # Ensure device is enabled, we allow maintenance mode to force a config push # Make sure there actually a configuration to merge # Load the config # Get the config diff # Commit the config if required # For each VRF # Get peers inside it # For each peer handle its IP address and the needed details # Save the BGP session (IP and remote ASN) # Error while parsing the IP address # Force next iteration Returns a list of dictionaries listing all BGP neighbors found on the router using NAPALM. Each dictionary contains two keys 'ip_address' and 'remote_asn'. If an error occurs or no BGP neighbors can be found, the returned list will be empty. 
# Get all BGP neighbors on the router # Close connection to the device Returns a list of dictionaries listing all BGP neighbors found on the router using NetBox. Each dictionary contains two keys 'ip_address' and 'remote_asn'. If an error occurs or no BGP neighbors can be found, the returned list will be empty. Returns a list of dictionaries listing all BGP neighbors found on the router using either NAPALM or NetBox based on the use_netbox flag. Each dictionary contains two keys 'ip_address' and 'remote_asn'. If an error occurs or no BGP neighbors can be found, the returned list will be empty. Finds and returns a single BGP neighbor amongst others. # NAPALM dict expected # Make sure to use an IP object Returns a list of dictionaries listing all BGP neighbors found on the router using NAPALM and there respective detail. If an error occurs or no BGP neighbors can be found, the returned list will be empty. # Get all BGP neighbors on the router # Close connection to the device Returns a list of dictionaries listing all BGP neighbors found on the router using NetBox and their respective detail. If an error occurs or no BGP neighbors can be found, the returned list will be empty. Returns a list of dictionaries listing all BGP neighbors found on the router using either NAPALM or NetBox depending on the use_netbox flag and their respective detail. If the `ip_address` named parameter is not `None`, only the neighbor with this IP address will be returned If an error occurs or no BGP neighbors can be found, the returned list will be empty. Returns a list based on the dict returned by calling get_napalm_bgp_neighbors_detail.
| 1.579162
| 2
|
tests/test_logging.py
|
k-dominik/pytest-qt
| 0
|
6625384
|
import datetime
import pytest
from pytestqt.qt_compat import qt_api
@pytest.mark.parametrize("test_succeeds", [True, False])
@pytest.mark.parametrize("qt_log", [True, False])
def test_basic_logging(testdir, test_succeeds, qt_log):
    """
    Test Qt logging capture output.

    The inner test installs its own Qt message handler that copies every
    message to stderr, then emits one message per supported level.  When the
    inner test passes, no captured section may appear in the output; when it
    fails, the messages must show up under "Captured Qt messages" (capture
    enabled) or under "Captured stderr call" (capture disabled via
    ``--no-qt-log``).

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makepyfile(
        """
        import sys
        from pytestqt.qt_compat import qt_api
        def to_unicode(s):
            return s.decode('utf-8', 'replace') if isinstance(s, bytes) else s
        if qt_api.qInstallMessageHandler:
            def print_msg(msg_type, context, message):
                sys.stderr.write(to_unicode(message) + '\\n')
            qt_api.qInstallMessageHandler(print_msg)
        else:
            def print_msg(msg_type, message):
                sys.stderr.write(to_unicode(message) + '\\n')
            qt_api.qInstallMsgHandler(print_msg)
        def test_types():
            # qInfo is not exposed by the bindings yet (#225)
            # qt_api.qInfo('this is an INFO message')
            qt_api.qDebug('this is a DEBUG message')
            qt_api.qWarning('this is a WARNING message')
            qt_api.qCritical('this is a CRITICAL message')
            assert {}
        """.format(
            test_succeeds
        )
    )
    # Only pass --no-qt-log when Qt log capture should be disabled.
    res = testdir.runpytest(*(["--no-qt-log"] if not qt_log else []))
    if test_succeeds:
        # Passing tests produce no capture sections at all.
        assert "Captured Qt messages" not in res.stdout.str()
        assert "Captured stderr call" not in res.stdout.str()
    else:
        if qt_log:
            res.stdout.fnmatch_lines(
                [
                    "*-- Captured Qt messages --*",
                    # qInfo is not exposed by the bindings yet (#232)
                    # '*QtInfoMsg: this is an INFO message*',
                    "*QtDebugMsg: this is a DEBUG message*",
                    "*QtWarningMsg: this is a WARNING message*",
                    "*QtCriticalMsg: this is a CRITICAL message*",
                ]
            )
        else:
            res.stdout.fnmatch_lines(
                [
                    "*-- Captured stderr call --*",
                    # qInfo is not exposed by the bindings yet (#232)
                    # '*QtInfoMsg: this is an INFO message*',
                    # 'this is an INFO message*',
                    "this is a DEBUG message*",
                    "this is a WARNING message*",
                    "this is a CRITICAL message*",
                ]
            )
def test_qinfo(qtlog):
    """Test INFO messages when we have means to do so. Should be temporary until bindings
    catch up and expose qInfo (or at least QMessageLogger), then we should update
    the other logging tests properly. #232
    """
    api_name = qt_api.pytest_qt_api
    if api_name.startswith("pyside"):
        # PySide is expected to have no qInfo at all; fail loudly if that
        # ever changes so the other logging tests can be updated.
        assert (
            qt_api.qInfo is None
        ), "pyside does not expose qInfo. If it does, update this test."
        return
    if api_name.startswith("pyqt4"):
        pytest.skip("qInfo and QtInfoMsg not supported in PyQt 4")
    qt_api.qInfo("this is an INFO message")
    captured = [(rec.type, rec.message.strip()) for rec in qtlog.records]
    assert captured == [(qt_api.QtInfoMsg, "this is an INFO message")]
def test_qtlog_fixture(qtlog):
    """
    Exercise the qtlog fixture: emitted Qt messages are recorded in order,
    and the records attribute cannot be reassigned.
    """
    # qInfo is not exposed by the bindings yet (#232)
    qt_api.qDebug("this is a DEBUG message")
    qt_api.qWarning("this is a WARNING message")
    qt_api.qCritical("this is a CRITICAL message")
    expected = [
        (qt_api.QtDebugMsg, "this is a DEBUG message"),
        (qt_api.QtWarningMsg, "this is a WARNING message"),
        (qt_api.QtCriticalMsg, "this is a CRITICAL message"),
    ]
    captured = [(rec.type, rec.message.strip()) for rec in qtlog.records]
    assert captured == expected
    # `records` is exposed as a read-only attribute.
    with pytest.raises(AttributeError):
        qtlog.records = []
def test_fixture_with_logging_disabled(testdir):
    """
    With --no-qt-log given on the command line, the qtlog fixture must not
    capture anything.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        def test_types(qtlog):
            qt_api.qWarning('message')
            assert qtlog.records == []
        """
    )
    result = testdir.runpytest("--no-qt-log")
    result.stdout.fnmatch_lines("*1 passed*")
@pytest.mark.parametrize("use_context_manager", [True, False])
def test_disable_qtlog_context_manager(testdir, use_context_manager):
    """
    Test qtlog.disabled() context manager: a critical message emitted inside
    the disabled block must not fail the test, while the same message emitted
    without the context manager must.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = CRITICAL
        """
    )
    # "if 1:" keeps the indented body syntactically valid when the context
    # manager is not used.
    code = "with qtlog.disabled():" if use_context_manager else "if 1:"
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        def test_1(qtlog):
            {code}
                qt_api.qCritical('message')
        """.format(
            code=code
        )
    )
    outcome = testdir.inline_run()
    if use_context_manager:
        outcome.assertoutcome(passed=1, failed=0)
    else:
        outcome.assertoutcome(passed=0, failed=1)
@pytest.mark.parametrize("use_mark", [True, False])
def test_disable_qtlog_mark(testdir, use_mark):
    """
    Test the no_qt_log mark, which disables logging capture for a single
    test: with the mark applied a critical message is ignored, without it
    the test fails.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = CRITICAL
        """
    )
    if use_mark:
        mark = "@pytest.mark.no_qt_log"
    else:
        mark = ""
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        import pytest
        {mark}
        def test_1():
            qt_api.qCritical('message')
        """.format(
            mark=mark
        )
    )
    outcome = testdir.inline_run()
    if use_mark:
        outcome.assertoutcome(passed=1, failed=0)
    else:
        outcome.assertoutcome(passed=0, failed=1)
def test_logging_formatting(testdir):
    """
    Test custom formatting for logging messages.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        def test_types():
            qt_api.qWarning('this is a WARNING message')
            assert 0
        """
    )
    # Custom format exercising the record attributes exposed to users.
    f = "{rec.type_name} {rec.log_type_name} {rec.when:%Y-%m-%d}: {rec.message}"
    res = testdir.runpytest("--qt-log-format={}".format(f))
    # The 'when' timestamp is generated while the inner test runs, so today's
    # date is the expected value.
    today = "{:%Y-%m-%d}".format(datetime.datetime.now())
    res.stdout.fnmatch_lines(
        [
            "*-- Captured Qt messages --*",
            "QtWarningMsg WARNING {}: this is a WARNING message*".format(today),
        ]
    )
@pytest.mark.parametrize(
    "level, expect_passes", [("DEBUG", 1), ("WARNING", 2), ("CRITICAL", 3), ("NO", 4)]
)
def test_logging_fails_tests(testdir, level, expect_passes):
    """
    Test qt_log_level_fail ini option.

    Each inner test emits a message one level higher than the previous one,
    so the number of passing tests grows as the failure threshold is raised;
    "NO" disables the feature and all four tests pass.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = {level}
        """.format(
            level=level
        )
    )
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        def test_1():
            qt_api.qDebug('this is a DEBUG message')
        def test_2():
            qt_api.qWarning('this is a WARNING message')
        def test_3():
            qt_api.qCritical('this is a CRITICAL message')
        def test_4():
            assert 1
        """
    )
    res = testdir.runpytest()
    lines = []
    if level != "NO":
        # At least one inner test must have failed because of its messages.
        lines.extend(
            [
                "*Failure: Qt messages with level {} or above emitted*".format(
                    level.upper()
                ),
                "*-- Captured Qt messages --*",
            ]
        )
    lines.append("*{} passed*".format(expect_passes))
    res.stdout.fnmatch_lines(lines)
def test_logging_fails_tests_mark(testdir):
    """
    Test mark overrides what's configured in the ini file.

    The ini threshold is CRITICAL, but the qt_log_level_fail mark lowers it
    to WARNING for this test, so emitting a warning alone must fail it.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = CRITICAL
        """
    )
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qWarning
        import pytest
        @pytest.mark.qt_log_level_fail('WARNING')
        def test_1():
            qWarning('message')
        """
    )
    res = testdir.inline_run()
    res.assertoutcome(failed=1)
def test_logging_fails_ignore(testdir):
    """
    Test qt_log_ignore config option.

    Messages matching any of the configured regular expressions do not fail
    a test on their own, but still show up (marked IGNORED) when the test
    fails for another reason.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = CRITICAL
        qt_log_ignore =
            WM_DESTROY.*sent
            WM_PAINT not handled
        """
    )
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        import pytest
        def test1():
            qt_api.qCritical('a critical message')
        def test2():
            qt_api.qCritical('WM_DESTROY was sent')
        def test3():
            qt_api.qCritical('WM_DESTROY was sent')
            assert 0
        def test4():
            qt_api.qCritical('WM_PAINT not handled')
            qt_api.qCritical('another critical message')
        """
    )
    res = testdir.runpytest()
    lines = [
        # test1 fails because it has emitted a CRITICAL message and that message
        # does not match any regex in qt_log_ignore
        "*_ test1 _*",
        "*Failure: Qt messages with level CRITICAL or above emitted*",
        "*QtCriticalMsg: a critical message*",
        # test2 succeeds because its message matches qt_log_ignore
        # test3 fails because of an assert, but the ignored message should
        # still appear in the failure message
        "*_ test3 _*",
        "*AssertionError*",
        "*QtCriticalMsg: WM_DESTROY was sent*(IGNORED)*",
        # test4 fails because one message is ignored but the other isn't
        "*_ test4 _*",
        "*Failure: Qt messages with level CRITICAL or above emitted*",
        "*QtCriticalMsg: WM_PAINT not handled*(IGNORED)*",
        "*QtCriticalMsg: another critical message*",
        # summary
        "*3 failed, 1 passed*",
    ]
    res.stdout.fnmatch_lines(lines)
@pytest.mark.parametrize("message", ["match-global", "match-mark"])
@pytest.mark.parametrize("marker_args", ["'match-mark', extend=True", "'match-mark'"])
def test_logging_mark_with_extend(testdir, message, marker_args):
    """
    Test qt_log_ignore mark with extend=True.

    With extend=True (also the default), the mark's patterns are added to
    the global qt_log_ignore list, so both 'match-global' and 'match-mark'
    messages are ignored and the inner test always passes.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = CRITICAL
        qt_log_ignore = match-global
        """
    )
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        import pytest
        @pytest.mark.qt_log_ignore({marker_args})
        def test1():
            qt_api.qCritical('{message}')
        """.format(
            message=message, marker_args=marker_args
        )
    )
    res = testdir.inline_run()
    res.assertoutcome(passed=1, failed=0)
@pytest.mark.parametrize(
    "message, error_expected", [("match-global", True), ("match-mark", False)]
)
def test_logging_mark_without_extend(testdir, message, error_expected):
    """
    Test qt_log_ignore mark with extend=False: the mark replaces the global
    ignore list entirely, so only 'match-mark' messages are ignored and a
    'match-global' message now fails the test.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = CRITICAL
        qt_log_ignore = match-global
        """
    )
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        import pytest
        @pytest.mark.qt_log_ignore('match-mark', extend=False)
        def test1():
            qt_api.qCritical('{message}')
        """.format(
            message=message
        )
    )
    outcome = testdir.inline_run()
    failures = 1 if error_expected else 0
    outcome.assertoutcome(passed=1 - failures, failed=failures)
def test_logging_mark_with_invalid_argument(testdir):
    """
    Test qt_log_ignore mark with invalid keyword argument.

    An unknown keyword argument must produce a setup error rather than a
    silently mis-configured test.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makepyfile(
        """
        import pytest
        @pytest.mark.qt_log_ignore('match-mark', does_not_exist=True)
        def test1():
            pass
        """
    )
    res = testdir.runpytest()
    lines = [
        "*= ERRORS =*",
        "*_ ERROR at setup of test1 _*",
        "*ValueError: Invalid keyword arguments in {'does_not_exist': True} "
        "for qt_log_ignore mark.",
        # summary
        "*= 1 error in*",
    ]
    res.stdout.fnmatch_lines(lines)
@pytest.mark.parametrize("apply_mark", [True, False])
def test_logging_fails_ignore_mark_multiple(testdir, apply_mark):
    """
    Make sure qt_log_ignore mark supports multiple arguments.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    mark = '@pytest.mark.qt_log_ignore("WM_DESTROY", "WM_PAINT")' if apply_mark else ""
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        import pytest
        @pytest.mark.qt_log_level_fail('CRITICAL')
        {mark}
        def test1():
            qt_api.qCritical('WM_PAINT was sent')
        """.format(
            mark=mark
        )
    )
    outcome = testdir.inline_run()
    if apply_mark:
        outcome.assertoutcome(passed=1, failed=0)
    else:
        outcome.assertoutcome(passed=0, failed=1)
def test_lineno_failure(testdir):
    """
    Test that tests when failing because log messages were emitted report
    the correct line number.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = WARNING
        """
    )
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        def test_foo():
            assert foo() == 10
        def foo():
            qt_api.qWarning('this is a WARNING message')
            return 10
        """
    )
    res = testdir.runpytest()
    if qt_api.pytest_qt_api == "pyqt5":
        # PyQt5 provides context information, so the emitting function and
        # its line number are reported alongside the failure location.
        res.stdout.fnmatch_lines(
            [
                "*test_lineno_failure.py:2: Failure*",
                "*test_lineno_failure.py:foo:5:*",
                " QtWarningMsg: this is a WARNING message",
            ]
        )
    else:
        res.stdout.fnmatch_lines("*test_lineno_failure.py:2: Failure*")
def test_context_none(testdir):
    """
    Sometimes PyQt5 will emit a context with some/all attributes set as None
    instead of appropriate file, function and line number.

    Test that when this happens the plugin doesn't break, and it filters
    out the context information.

    :type testdir: _pytest.pytester.TmpTestdir
    """
    if qt_api.pytest_qt_api != "pyqt5":
        pytest.skip("Context information only available in PyQt5")
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        def test_foo(request):
            log_capture = request.node.qt_log_capture
            context = log_capture._Context(None, None, 0, None)
            log_capture._handle_with_context(qt_api.QtWarningMsg,
                context, "WARNING message")
            assert 0
        """
    )
    res = testdir.runpytest()
    # Bug fix: the previous check searched for the literal text
    # "*None:None:0:*".  The asterisks are fnmatch wildcards, but `in` does
    # a plain substring search, so that literal could never occur and the
    # assertion was vacuous.  Search for the bare context string instead.
    assert "None:None:0:" not in res.stdout.str()
    res.stdout.fnmatch_lines(["* QtWarningMsg: WARNING message*"])
def test_logging_broken_makereport(testdir):
    """
    Make sure logging's makereport hookwrapper doesn't hide exceptions.

    See https://github.com/pytest-dev/pytest-qt/issues/98

    :type testdir: _pytest.pytester.TmpTestdir
    """
    # The conftest hook deliberately raises before yielding; the exception
    # must surface in the output instead of being swallowed by the plugin's
    # own makereport hookwrapper.
    testdir.makepyfile(
        conftest="""
        import pytest
        @pytest.mark.hookwrapper(tryfirst=True)
        def pytest_runtest_makereport(call):
            if call.when == 'call':
                raise Exception("This should not be hidden")
            yield
        """
    )
    p = testdir.makepyfile(
        """
        def test_foo():
            pass
        """
    )
    # Run in a subprocess so the broken hook cannot affect this test's own
    # pytest process.
    res = testdir.runpytest_subprocess(p)
    res.stdout.fnmatch_lines(["*This should not be hidden*"])
|
import datetime
import pytest
from pytestqt.qt_compat import qt_api
@pytest.mark.parametrize("test_succeeds", [True, False])
@pytest.mark.parametrize("qt_log", [True, False])
def test_basic_logging(testdir, test_succeeds, qt_log):
"""
Test Qt logging capture output.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
import sys
from pytestqt.qt_compat import qt_api
def to_unicode(s):
return s.decode('utf-8', 'replace') if isinstance(s, bytes) else s
if qt_api.qInstallMessageHandler:
def print_msg(msg_type, context, message):
sys.stderr.write(to_unicode(message) + '\\n')
qt_api.qInstallMessageHandler(print_msg)
else:
def print_msg(msg_type, message):
sys.stderr.write(to_unicode(message) + '\\n')
qt_api.qInstallMsgHandler(print_msg)
def test_types():
# qInfo is not exposed by the bindings yet (#225)
# qt_api.qInfo('this is an INFO message')
qt_api.qDebug('this is a DEBUG message')
qt_api.qWarning('this is a WARNING message')
qt_api.qCritical('this is a CRITICAL message')
assert {}
""".format(
test_succeeds
)
)
res = testdir.runpytest(*(["--no-qt-log"] if not qt_log else []))
if test_succeeds:
assert "Captured Qt messages" not in res.stdout.str()
assert "Captured stderr call" not in res.stdout.str()
else:
if qt_log:
res.stdout.fnmatch_lines(
[
"*-- Captured Qt messages --*",
# qInfo is not exposed by the bindings yet (#232)
# '*QtInfoMsg: this is an INFO message*',
"*QtDebugMsg: this is a DEBUG message*",
"*QtWarningMsg: this is a WARNING message*",
"*QtCriticalMsg: this is a CRITICAL message*",
]
)
else:
res.stdout.fnmatch_lines(
[
"*-- Captured stderr call --*",
# qInfo is not exposed by the bindings yet (#232)
# '*QtInfoMsg: this is an INFO message*',
# 'this is an INFO message*',
"this is a DEBUG message*",
"this is a WARNING message*",
"this is a CRITICAL message*",
]
)
def test_qinfo(qtlog):
"""Test INFO messages when we have means to do so. Should be temporary until bindings
catch up and expose qInfo (or at least QMessageLogger), then we should update
the other logging tests properly. #232
"""
if qt_api.pytest_qt_api.startswith("pyside"):
assert (
qt_api.qInfo is None
), "pyside does not expose qInfo. If it does, update this test."
return
if qt_api.pytest_qt_api.startswith("pyqt4"):
pytest.skip("qInfo and QtInfoMsg not supported in PyQt 4")
qt_api.qInfo("this is an INFO message")
records = [(m.type, m.message.strip()) for m in qtlog.records]
assert records == [(qt_api.QtInfoMsg, "this is an INFO message")]
def test_qtlog_fixture(qtlog):
"""
Test qtlog fixture.
"""
# qInfo is not exposed by the bindings yet (#232)
qt_api.qDebug("this is a DEBUG message")
qt_api.qWarning("this is a WARNING message")
qt_api.qCritical("this is a CRITICAL message")
records = [(m.type, m.message.strip()) for m in qtlog.records]
assert records == [
(qt_api.QtDebugMsg, "this is a DEBUG message"),
(qt_api.QtWarningMsg, "this is a WARNING message"),
(qt_api.QtCriticalMsg, "this is a CRITICAL message"),
]
# `records` attribute is read-only
with pytest.raises(AttributeError):
qtlog.records = []
def test_fixture_with_logging_disabled(testdir):
"""
Test that qtlog fixture doesn't capture anything if logging is disabled
in the command line.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
def test_types(qtlog):
qt_api.qWarning('message')
assert qtlog.records == []
"""
)
res = testdir.runpytest("--no-qt-log")
res.stdout.fnmatch_lines("*1 passed*")
@pytest.mark.parametrize("use_context_manager", [True, False])
def test_disable_qtlog_context_manager(testdir, use_context_manager):
"""
Test qtlog.disabled() context manager.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
"""
)
if use_context_manager:
code = "with qtlog.disabled():"
else:
code = "if 1:"
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
def test_1(qtlog):
{code}
qt_api.qCritical('message')
""".format(
code=code
)
)
res = testdir.inline_run()
passed = 1 if use_context_manager else 0
res.assertoutcome(passed=passed, failed=int(not passed))
@pytest.mark.parametrize("use_mark", [True, False])
def test_disable_qtlog_mark(testdir, use_mark):
"""
Test mark which disables logging capture for a test.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
"""
)
mark = "@pytest.mark.no_qt_log" if use_mark else ""
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
import pytest
{mark}
def test_1():
qt_api.qCritical('message')
""".format(
mark=mark
)
)
res = testdir.inline_run()
passed = 1 if use_mark else 0
res.assertoutcome(passed=passed, failed=int(not passed))
def test_logging_formatting(testdir):
"""
Test custom formatting for logging messages.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
def test_types():
qt_api.qWarning('this is a WARNING message')
assert 0
"""
)
f = "{rec.type_name} {rec.log_type_name} {rec.when:%Y-%m-%d}: {rec.message}"
res = testdir.runpytest("--qt-log-format={}".format(f))
today = "{:%Y-%m-%d}".format(datetime.datetime.now())
res.stdout.fnmatch_lines(
[
"*-- Captured Qt messages --*",
"QtWarningMsg WARNING {}: this is a WARNING message*".format(today),
]
)
@pytest.mark.parametrize(
"level, expect_passes", [("DEBUG", 1), ("WARNING", 2), ("CRITICAL", 3), ("NO", 4)]
)
def test_logging_fails_tests(testdir, level, expect_passes):
"""
Test qt_log_level_fail ini option.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = {level}
""".format(
level=level
)
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
def test_1():
qt_api.qDebug('this is a DEBUG message')
def test_2():
qt_api.qWarning('this is a WARNING message')
def test_3():
qt_api.qCritical('this is a CRITICAL message')
def test_4():
assert 1
"""
)
res = testdir.runpytest()
lines = []
if level != "NO":
lines.extend(
[
"*Failure: Qt messages with level {} or above emitted*".format(
level.upper()
),
"*-- Captured Qt messages --*",
]
)
lines.append("*{} passed*".format(expect_passes))
res.stdout.fnmatch_lines(lines)
def test_logging_fails_tests_mark(testdir):
"""
Test mark overrides what's configured in the ini file.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qWarning
import pytest
@pytest.mark.qt_log_level_fail('WARNING')
def test_1():
qWarning('message')
"""
)
res = testdir.inline_run()
res.assertoutcome(failed=1)
def test_logging_fails_ignore(testdir):
"""
Test qt_log_ignore config option.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
qt_log_ignore =
WM_DESTROY.*sent
WM_PAINT not handled
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
import pytest
def test1():
qt_api.qCritical('a critical message')
def test2():
qt_api.qCritical('WM_DESTROY was sent')
def test3():
qt_api.qCritical('WM_DESTROY was sent')
assert 0
def test4():
qt_api.qCritical('WM_PAINT not handled')
qt_api.qCritical('another critical message')
"""
)
res = testdir.runpytest()
lines = [
# test1 fails because it has emitted a CRITICAL message and that message
# does not match any regex in qt_log_ignore
"*_ test1 _*",
"*Failure: Qt messages with level CRITICAL or above emitted*",
"*QtCriticalMsg: a critical message*",
# test2 succeeds because its message matches qt_log_ignore
# test3 fails because of an assert, but the ignored message should
# still appear in the failure message
"*_ test3 _*",
"*AssertionError*",
"*QtCriticalMsg: WM_DESTROY was sent*(IGNORED)*",
# test4 fails because one message is ignored but the other isn't
"*_ test4 _*",
"*Failure: Qt messages with level CRITICAL or above emitted*",
"*QtCriticalMsg: WM_PAINT not handled*(IGNORED)*",
"*QtCriticalMsg: another critical message*",
# summary
"*3 failed, 1 passed*",
]
res.stdout.fnmatch_lines(lines)
@pytest.mark.parametrize("message", ["match-global", "match-mark"])
@pytest.mark.parametrize("marker_args", ["'match-mark', extend=True", "'match-mark'"])
def test_logging_mark_with_extend(testdir, message, marker_args):
"""
Test qt_log_ignore mark with extend=True.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
qt_log_ignore = match-global
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
import pytest
@pytest.mark.qt_log_ignore({marker_args})
def test1():
qt_api.qCritical('{message}')
""".format(
message=message, marker_args=marker_args
)
)
res = testdir.inline_run()
res.assertoutcome(passed=1, failed=0)
@pytest.mark.parametrize(
"message, error_expected", [("match-global", True), ("match-mark", False)]
)
def test_logging_mark_without_extend(testdir, message, error_expected):
"""
Test qt_log_ignore mark with extend=False.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = CRITICAL
qt_log_ignore = match-global
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
import pytest
@pytest.mark.qt_log_ignore('match-mark', extend=False)
def test1():
qt_api.qCritical('{message}')
""".format(
message=message
)
)
res = testdir.inline_run()
if error_expected:
res.assertoutcome(passed=0, failed=1)
else:
res.assertoutcome(passed=1, failed=0)
def test_logging_mark_with_invalid_argument(testdir):
"""
Test qt_log_ignore mark with invalid keyword argument.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
"""
import pytest
@pytest.mark.qt_log_ignore('match-mark', does_not_exist=True)
def test1():
pass
"""
)
res = testdir.runpytest()
lines = [
"*= ERRORS =*",
"*_ ERROR at setup of test1 _*",
"*ValueError: Invalid keyword arguments in {'does_not_exist': True} "
"for qt_log_ignore mark.",
# summary
"*= 1 error in*",
]
res.stdout.fnmatch_lines(lines)
@pytest.mark.parametrize("apply_mark", [True, False])
def test_logging_fails_ignore_mark_multiple(testdir, apply_mark):
"""
Make sure qt_log_ignore mark supports multiple arguments.
:type testdir: _pytest.pytester.TmpTestdir
"""
if apply_mark:
mark = '@pytest.mark.qt_log_ignore("WM_DESTROY", "WM_PAINT")'
else:
mark = ""
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
import pytest
@pytest.mark.qt_log_level_fail('CRITICAL')
{mark}
def test1():
qt_api.qCritical('WM_PAINT was sent')
""".format(
mark=mark
)
)
res = testdir.inline_run()
passed = 1 if apply_mark else 0
res.assertoutcome(passed=passed, failed=int(not passed))
def test_lineno_failure(testdir):
"""
Test that tests when failing because log messages were emitted report
the correct line number.
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makeini(
"""
[pytest]
qt_log_level_fail = WARNING
"""
)
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
def test_foo():
assert foo() == 10
def foo():
qt_api.qWarning('this is a WARNING message')
return 10
"""
)
res = testdir.runpytest()
if qt_api.pytest_qt_api == "pyqt5":
res.stdout.fnmatch_lines(
[
"*test_lineno_failure.py:2: Failure*",
"*test_lineno_failure.py:foo:5:*",
" QtWarningMsg: this is a WARNING message",
]
)
else:
res.stdout.fnmatch_lines("*test_lineno_failure.py:2: Failure*")
def test_context_none(testdir):
"""
Sometimes PyQt5 will emit a context with some/all attributes set as None
instead of appropriate file, function and line number.
Test that when this happens the plugin doesn't break, and it filters
out the context information.
:type testdir: _pytest.pytester.TmpTestdir
"""
if qt_api.pytest_qt_api != "pyqt5":
pytest.skip("Context information only available in PyQt5")
testdir.makepyfile(
"""
from pytestqt.qt_compat import qt_api
def test_foo(request):
log_capture = request.node.qt_log_capture
context = log_capture._Context(None, None, 0, None)
log_capture._handle_with_context(qt_api.QtWarningMsg,
context, "WARNING message")
assert 0
"""
)
res = testdir.runpytest()
assert "*None:None:0:*" not in str(res.stdout)
res.stdout.fnmatch_lines(["* QtWarningMsg: WARNING message*"])
def test_logging_broken_makereport(testdir):
"""
Make sure logging's makereport hookwrapper doesn't hide exceptions.
See https://github.com/pytest-dev/pytest-qt/issues/98
:type testdir: _pytest.pytester.TmpTestdir
"""
testdir.makepyfile(
conftest="""
import pytest
@pytest.mark.hookwrapper(tryfirst=True)
def pytest_runtest_makereport(call):
if call.when == 'call':
raise Exception("This should not be hidden")
yield
"""
)
p = testdir.makepyfile(
"""
def test_foo():
pass
"""
)
res = testdir.runpytest_subprocess(p)
res.stdout.fnmatch_lines(["*This should not be hidden*"])
|
en
| 0.492985
|
Test Qt logging capture output. :type testdir: _pytest.pytester.TmpTestdir import sys from pytestqt.qt_compat import qt_api def to_unicode(s): return s.decode('utf-8', 'replace') if isinstance(s, bytes) else s if qt_api.qInstallMessageHandler: def print_msg(msg_type, context, message): sys.stderr.write(to_unicode(message) + '\\n') qt_api.qInstallMessageHandler(print_msg) else: def print_msg(msg_type, message): sys.stderr.write(to_unicode(message) + '\\n') qt_api.qInstallMsgHandler(print_msg) def test_types(): # qInfo is not exposed by the bindings yet (#225) # qt_api.qInfo('this is an INFO message') qt_api.qDebug('this is a DEBUG message') qt_api.qWarning('this is a WARNING message') qt_api.qCritical('this is a CRITICAL message') assert {} # qInfo is not exposed by the bindings yet (#232) # '*QtInfoMsg: this is an INFO message*', # qInfo is not exposed by the bindings yet (#232) # '*QtInfoMsg: this is an INFO message*', # 'this is an INFO message*', Test INFO messages when we have means to do so. Should be temporary until bindings catch up and expose qInfo (or at least QMessageLogger), then we should update the other logging tests properly. #232 Test qtlog fixture. # qInfo is not exposed by the bindings yet (#232) # `records` attribute is read-only Test that qtlog fixture doesn't capture anything if logging is disabled in the command line. :type testdir: _pytest.pytester.TmpTestdir from pytestqt.qt_compat import qt_api def test_types(qtlog): qt_api.qWarning('message') assert qtlog.records == [] Test qtlog.disabled() context manager. :type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = CRITICAL from pytestqt.qt_compat import qt_api def test_1(qtlog): {code} qt_api.qCritical('message') Test mark which disables logging capture for a test. 
:type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = CRITICAL from pytestqt.qt_compat import qt_api import pytest {mark} def test_1(): qt_api.qCritical('message') Test custom formatting for logging messages. :type testdir: _pytest.pytester.TmpTestdir from pytestqt.qt_compat import qt_api def test_types(): qt_api.qWarning('this is a WARNING message') assert 0 Test qt_log_level_fail ini option. :type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = {level} from pytestqt.qt_compat import qt_api def test_1(): qt_api.qDebug('this is a DEBUG message') def test_2(): qt_api.qWarning('this is a WARNING message') def test_3(): qt_api.qCritical('this is a CRITICAL message') def test_4(): assert 1 Test mark overrides what's configured in the ini file. :type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = CRITICAL from pytestqt.qt_compat import qWarning import pytest @pytest.mark.qt_log_level_fail('WARNING') def test_1(): qWarning('message') Test qt_log_ignore config option. :type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = CRITICAL qt_log_ignore = WM_DESTROY.*sent WM_PAINT not handled from pytestqt.qt_compat import qt_api import pytest def test1(): qt_api.qCritical('a critical message') def test2(): qt_api.qCritical('WM_DESTROY was sent') def test3(): qt_api.qCritical('WM_DESTROY was sent') assert 0 def test4(): qt_api.qCritical('WM_PAINT not handled') qt_api.qCritical('another critical message') # test1 fails because it has emitted a CRITICAL message and that message # does not match any regex in qt_log_ignore # test2 succeeds because its message matches qt_log_ignore # test3 fails because of an assert, but the ignored message should # still appear in the failure message # test4 fails because one message is ignored but the other isn't # summary Test qt_log_ignore mark with extend=True. 
:type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = CRITICAL qt_log_ignore = match-global from pytestqt.qt_compat import qt_api import pytest @pytest.mark.qt_log_ignore({marker_args}) def test1(): qt_api.qCritical('{message}') Test qt_log_ignore mark with extend=False. :type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = CRITICAL qt_log_ignore = match-global from pytestqt.qt_compat import qt_api import pytest @pytest.mark.qt_log_ignore('match-mark', extend=False) def test1(): qt_api.qCritical('{message}') Test qt_log_ignore mark with invalid keyword argument. :type testdir: _pytest.pytester.TmpTestdir import pytest @pytest.mark.qt_log_ignore('match-mark', does_not_exist=True) def test1(): pass # summary Make sure qt_log_ignore mark supports multiple arguments. :type testdir: _pytest.pytester.TmpTestdir from pytestqt.qt_compat import qt_api import pytest @pytest.mark.qt_log_level_fail('CRITICAL') {mark} def test1(): qt_api.qCritical('WM_PAINT was sent') Test that tests when failing because log messages were emitted report the correct line number. :type testdir: _pytest.pytester.TmpTestdir [pytest] qt_log_level_fail = WARNING from pytestqt.qt_compat import qt_api def test_foo(): assert foo() == 10 def foo(): qt_api.qWarning('this is a WARNING message') return 10 Sometimes PyQt5 will emit a context with some/all attributes set as None instead of appropriate file, function and line number. Test that when this happens the plugin doesn't break, and it filters out the context information. :type testdir: _pytest.pytester.TmpTestdir from pytestqt.qt_compat import qt_api def test_foo(request): log_capture = request.node.qt_log_capture context = log_capture._Context(None, None, 0, None) log_capture._handle_with_context(qt_api.QtWarningMsg, context, "WARNING message") assert 0 Make sure logging's makereport hookwrapper doesn't hide exceptions. 
See https://github.com/pytest-dev/pytest-qt/issues/98 :type testdir: _pytest.pytester.TmpTestdir import pytest @pytest.mark.hookwrapper(tryfirst=True) def pytest_runtest_makereport(call): if call.when == 'call': raise Exception("This should not be hidden") yield def test_foo(): pass
| 2.174088
| 2
|
scan/fetchers/kube/kube_fetch_oteps_vpp.py
|
korenlev/calipso-cvim
| 0
|
6625385
|
###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from base.utils.constants import KubeVedgeType
from scan.fetchers.kube.kube_fetch_oteps_base import KubeFetchOtepsBase
class KubeFetchOtepsVpp(KubeFetchOtepsBase):
    """Fetch overlay tunnel endpoint (OTEP) documents for VPP-based vEdges."""

    # UDP port the VPP overlay uses for tunnel traffic.
    OTEP_UDP_PORT = 8285

    def get(self, vedge_id) -> list:
        """Return a single-element list with the OTEP document for the given
        vEdge, or an empty list when the owning host cannot be found."""
        # The vEdge id is the host id with a '-VPP' suffix appended.
        if vedge_id.endswith('-VPP'):
            host_id = vedge_id.replace('-VPP', '')
        else:
            host_id = vedge_id
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error('failed to find host by ID: {}'.format(host_id))
            return []
        # Pick the first 'vpp1-*' interface; fall back to an empty dict so
        # the .get() lookups below degrade gracefully.
        vpp_interface = {}
        for interface in host.get('interfaces', []):
            if interface['name'].startswith('vpp1-'):
                vpp_interface = interface
                break
        ip_address = vpp_interface.get('IP Address', '')
        overlay_type = self.configuration.environment.get('type_drivers')
        host_name = host['name']
        return [{
            'id': '{}-otep'.format(host_id),
            'name': '{}-otep'.format(host_name),
            'host': host_name,
            'parent_type': 'vedge',
            'parent_id': vedge_id,
            'ip_address': ip_address,
            'overlay_type': overlay_type,
            'overlay_mac_address': vpp_interface.get('mac_address'),
            'ports': self.get_ports(host_name, ip_address, overlay_type),
            'udp_port': self.OTEP_UDP_PORT,
            'vedge_type': KubeVedgeType.VPP.value
        }]

    # Prefix for ids of per-remote-host VXLAN tunnel ports.
    PORT_ID_PREFIX = 'vxlan-remote-'

    @classmethod
    def get_port_id(cls, remote_host_id: str) -> str:
        """Build the tunnel port id for the given remote host."""
        return cls.PORT_ID_PREFIX + remote_host_id

    @classmethod
    def get_port(cls, overlay_type: str, local_ip: str,
                 remote_ip: str, remote_host: str) -> dict:
        """Build a tunnel port document connecting local_ip to remote_ip."""
        tunnel_name = cls.get_port_id(remote_host)
        return {
            'name': tunnel_name,
            'type': overlay_type,
            'remote_host': remote_host,
            'interface': tunnel_name,
            'options': {
                'local_ip': local_ip,
                'remote_ip': remote_ip
            }
        }
|
###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from base.utils.constants import KubeVedgeType
from scan.fetchers.kube.kube_fetch_oteps_base import KubeFetchOtepsBase
class KubeFetchOtepsVpp(KubeFetchOtepsBase):
    """Fetch overlay tunnel endpoint (OTEP) documents for VPP-based vEdges."""

    # UDP port used by the VPP overlay tunnel endpoint.
    OTEP_UDP_PORT = 8285
    # Prefix used to build VXLAN port/interface IDs pointing at remote hosts.
    PORT_ID_PREFIX = 'vxlan-remote-'

    def get(self, vedge_id) -> list:
        """Return a one-element list with the OTEP document for this vEdge,
        or an empty list if the owning host cannot be found in inventory.

        :param vedge_id: vEdge inventory ID, typically '<host_id>-VPP'.
        """
        # Strip only the trailing '-VPP' suffix. Slicing (instead of
        # str.replace) avoids accidentally removing an interior '-VPP'
        # occurrence from an unusual host ID.
        host_id = vedge_id[:-len('-VPP')] if vedge_id.endswith('-VPP') \
            else vedge_id
        host = self.inv.get_by_id(self.get_env(), host_id)
        if not host:
            self.log.error('failed to find host by ID: {}'.format(host_id))
            return []
        host_interfaces = host.get('interfaces', [])
        # The VPP uplink interface is expected to be named 'vpp1-...'.
        vpp_interface = next((interface for interface in host_interfaces
                              if interface['name'].startswith('vpp1-')), {})
        ip_address = vpp_interface.get('IP Address', '')
        otep_mac = vpp_interface.get('mac_address')
        overlay_type = self.configuration.environment.get('type_drivers')
        doc = {
            'id': '{}-otep'.format(host_id),
            'name': '{}-otep'.format(host['name']),
            'host': host['name'],
            'parent_type': 'vedge',
            'parent_id': vedge_id,
            'ip_address': ip_address,
            'overlay_type': overlay_type,
            'overlay_mac_address': otep_mac,
            'ports': self.get_ports(host['name'], ip_address, overlay_type),
            'udp_port': self.OTEP_UDP_PORT,
            'vedge_type': KubeVedgeType.VPP.value
        }
        return [doc]

    @classmethod
    def get_port_id(cls, remote_host_id: str) -> str:
        """Build the ID of the VXLAN port that points at a remote host."""
        return '{}{}'.format(cls.PORT_ID_PREFIX, remote_host_id)

    @classmethod
    def get_port(cls, overlay_type: str, local_ip: str,
                 remote_ip: str, remote_host: str) -> dict:
        """Build a port document describing a VXLAN tunnel to a remote host."""
        port_id = cls.get_port_id(remote_host)
        return {
            'name': port_id,
            'type': overlay_type,
            'remote_host': remote_host,
            'interface': port_id,
            'options': {
                'local_ip': local_ip,
                'remote_ip': remote_ip
            }
        }
|
en
| 0.431477
|
############################################################################### # Copyright (c) 2017-2020 <NAME> (Cisco Systems), # # <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others # # # # All rights reserved. This program and the accompanying materials # # are made available under the terms of the Apache License, Version 2.0 # # which accompanies this distribution, and is available at # # http://www.apache.org/licenses/LICENSE-2.0 # ###############################################################################
| 2.075571
| 2
|
model_innondations/pipeline/download-climateknowledgeportal.py
|
PayjPan/batch8_worldbank
| 0
|
6625386
|
<gh_stars>0
# -*- coding: utf-8 -*-
import requests
import os, sys
import pandas as pd
import urllib.parse
import logging
#import asyncio
import concurrent.futures
from pathlib import Path
'''
Script to download dataset from https://climateknowledgeportal.worldbank.org/api/data/get-download-data
'''
DATASET_FOLDER = '../../datasets/'
# Destination
PATH = os.path.join(DATASET_FOLDER, 'precipitation/')
# Two kinds of datasets are fetched: model projections and historical records.
nature_of_data = ['projection', 'historical']
# Read countries list
df = pd.read_csv('../../datasets/worldbank_countries.csv')
countries_code = df.code.to_list()
countries_name = df.name.to_list()
# Climate variable(s) to download ('pr' == precipitation, per the API docs below).
variables = ['pr']
# Periods: one long range for historical data, four 20-year windows for projections.
past_time_series = ["1901-2016"]
futu_time_series = ["2020_2039", "2040_2059", "2060_2079", "2080_2099"]
# Write DEBUG-and-above log records to 'download.log'.
logger = logging.getLogger("download")
formatter = logging.Formatter("%(asctime)s - %(name)-12s %(levelname)-8s %(message)s")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("download.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info(f'Starting...')
'''
Pour la construire : https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/ projection/historical (2 options)
/ mavg/manom for mean/change (2 options)
/ climate variable pr/tas for precipitation/temperature (45 options in futur, 2 in past)
/ Scenario : rcp85 (4 options in futur, 0 in past)
/ period : 2080_2099 (4 options in futur, 5 in past)
/ country code (197 options)
/ country name (197 options)
https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/historical/pr/1931-1960/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/projection/mavg/pr/rcp85/2060_2079/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/projection/mavg/pr/rcp85/2080-2099/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060_2079/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060-2079/FRA/France
'''
def get_url(url, destination):
    """Download ``url`` to the file ``destination``.

    Returns True on success. Returns False when the file already exists,
    the HTTP status is not 200, the payload looks too small to be real
    data, or any error occurs during the request or write (errors are
    logged, never raised).
    """
    if Path(destination).is_file():
        logger.info(f'{destination} already exist ! No download.')
        return False
    logger.debug(f'{url} -> {destination}')
    # Retrieve the content
    try:
        r = requests.get(url)
        content = r.content
        if r.status_code != 200:
            logger.error(f'ERROR HTTP : {r.status_code} for {url}')
            return False
        # A valid CSV payload should be at least ~1 KB; anything smaller
        # is most likely an API error page.
        if len(r.content) < 1_000:
            logger.error(f'ERROR HTTP content too small : {content} for {url}')
            return False
        with open(destination, 'wb') as f:
            f.write(content)
        return True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed by the download loop.
        logger.error(f'Unexpected ERROR for {url}: {sys.exc_info()[0]}')
        return False
nb_iter = 0
#asyncloop = asyncio.get_event_loop()
#tasks = []
# Fan the downloads out over a thread pool; each (country, nature, period)
# combination becomes one get_url task.
with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
    futures = []
    for country_code, country_name in zip(countries_code, countries_name):
        for nature in nature_of_data:
            # Historical data uses the long past range and no scenario/mavg
            # URL segments; projections use 20-year windows, 'mavg' and 'rcp85'.
            time_series=past_time_series if nature == 'historical' else futu_time_series
            data_type = '' if nature == 'historical' else '/mavg'
            projection = '' if nature == 'historical' else '/rcp85'
            for period in time_series:
                nb_iter += 1
                # Build URL
                url = 'https://climateknowledgeportal.worldbank.org/api/data/get-download-data/' \
                    + f'{nature}{data_type}/pr{projection}/{period}/{country_code}/{urllib.parse.quote_plus(country_name)}'
                # build destination name
                filename = '_'.join([nature, period, country_code]) + '.csv'
                destination = os.path.join(PATH, filename)
                #tasks.append(asyncloop.create_task(get_url(url, destination)))
                futures.append(executor.submit(get_url, url=url, destination=destination))
    # Drain results as they complete so failures are logged promptly.
    for future in concurrent.futures.as_completed(futures):
        #print(future.result())
        logger.debug(f'Done {future.result()}')
# for task in tasks:
#     await task
logger.info(f'Done after {nb_iter} iterations.')
# https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060_2079/BDI/Burundi
|
# -*- coding: utf-8 -*-
import requests
import os, sys
import pandas as pd
import urllib.parse
import logging
#import asyncio
import concurrent.futures
from pathlib import Path
'''
Script to download dataset from https://climateknowledgeportal.worldbank.org/api/data/get-download-data
'''
DATASET_FOLDER = '../../datasets/'
# Destination
PATH = os.path.join(DATASET_FOLDER, 'precipitation/')
# Two kinds of datasets are fetched: model projections and historical records.
nature_of_data = ['projection', 'historical']
# Read countries list
df = pd.read_csv('../../datasets/worldbank_countries.csv')
countries_code = df.code.to_list()
countries_name = df.name.to_list()
# Climate variable(s) to download ('pr' == precipitation, per the API docs below).
variables = ['pr']
# Periods: one long range for historical data, four 20-year windows for projections.
past_time_series = ["1901-2016"]
futu_time_series = ["2020_2039", "2040_2059", "2060_2079", "2080_2099"]
# Write DEBUG-and-above log records to 'download.log'.
logger = logging.getLogger("download")
formatter = logging.Formatter("%(asctime)s - %(name)-12s %(levelname)-8s %(message)s")
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("download.log")
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info(f'Starting...')
'''
Pour la construire : https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/ projection/historical (2 options)
/ mavg/manom for mean/change (2 options)
/ climate variable pr/tas for precipitation/temperature (45 options in futur, 2 in past)
/ Scenario : rcp85 (4 options in futur, 0 in past)
/ period : 2080_2099 (4 options in futur, 5 in past)
/ country code (197 options)
/ country name (197 options)
https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/historical/pr/1931-1960/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/projection/mavg/pr/rcp85/2060_2079/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data
/projection/mavg/pr/rcp85/2080-2099/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060_2079/FRA/France
https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060-2079/FRA/France
'''
def get_url(url, destination):
    """Download ``url`` to the file ``destination``.

    Returns True on success. Returns False when the file already exists,
    the HTTP status is not 200, the payload looks too small to be real
    data, or any error occurs during the request or write (errors are
    logged, never raised).
    """
    if Path(destination).is_file():
        logger.info(f'{destination} already exist ! No download.')
        return False
    logger.debug(f'{url} -> {destination}')
    # Retrieve the content
    try:
        r = requests.get(url)
        content = r.content
        if r.status_code != 200:
            logger.error(f'ERROR HTTP : {r.status_code} for {url}')
            return False
        # A valid CSV payload should be at least ~1 KB; anything smaller
        # is most likely an API error page.
        if len(r.content) < 1_000:
            logger.error(f'ERROR HTTP content too small : {content} for {url}')
            return False
        with open(destination, 'wb') as f:
            f.write(content)
        return True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed by the download loop.
        logger.error(f'Unexpected ERROR for {url}: {sys.exc_info()[0]}')
        return False
nb_iter = 0
#asyncloop = asyncio.get_event_loop()
#tasks = []
# Fan the downloads out over a thread pool; each (country, nature, period)
# combination becomes one get_url task.
with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
    futures = []
    for country_code, country_name in zip(countries_code, countries_name):
        for nature in nature_of_data:
            # Historical data uses the long past range and no scenario/mavg
            # URL segments; projections use 20-year windows, 'mavg' and 'rcp85'.
            time_series=past_time_series if nature == 'historical' else futu_time_series
            data_type = '' if nature == 'historical' else '/mavg'
            projection = '' if nature == 'historical' else '/rcp85'
            for period in time_series:
                nb_iter += 1
                # Build URL
                url = 'https://climateknowledgeportal.worldbank.org/api/data/get-download-data/' \
                    + f'{nature}{data_type}/pr{projection}/{period}/{country_code}/{urllib.parse.quote_plus(country_name)}'
                # build destination name
                filename = '_'.join([nature, period, country_code]) + '.csv'
                destination = os.path.join(PATH, filename)
                #tasks.append(asyncloop.create_task(get_url(url, destination)))
                futures.append(executor.submit(get_url, url=url, destination=destination))
    # Drain results as they complete so failures are logged promptly.
    for future in concurrent.futures.as_completed(futures):
        #print(future.result())
        logger.debug(f'Done {future.result()}')
# for task in tasks:
#     await task
logger.info(f'Done after {nb_iter} iterations.')
# https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060_2079/BDI/Burundi
|
en
| 0.405887
|
# -*- coding: utf-8 -*- #import asyncio Script to download dataset from https://climateknowledgeportal.worldbank.org/api/data/get-download-data # Destination # Read countries list Pour la construire : https://climateknowledgeportal.worldbank.org/api/data/get-download-data / projection/historical (2 options) / mavg/manom for mean/change (2 options) / climate variable pr/tas for precipitation/temperature (45 options in futur, 2 in past) / Scenario : rcp85 (4 options in futur, 0 in past) / period : 2080_2099 (4 options in futur, 5 in past) / country code (197 options) / country name (197 options) https://climateknowledgeportal.worldbank.org/api/data/get-download-data /historical/pr/1931-1960/FRA/France https://climateknowledgeportal.worldbank.org/api/data/get-download-data /projection/mavg/pr/rcp85/2060_2079/FRA/France https://climateknowledgeportal.worldbank.org/api/data/get-download-data /projection/mavg/pr/rcp85/2080-2099/FRA/France https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060_2079/FRA/France https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060-2079/FRA/France # Retreive the content #asyncloop = asyncio.get_event_loop() #tasks = [] # Build URL # build destination name #tasks.append(asyncloop.create_task(get_url(url, destination))) #print(future.result()) # for task in tasks: # await task # https://climateknowledgeportal.worldbank.org/api/data/get-download-data/projection/mavg/pr/rcp85/2060_2079/BDI/Burundi
| 2.699917
| 3
|
src/functions.py
|
gsc2001/ConvexNet
| 0
|
6625387
|
from torch import nn
from tqdm import tqdm
def fit(model: nn.Module, train_loader, val_loader, optimizer, criterion, epochs, lr_scheduler=None):
    """Training-loop skeleton: announce the model, then iterate ``epochs``
    times. The per-epoch work (train, evaluate, log, LR step) is still TODO.
    """
    print('Starting fitting for', model.__class__.__name__)
    for _epoch in range(epochs):
        # TODO: run one training epoch over train_loader
        # TODO: evaluate on val_loader
        # TODO: log metrics
        # TODO: step lr_scheduler if provided
        pass
|
from torch import nn
from tqdm import tqdm
def fit(model: nn.Module, train_loader, val_loader, optimizer, criterion, epochs, lr_scheduler=None):
    """Training-loop skeleton: announce the model, then iterate ``epochs``
    times. The per-epoch work (train, evaluate, log, LR step) is still TODO.
    """
    print('Starting fitting for', model.__class__.__name__)
    for _epoch in range(epochs):
        # TODO: run one training epoch over train_loader
        # TODO: evaluate on val_loader
        # TODO: log metrics
        # TODO: step lr_scheduler if provided
        pass
|
en
| 0.665912
|
# train epoch # test epochs # log results # step lr_scheduler
| 2.443553
| 2
|
scripts/generate_ibl_index.py
|
paranblues/dspbr-pt
| 0
|
6625388
|
<gh_stars>0
#!/usr/bin/env python
# @license
# Copyright 2020 <NAME> - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, glob
import json
path = "./assets/env/"
files = {}
# Collect all HDR environment maps under the assets directory, recursively.
paths = glob.glob(path + "**/*.hdr", recursive=True)
paths.sort()
for f in paths:
    # Map the bare file name (without extension) to its relative path.
    n = os.path.splitext(os.path.basename(f))[0]
    files[n] = f
data_str = json.dumps(files, indent=4)
# json.dumps escapes '\' as '\\'; normalize Windows path separators to '/'.
data_str = data_str.replace('\\\\', '/')
print(data_str)
# Emit a small JS module exporting the index object as its default export.
with open(path + 'ibl_index.js', 'w') as outfile:
    outfile.write("var ibl_index = ");
    outfile.write(data_str)
    outfile.write(";\n\n")
    outfile.write("export default ibl_index;\n")
|
#!/usr/bin/env python
# @license
# Copyright 2020 <NAME> - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, glob
import json
path = "./assets/env/"
files = {}
# Collect all HDR environment maps under the assets directory, recursively.
paths = glob.glob(path + "**/*.hdr", recursive=True)
paths.sort()
for f in paths:
    # Map the bare file name (without extension) to its relative path.
    n = os.path.splitext(os.path.basename(f))[0]
    files[n] = f
data_str = json.dumps(files, indent=4)
# json.dumps escapes '\' as '\\'; normalize Windows path separators to '/'.
data_str = data_str.replace('\\\\', '/')
print(data_str)
# Emit a small JS module exporting the index object as its default export.
with open(path + 'ibl_index.js', 'w') as outfile:
    outfile.write("var ibl_index = ");
    outfile.write(data_str)
    outfile.write(";\n\n")
    outfile.write("export default ibl_index;\n")
|
en
| 0.82511
|
#!/usr/bin/env python # @license # Copyright 2020 <NAME> - All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 2.293055
| 2
|
test/commands/extended/get_latest_inclusion_test.py
|
Cornode/cornode.lib.py
| 0
|
6625389
|
<filename>test/commands/extended/get_latest_inclusion_test.py
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from six import binary_type, text_type
from cornode import cornode, TransactionHash, TryteString
from cornode.adapter import MockAdapter
from cornode.commands.extended.get_latest_inclusion import \
GetLatestInclusionCommand
from cornode.filters import Trytes
class GetLatestInclusionRequestFilterTestCase(BaseFilterTestCase):
  """Validates the request filter of the ``getLatestInclusion`` command."""
  filter_type = GetLatestInclusionCommand(MockAdapter()).get_request_filter
  skip_value_check = True

  # noinspection SpellCheckingInspection
  def setUp(self):
    super(GetLatestInclusionRequestFilterTestCase, self).setUp()

    # Valid transaction hashes (81 trytes each), reused across tests.
    self.hash1 = (
      b'UBMJSEJDJLPDDJ99PISPI9VZSWBWBPZWVVFED9ED'
      b'XSU9BHQHKMBMVURSZOSBIXJ9MBEOHVDPV9CWV9ECF'
    )

    self.hash2 = (
      b'WGXG9AGGIVSE9NUEEVVFNJARM9ZWDDATZKPBBXFJ'
      b'HFPGFPTQPHBCVIEYQWENDK9NMREIIBIWLZHRWRIPU'
    )

  def test_pass_happy_path(self):
    """
    Request is valid.
    """
    request = {
      'hashes': [TransactionHash(self.hash1), TransactionHash(self.hash2)],
    }

    filter_ = self._filter(request)

    self.assertFilterPasses(filter_)
    self.assertDictEqual(filter_.cleaned_data, request)

  def test_pass_compatible_types(self):
    """
    Request contains values that can be converted to the expected
    types.
    """
    filter_ = self._filter({
      'hashes': [
        # Any TrytesCompatible value can be used here.
        binary_type(self.hash1),
        bytearray(self.hash2),
      ],
    })

    self.assertFilterPasses(filter_)
    self.assertDictEqual(
      filter_.cleaned_data,

      {
        'hashes': [
          TransactionHash(self.hash1),
          TransactionHash(self.hash2),
        ],
      },
    )

  def test_fail_empty(self):
    """
    Request is empty.
    """
    self.assertFilterErrors(
      {},

      {
        'hashes': [f.FilterMapper.CODE_MISSING_KEY],
      },
    )

  def test_fail_unexpected_parameters(self):
    """
    Request contains unexpected parameters.
    """
    self.assertFilterErrors(
      {
        'hashes': [TransactionHash(self.hash1)],

        # Uh, before we dock, I think we ought to discuss the bonus
        # situation.
        'foo': 'bar',
      },

      {
        'foo': [f.FilterMapper.CODE_EXTRA_KEY],
      },
    )

  def test_fail_hashes_null(self):
    """
    ``hashes`` is null.
    """
    self.assertFilterErrors(
      {
        'hashes': None,
      },

      {
        'hashes': [f.Required.CODE_EMPTY],
      },
    )

  def test_fail_hashes_wrong_type(self):
    """
    ``hashes`` is not an array.
    """
    self.assertFilterErrors(
      {
        # It's gotta be an array, even if there's only one hash.
        'hashes': TransactionHash(self.hash1),
      },

      {
        'hashes': [f.Type.CODE_WRONG_TYPE],
      },
    )

  def test_fail_hashes_empty(self):
    """
    ``hashes`` is an array, but it is empty.
    """
    self.assertFilterErrors(
      {
        'hashes': [],
      },

      {
        'hashes': [f.Required.CODE_EMPTY],
      },
    )

  def test_fail_hashes_contents_invalid(self):
    """
    ``hashes`` is a non-empty array, but it contains invalid values.
    """
    self.assertFilterErrors(
      {
        'hashes': [
          b'',
          text_type(self.hash1, 'ascii'),
          True,
          None,
          b'not valid trytes',

          # This is actually valid; I just added it to make sure the
          # filter isn't cheating!
          TryteString(self.hash1),

          2130706433,
          b'9' * 82,
        ],
      },

      {
        'hashes.0': [f.Required.CODE_EMPTY],
        'hashes.1': [f.Type.CODE_WRONG_TYPE],
        'hashes.2': [f.Type.CODE_WRONG_TYPE],
        'hashes.3': [f.Required.CODE_EMPTY],
        'hashes.4': [Trytes.CODE_NOT_TRYTES],
        'hashes.6': [f.Type.CODE_WRONG_TYPE],
        'hashes.7': [Trytes.CODE_WRONG_FORMAT],
      },
    )
class GetLatestInclusionCommandTestCase(TestCase):
  """End-to-end behavior of ``GetLatestInclusionCommand`` over a MockAdapter."""

  # noinspection SpellCheckingInspection
  def setUp(self):
    super(GetLatestInclusionCommandTestCase, self).setUp()

    self.adapter = MockAdapter()
    self.command = GetLatestInclusionCommand(self.adapter)

    # Define some tryte sequences that we can re-use across tests.
    self.milestone =\
      TransactionHash(
        b'TESTVALUE9DONTUSEINPRODUCTION99999W9KDIH'
        b'BALAYAFCADIDU9HCXDKIXEYDNFRAKHN9IEIDZFWGJ'
      )

    self.hash1 =\
      TransactionHash(
        b'TESTVALUE9DONTUSEINPRODUCTION99999TBPDM9'
        b'ADFAWCKCSFUALFGETFIFG9UHIEFE9AYESEHDUBDDF'
      )

    self.hash2 =\
      TransactionHash(
        b'TESTVALUE9DONTUSEINPRODUCTION99999CIGCCF'
        b'KIUFZF9EP9YEYGQAIEXDTEAAUGAEWBBASHYCWBHDX'
      )

  def test_wireup(self):
    """
    Verify that the command is wired up correctly.
    """
    self.assertIsInstance(
      cornode(self.adapter).getLatestInclusion,
      GetLatestInclusionCommand,
    )

  def test_happy_path(self):
    """
    Successfully requesting latest inclusion state.
    """
    self.adapter.seed_response('getNodeInfo', {
      # ``getNodeInfo`` returns lots of info, but the only value that
      # matters for this test is ``latestSolidSubtangleMilestone``.
      'latestSolidSubtangleMilestone': self.milestone,
      },
    )

    self.adapter.seed_response('getInclusionStates', {
      'states': [True, False],
    })

    response = self.command(hashes=[self.hash1, self.hash2])

    self.assertDictEqual(
      response,

      {
        'states': {
          self.hash1: True,
          self.hash2: False,
        },
      }
    )
|
<filename>test/commands/extended/get_latest_inclusion_test.py
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from six import binary_type, text_type
from cornode import cornode, TransactionHash, TryteString
from cornode.adapter import MockAdapter
from cornode.commands.extended.get_latest_inclusion import \
GetLatestInclusionCommand
from cornode.filters import Trytes
class GetLatestInclusionRequestFilterTestCase(BaseFilterTestCase):
  """Validates the request filter of the ``getLatestInclusion`` command."""
  filter_type = GetLatestInclusionCommand(MockAdapter()).get_request_filter
  skip_value_check = True

  # noinspection SpellCheckingInspection
  def setUp(self):
    super(GetLatestInclusionRequestFilterTestCase, self).setUp()

    # Valid transaction hashes (81 trytes each), reused across tests.
    self.hash1 = (
      b'UBMJSEJDJLPDDJ99PISPI9VZSWBWBPZWVVFED9ED'
      b'XSU9BHQHKMBMVURSZOSBIXJ9MBEOHVDPV9CWV9ECF'
    )

    self.hash2 = (
      b'WGXG9AGGIVSE9NUEEVVFNJARM9ZWDDATZKPBBXFJ'
      b'HFPGFPTQPHBCVIEYQWENDK9NMREIIBIWLZHRWRIPU'
    )

  def test_pass_happy_path(self):
    """
    Request is valid.
    """
    request = {
      'hashes': [TransactionHash(self.hash1), TransactionHash(self.hash2)],
    }

    filter_ = self._filter(request)

    self.assertFilterPasses(filter_)
    self.assertDictEqual(filter_.cleaned_data, request)

  def test_pass_compatible_types(self):
    """
    Request contains values that can be converted to the expected
    types.
    """
    filter_ = self._filter({
      'hashes': [
        # Any TrytesCompatible value can be used here.
        binary_type(self.hash1),
        bytearray(self.hash2),
      ],
    })

    self.assertFilterPasses(filter_)
    self.assertDictEqual(
      filter_.cleaned_data,

      {
        'hashes': [
          TransactionHash(self.hash1),
          TransactionHash(self.hash2),
        ],
      },
    )

  def test_fail_empty(self):
    """
    Request is empty.
    """
    self.assertFilterErrors(
      {},

      {
        'hashes': [f.FilterMapper.CODE_MISSING_KEY],
      },
    )

  def test_fail_unexpected_parameters(self):
    """
    Request contains unexpected parameters.
    """
    self.assertFilterErrors(
      {
        'hashes': [TransactionHash(self.hash1)],

        # Uh, before we dock, I think we ought to discuss the bonus
        # situation.
        'foo': 'bar',
      },

      {
        'foo': [f.FilterMapper.CODE_EXTRA_KEY],
      },
    )

  def test_fail_hashes_null(self):
    """
    ``hashes`` is null.
    """
    self.assertFilterErrors(
      {
        'hashes': None,
      },

      {
        'hashes': [f.Required.CODE_EMPTY],
      },
    )

  def test_fail_hashes_wrong_type(self):
    """
    ``hashes`` is not an array.
    """
    self.assertFilterErrors(
      {
        # It's gotta be an array, even if there's only one hash.
        'hashes': TransactionHash(self.hash1),
      },

      {
        'hashes': [f.Type.CODE_WRONG_TYPE],
      },
    )

  def test_fail_hashes_empty(self):
    """
    ``hashes`` is an array, but it is empty.
    """
    self.assertFilterErrors(
      {
        'hashes': [],
      },

      {
        'hashes': [f.Required.CODE_EMPTY],
      },
    )

  def test_fail_hashes_contents_invalid(self):
    """
    ``hashes`` is a non-empty array, but it contains invalid values.
    """
    self.assertFilterErrors(
      {
        'hashes': [
          b'',
          text_type(self.hash1, 'ascii'),
          True,
          None,
          b'not valid trytes',

          # This is actually valid; I just added it to make sure the
          # filter isn't cheating!
          TryteString(self.hash1),

          2130706433,
          b'9' * 82,
        ],
      },

      {
        'hashes.0': [f.Required.CODE_EMPTY],
        'hashes.1': [f.Type.CODE_WRONG_TYPE],
        'hashes.2': [f.Type.CODE_WRONG_TYPE],
        'hashes.3': [f.Required.CODE_EMPTY],
        'hashes.4': [Trytes.CODE_NOT_TRYTES],
        'hashes.6': [f.Type.CODE_WRONG_TYPE],
        'hashes.7': [Trytes.CODE_WRONG_FORMAT],
      },
    )
class GetLatestInclusionCommandTestCase(TestCase):
  """End-to-end behavior of ``GetLatestInclusionCommand`` over a MockAdapter."""

  # noinspection SpellCheckingInspection
  def setUp(self):
    super(GetLatestInclusionCommandTestCase, self).setUp()

    self.adapter = MockAdapter()
    self.command = GetLatestInclusionCommand(self.adapter)

    # Define some tryte sequences that we can re-use across tests.
    self.milestone =\
      TransactionHash(
        b'TESTVALUE9DONTUSEINPRODUCTION99999W9KDIH'
        b'BALAYAFCADIDU9HCXDKIXEYDNFRAKHN9IEIDZFWGJ'
      )

    self.hash1 =\
      TransactionHash(
        b'TESTVALUE9DONTUSEINPRODUCTION99999TBPDM9'
        b'ADFAWCKCSFUALFGETFIFG9UHIEFE9AYESEHDUBDDF'
      )

    self.hash2 =\
      TransactionHash(
        b'TESTVALUE9DONTUSEINPRODUCTION99999CIGCCF'
        b'KIUFZF9EP9YEYGQAIEXDTEAAUGAEWBBASHYCWBHDX'
      )

  def test_wireup(self):
    """
    Verify that the command is wired up correctly.
    """
    self.assertIsInstance(
      cornode(self.adapter).getLatestInclusion,
      GetLatestInclusionCommand,
    )

  def test_happy_path(self):
    """
    Successfully requesting latest inclusion state.
    """
    self.adapter.seed_response('getNodeInfo', {
      # ``getNodeInfo`` returns lots of info, but the only value that
      # matters for this test is ``latestSolidSubtangleMilestone``.
      'latestSolidSubtangleMilestone': self.milestone,
      },
    )

    self.adapter.seed_response('getInclusionStates', {
      'states': [True, False],
    })

    response = self.command(hashes=[self.hash1, self.hash2])

    self.assertDictEqual(
      response,

      {
        'states': {
          self.hash1: True,
          self.hash2: False,
        },
      }
    )
|
en
| 0.876585
|
# coding=utf-8 # noinspection SpellCheckingInspection Request is valid. Request contains values that can be converted to the expected types. # Any TrytesCompatible value can be used here. Request is empty. Request contains unexpected parameters. # Uh, before we dock, I think we ought to discuss the bonus # situation. ``hashes`` is null. ``hashes`` is not an array. # It's gotta be an array, even if there's only one hash. ``hashes`` is an array, but it is empty. ``hashes`` is a non-empty array, but it contains invalid values. # This is actually valid; I just added it to make sure the # filter isn't cheating! # noinspection SpellCheckingInspection # Define some tryte sequences that we can re-use across tests. Verify that the command is wired up correctly. Successfully requesting latest inclusion state. # ``getNodeInfo`` returns lots of info, but the only value that # matters for this test is ``latestSolidSubtangleMilestone``.
| 2.245391
| 2
|
tests/web_platform/css_grid_1/grid_items/test_grid_inline_order_property_auto_placement.py
|
jonboland/colosseum
| 71
|
6625390
|
<reponame>jonboland/colosseum
from tests.utils import W3CTestCase
class TestGridInlineOrderPropertyAutoPlacement(W3CTestCase):
    # Dynamically attach one test method per W3C reference test whose file
    # name starts with the given prefix (see W3CTestCase.find_tests).
    vars().update(W3CTestCase.find_tests(__file__, 'grid-inline-order-property-auto-placement-'))
|
from tests.utils import W3CTestCase
class TestGridInlineOrderPropertyAutoPlacement(W3CTestCase):
    # Dynamically attach one test method per W3C reference test whose file
    # name starts with the given prefix (see W3CTestCase.find_tests).
    vars().update(W3CTestCase.find_tests(__file__, 'grid-inline-order-property-auto-placement-'))
|
none
| 1
| 1.710456
| 2
|
|
dig/phone/matchphone.py
|
usc-isi-i2/dig-features
| 0
|
6625391
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: matchphone.py
'''
dig.phone.matchphone
@author: <NAME>
@version 2.5
wat phone matching module
needs to get its data from watpara
Usage:
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
\t-s, --source:\tsource default backpage
'''
import sys
import re
import csv
import argparse
AREA_CODES= set()
# Load the set of valid area codes from the bundled TSV file; only the
# numeric code (first column) is kept for membership tests.
with open('area_code.tsv') as tsv:
    for row in csv.reader(tsv, delimiter="\t"):
        (id, adm1_abbrev, adm1_name, cities, iso3166_a2_code, country_id) = row
        AREA_CODES.add(int(id))
VERSION = '2.5'
REVISION = "$Revision: 24407 $".replace("$","")
VERBOSE = True
def validAreaCode(ac):
    """Return True if ``ac`` (string or int) is a known area code.

    Input that cannot be converted to int yields False instead of raising.
    """
    try:
        return int(ac) in AREA_CODES
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:`` so that unrelated errors
        # (e.g. KeyboardInterrupt) are no longer silently swallowed.
        return False
def validPhoneNumber(ph, testAreaCode=True):
    """Return True if ``ph`` is a valid 10-digit phone number.

    The number must be ten digits with both the first and the fourth digit
    in the range 2-9. When ``testAreaCode`` is True, the leading three
    digits must also be a known area code.
    """
    if not re.search(r"""^[2-9]\d{2}[2-9]\d{6}$""", ph):
        return False
    # Pattern matched; optionally validate the area code prefix.
    return validAreaCode(ph[0:3]) if testAreaCode else True
# Canonical word lists used to generate the substitution tables below.
_UNIT_WORDS = ("one", "two", "three", "four", "five", "six", "seven", "eight", "nine")
_TENS_WORDS = ("twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety")

# Leetspeak / misspelled numeral words -> canonical spelling.
# Order matters: longer words first ("th1rteen" before "th1rty", etc.).
_MISSPELLED_SUBS = [
    (r"""th0usand""", "thousand"), (r"""th1rteen""", "thirteen"),
    (r"""f0urteen""", "fourteen"), (r"""e1ghteen""", "eighteen"),
    (r"""n1neteen""", "nineteen"), (r"""f1fteen""", "fifteen"),
    (r"""s1xteen""", "sixteen"), (r"""th1rty""", "thirty"),
    (r"""e1ghty""", "eighty"), (r"""n1nety""", "ninety"),
    (r"""fourty""", "forty"), (r"""f0urty""", "forty"),
    (r"""e1ght""", "eight"), (r"""f0rty""", "forty"),
    (r"""f1fty""", "fifty"), (r"""s1xty""", "sixty"),
    (r"""zer0""", "zero"), (r"""f0ur""", "four"),
    (r"""f1ve""", "five"), (r"""n1ne""", "nine"),
    (r"""0ne""", "one"), (r"""tw0""", "two"), (r"""s1x""", "six"),
]

# Mixed word+digit compounds: "twenty 1" / "twenty-1" / "twenty_1" ->
# "twenty-one", etc.  BUG FIX: the original wrote [\\W_] inside a raw
# string, which is a character class of the LITERAL characters backslash,
# "W" and "_" -- not "non-word characters" -- so "twenty 1" never matched.
# [\W_] is the intended class (any non-word char or underscore).
_MIXED_COMPOUND_SUBS = [
    (r"%s[\W_]{0,3}%d" % (tens, unit + 1), "%s-%s" % (tens, _UNIT_WORDS[unit]))
    for tens in _TENS_WORDS for unit in range(9)
]

# Compound numeral words: "twenty-one" / "twentyone" / "twenty_one" /
# "twenty one" -> "21", etc.
_COMPOUND_SUBS = [
    (r"%s[ _-]?%s" % (tens, _UNIT_WORDS[unit]), str((t_idx + 2) * 10 + unit + 1))
    for t_idx, tens in enumerate(_TENS_WORDS) for unit in range(9)
]

# Single numeral words -> digits.  Order matters: "seventeen" must be
# replaced before "seventy" and "seven", "thirteen" before "three", etc.
_SINGLE_SUBS = [
    (r"""seventeen""", "17"), (r"""thirteen""", "13"), (r"""fourteen""", "14"),
    (r"""eighteen""", "18"), (r"""nineteen""", "19"), (r"""fifteen""", "15"),
    (r"""sixteen""", "16"), (r"""seventy""", "70"), (r"""eleven""", "11"),
    (r"""twelve""", "12"), (r"""twenty""", "20"), (r"""thirty""", "30"),
    (r"""eighty""", "80"), (r"""ninety""", "90"), (r"""three""", "3"),
    (r"""seven""", "7"), (r"""eight""", "8"), (r"""forty""", "40"),
    (r"""fifty""", "50"), (r"""sixty""", "60"), (r"""zero""", "0"),
    (r"""four""", "4"), (r"""five""", "5"), (r"""nine""", "9"),
    (r"""one""", "1"), (r"""two""", "2"), (r"""six""", "6"),
    (r"""ten""", "10"),
]

# Letters commonly used in place of digits ("oh" before lone "o").
_LETTER_SUBS = [
    (r"""oh""", "0"), (r"""o""", "0"), (r"""i""", "1"), (r"""l""", "1"),
]

def cleanPhoneText(text):
    """Normalize obfuscated phone-number text toward plain digits.

    Lowercases *text*, strips HTML numeric character entities, repairs
    leetspeak numeral words ("f1ve" -> "five"), resolves compound numerals
    ("twenty one" -> "21"), converts single numeral words to digits, and
    finally substitutes digit-lookalike letters (o/oh -> 0, i/l -> 1).
    Returns the transformed string; non-numeral text passes through
    (possibly with o/i/l replaced).
    """
    text = text.lower()
    # Remove HTML numeric character entities outright.
    text = re.sub(r"""&#\d{1,3};""", "", text, flags=re.I)
    # Apply the ordered tables exactly in the original sequence:
    # misspellings, then mixed word+digit compounds, then word compounds.
    for table in (_MISSPELLED_SUBS, _MIXED_COMPOUND_SUBS, _COMPOUND_SUBS):
        for pattern, replacement in table:
            text = re.sub(pattern, replacement, text, flags=re.I)
    # Larger units act as suffixes ("three thousand" -> "3" + "000");
    # assumes forms like "three hundred four" never occur.
    text = re.sub(r"""hundred""", "00", text, flags=re.I)
    text = re.sub(r"""thousand""", "000", text, flags=re.I)
    # Single numeral words, then letter-for-digit substitutions.
    for pattern, replacement in _SINGLE_SUBS + _LETTER_SUBS:
        text = re.sub(pattern, replacement, text, flags=re.I)
    return text
def makePhoneRegexp():
    """Compile and return the obfuscated 10-digit phone-number pattern.

    Group 1 captures the whole match: up to three opening brackets, then
    ten digits (area code and exchange must start with 2-9) with up to
    three junk characters between adjacent digits and up to six between
    the three digit groups.
    """
    pattern = r"""([[{(<]{0,3}[2-9][\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,6}[2-9][\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,6}\d[\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,3}\d)"""
    return re.compile(pattern)

# Compiled once at import; reused by genPhones.
PHONE_REGEXP = makePhoneRegexp()
# 3 May 2012
# new strategy: skip finditer, do the indexing ourselves
def genPhones(text):
    """Yield candidate 10-digit phone numbers found in *text*.

    The text is first normalized with cleanPhoneText, then scanned with
    PHONE_REGEXP by advancing an index manually (rather than finditer) so
    that rejected matches can be re-scanned from a slightly later offset.
    Yields each accepted number as a bare digit string.
    """
    text = cleanPhoneText(text)
    regex = PHONE_REGEXP
    idx = 0
    m = regex.search(text, idx)
    while m:
        g = m.group(1)
        start = m.start(1)
        end = m.end(1)
        # Strip every non-digit from the raw match.
        digits = re.sub(r"""\D+""", "", g)
        # Character immediately before the match, if any (to spot "*82").
        prefix = text[start-1:start] if start>0 else None
        if digits[0:2] == '82' and prefix == '*':
            # this number overlaps with a *82 sequence
            # (caller-ID-blocking prefix); skip past the "82" and rescan.
            idx += 2
        elif not validAreaCode(digits[0:3]):
            # probably a price, height, etc.
            # Advance only one char so an overlapping real number can match.
            idx += 1
        else:
            # seems good
            yield digits
            idx = end
        m = regex.search(text, idx)
def extractPhoneNumbers(text):
    """Return all phone numbers found in *text* as a list of digit strings."""
    return list(genPhones(text))
def main(argv=None):
    """Command-line entry point: print phone numbers from a file or stdin.

    BUG FIX: the original used the Python 2 ``print`` statement, which is a
    SyntaxError under Python 3; ``print(...)`` works under both.  The input
    file is now also closed deterministically via ``with``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('inputFile', nargs='?', default=None, help='input text file')
    parser.add_argument('-v','--verbose', required=False, help='verbose', action='store_true')
    # NOTE(review): *argv* is accepted but ignored (parse_args reads
    # sys.argv), preserving the original behavior.
    args = parser.parse_args()
    verbose = args.verbose
    if args.inputFile:
        # Read from the named file, making sure it is closed afterwards.
        with open(args.inputFile) as inf:
            text = inf.read()
    else:
        text = sys.stdin.read()
    print(extractPhoneNumbers(text))
# call main() if this is run as standalone
if __name__ == "__main__":
    # Exit with main()'s return value (None maps to exit status 0).
    sys.exit(main())
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: matchphone.py
'''
dig.phone.matchphone
@author: <NAME>
@version 2.5
wat phone matching module
needs to get its data from watpara
Usage:
Options:
\t-h, --help:\tprint help to STDOUT and quit
\t-v, --verbose:\tverbose output
\t-s, --source:\tsource default backpage
'''
import sys
import re
import csv
import argparse
# Set of valid NANP area codes, loaded at import time from the first column
# of area_code.tsv (tab-separated, read from the current working directory).
AREA_CODES= set()
with open('area_code.tsv') as tsv:
    for row in csv.reader(tsv, delimiter="\t"):
        # NOTE(review): `id` shadows the builtin; the unpack assumes every
        # row has exactly six columns -- confirm against area_code.tsv
        (id, adm1_abbrev, adm1_name, cities, iso3166_a2_code, country_id) = row
        AREA_CODES.add(int(id))
# Module metadata / global flags.
VERSION = '2.5'
REVISION = "$Revision: 24407 $".replace("$","")
VERBOSE = True
def validAreaCode(ac):
    """Return True if *ac* converts to an int that is a known NANP area code.

    Values that cannot be converted to int (None, "abc", ...) are simply
    invalid, not errors.
    """
    # Catch only conversion failures; the original bare ``except:`` also
    # swallowed KeyboardInterrupt, NameError, etc., hiding real bugs.
    try:
        return int(ac) in AREA_CODES
    except (TypeError, ValueError):
        return False
def validPhoneNumber(ph, testAreaCode=True):
    """Return True if *ph* is a bare 10-digit NANP number (NXX NXX XXXX).

    The first digit of the area code and of the exchange must be 2-9.
    When *testAreaCode* is true, the leading three digits must also be a
    known area code.
    """
    # Guard clause: reject anything that is not exactly ten digits in
    # NXX-NXX-XXXX shape.
    if not re.search(r"""^[2-9]\d{2}[2-9]\d{6}$""", ph):
        return False
    return validAreaCode(ph[0:3]) if testAreaCode else True
# Canonical word lists used to generate the substitution tables below.
_UNIT_WORDS = ("one", "two", "three", "four", "five", "six", "seven", "eight", "nine")
_TENS_WORDS = ("twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety")

# Leetspeak / misspelled numeral words -> canonical spelling.
# Order matters: longer words first ("th1rteen" before "th1rty", etc.).
_MISSPELLED_SUBS = [
    (r"""th0usand""", "thousand"), (r"""th1rteen""", "thirteen"),
    (r"""f0urteen""", "fourteen"), (r"""e1ghteen""", "eighteen"),
    (r"""n1neteen""", "nineteen"), (r"""f1fteen""", "fifteen"),
    (r"""s1xteen""", "sixteen"), (r"""th1rty""", "thirty"),
    (r"""e1ghty""", "eighty"), (r"""n1nety""", "ninety"),
    (r"""fourty""", "forty"), (r"""f0urty""", "forty"),
    (r"""e1ght""", "eight"), (r"""f0rty""", "forty"),
    (r"""f1fty""", "fifty"), (r"""s1xty""", "sixty"),
    (r"""zer0""", "zero"), (r"""f0ur""", "four"),
    (r"""f1ve""", "five"), (r"""n1ne""", "nine"),
    (r"""0ne""", "one"), (r"""tw0""", "two"), (r"""s1x""", "six"),
]

# Mixed word+digit compounds: "twenty 1" / "twenty-1" / "twenty_1" ->
# "twenty-one", etc.  BUG FIX: the original wrote [\\W_] inside a raw
# string, which is a character class of the LITERAL characters backslash,
# "W" and "_" -- not "non-word characters" -- so "twenty 1" never matched.
# [\W_] is the intended class (any non-word char or underscore).
_MIXED_COMPOUND_SUBS = [
    (r"%s[\W_]{0,3}%d" % (tens, unit + 1), "%s-%s" % (tens, _UNIT_WORDS[unit]))
    for tens in _TENS_WORDS for unit in range(9)
]

# Compound numeral words: "twenty-one" / "twentyone" / "twenty_one" /
# "twenty one" -> "21", etc.
_COMPOUND_SUBS = [
    (r"%s[ _-]?%s" % (tens, _UNIT_WORDS[unit]), str((t_idx + 2) * 10 + unit + 1))
    for t_idx, tens in enumerate(_TENS_WORDS) for unit in range(9)
]

# Single numeral words -> digits.  Order matters: "seventeen" must be
# replaced before "seventy" and "seven", "thirteen" before "three", etc.
_SINGLE_SUBS = [
    (r"""seventeen""", "17"), (r"""thirteen""", "13"), (r"""fourteen""", "14"),
    (r"""eighteen""", "18"), (r"""nineteen""", "19"), (r"""fifteen""", "15"),
    (r"""sixteen""", "16"), (r"""seventy""", "70"), (r"""eleven""", "11"),
    (r"""twelve""", "12"), (r"""twenty""", "20"), (r"""thirty""", "30"),
    (r"""eighty""", "80"), (r"""ninety""", "90"), (r"""three""", "3"),
    (r"""seven""", "7"), (r"""eight""", "8"), (r"""forty""", "40"),
    (r"""fifty""", "50"), (r"""sixty""", "60"), (r"""zero""", "0"),
    (r"""four""", "4"), (r"""five""", "5"), (r"""nine""", "9"),
    (r"""one""", "1"), (r"""two""", "2"), (r"""six""", "6"),
    (r"""ten""", "10"),
]

# Letters commonly used in place of digits ("oh" before lone "o").
_LETTER_SUBS = [
    (r"""oh""", "0"), (r"""o""", "0"), (r"""i""", "1"), (r"""l""", "1"),
]

def cleanPhoneText(text):
    """Normalize obfuscated phone-number text toward plain digits.

    Lowercases *text*, strips HTML numeric character entities, repairs
    leetspeak numeral words ("f1ve" -> "five"), resolves compound numerals
    ("twenty one" -> "21"), converts single numeral words to digits, and
    finally substitutes digit-lookalike letters (o/oh -> 0, i/l -> 1).
    Returns the transformed string; non-numeral text passes through
    (possibly with o/i/l replaced).
    """
    text = text.lower()
    # Remove HTML numeric character entities outright.
    text = re.sub(r"""&#\d{1,3};""", "", text, flags=re.I)
    # Apply the ordered tables exactly in the original sequence:
    # misspellings, then mixed word+digit compounds, then word compounds.
    for table in (_MISSPELLED_SUBS, _MIXED_COMPOUND_SUBS, _COMPOUND_SUBS):
        for pattern, replacement in table:
            text = re.sub(pattern, replacement, text, flags=re.I)
    # Larger units act as suffixes ("three thousand" -> "3" + "000");
    # assumes forms like "three hundred four" never occur.
    text = re.sub(r"""hundred""", "00", text, flags=re.I)
    text = re.sub(r"""thousand""", "000", text, flags=re.I)
    # Single numeral words, then letter-for-digit substitutions.
    for pattern, replacement in _SINGLE_SUBS + _LETTER_SUBS:
        text = re.sub(pattern, replacement, text, flags=re.I)
    return text
def makePhoneRegexp():
    """Compile and return the obfuscated 10-digit phone-number pattern.

    Group 1 captures the whole match: up to three opening brackets, then
    ten digits (area code and exchange must start with 2-9) with up to
    three junk characters between adjacent digits and up to six between
    the three digit groups.
    """
    pattern = r"""([[{(<]{0,3}[2-9][\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,6}[2-9][\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,6}\d[\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,3}\d)"""
    return re.compile(pattern)

# Compiled once at import; reused by genPhones.
PHONE_REGEXP = makePhoneRegexp()
# 3 May 2012
# new strategy: skip finditer, do the indexing ourselves
def genPhones(text):
    """Yield candidate 10-digit phone numbers found in *text*.

    The text is first normalized with cleanPhoneText, then scanned with
    PHONE_REGEXP by advancing an index manually (rather than finditer) so
    that rejected matches can be re-scanned from a slightly later offset.
    Yields each accepted number as a bare digit string.
    """
    text = cleanPhoneText(text)
    regex = PHONE_REGEXP
    idx = 0
    m = regex.search(text, idx)
    while m:
        g = m.group(1)
        start = m.start(1)
        end = m.end(1)
        # Strip every non-digit from the raw match.
        digits = re.sub(r"""\D+""", "", g)
        # Character immediately before the match, if any (to spot "*82").
        prefix = text[start-1:start] if start>0 else None
        if digits[0:2] == '82' and prefix == '*':
            # this number overlaps with a *82 sequence
            # (caller-ID-blocking prefix); skip past the "82" and rescan.
            idx += 2
        elif not validAreaCode(digits[0:3]):
            # probably a price, height, etc.
            # Advance only one char so an overlapping real number can match.
            idx += 1
        else:
            # seems good
            yield digits
            idx = end
        m = regex.search(text, idx)
def extractPhoneNumbers(text):
    """Return all phone numbers found in *text* as a list of digit strings."""
    return list(genPhones(text))
def main(argv=None):
    '''Command-line entry point: read text from a file (or stdin when no
    file is given) and print the phone numbers extracted from it.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('inputFile', nargs='?', default=None, help='input text file')
    parser.add_argument('-v', '--verbose', required=False, help='verbose', action='store_true')
    args = parser.parse_args()
    verbose = args.verbose  # currently unused; kept for interface stability
    if args.inputFile:
        # BUG FIX: close the file deterministically instead of leaking the handle.
        with open(args.inputFile) as inf:
            text = inf.read()
    else:
        text = sys.stdin.read()
    # BUG FIX: the original used the Python 2-only print *statement*;
    # the call form below behaves identically for a single argument.
    print(extractPhoneNumbers(text))
# call main() if this is run as standalone
if __name__ == "__main__":
sys.exit(main())
|
en
| 0.42563
|
#!/usr/bin/python # -*- coding: utf-8 -*- # Filename: matchphone.py dig.phone.matchphone @author: <NAME> @version 2.5 wat phone matching module needs to get its data from watpara Usage: Options: \t-h, --help:\tprint help to STDOUT and quit \t-v, --verbose:\tverbose output \t-s, --source:\tsource default backpage ^[2-9]\d{2}[2-9]\d{6}$ # simply remove numeric entities &#\d{1,3}; # re.sub(pattern,replacement,string, flags=re.I | re.G) # misspelled numeral words th0usand th1rteen f0urteen e1ghteen n1neteen f1fteen s1xteen th1rty e1ghty n1nety fourty f0urty e1ght f0rty f1fty s1xty zer0 f0ur f1ve n1ne 0ne tw0 s1x # mixed compound numeral words # consider 7teen, etc. twenty[\\W_]{0,3}1 twenty[\\W_]{0,3}2 twenty[\\W_]{0,3}3 twenty[\\W_]{0,3}4 twenty[\\W_]{0,3}5 twenty[\\W_]{0,3}6 twenty[\\W_]{0,3}7 twenty[\\W_]{0,3}8 twenty[\\W_]{0,3}9 thirty[\\W_]{0,3}1 thirty[\\W_]{0,3}2 thirty[\\W_]{0,3}3 thirty[\\W_]{0,3}4 thirty[\\W_]{0,3}5 thirty[\\W_]{0,3}6 thirty[\\W_]{0,3}7 thirty[\\W_]{0,3}8 thirty[\\W_]{0,3}9 forty[\\W_]{0,3}1 forty[\\W_]{0,3}2 forty[\\W_]{0,3}3 forty[\\W_]{0,3}4 forty[\\W_]{0,3}5 forty[\\W_]{0,3}6 forty[\\W_]{0,3}7 forty[\\W_]{0,3}8 forty[\\W_]{0,3}9 fifty[\\W_]{0,3}1 fifty[\\W_]{0,3}2 fifty[\\W_]{0,3}3 fifty[\\W_]{0,3}4 fifty[\\W_]{0,3}5 fifty[\\W_]{0,3}6 fifty[\\W_]{0,3}7 fifty[\\W_]{0,3}8 fifty[\\W_]{0,3}9 sixty[\\W_]{0,3}1 sixty[\\W_]{0,3}2 sixty[\\W_]{0,3}3 sixty[\\W_]{0,3}4 sixty[\\W_]{0,3}5 sixty[\\W_]{0,3}6 sixty[\\W_]{0,3}7 sixty[\\W_]{0,3}8 sixty[\\W_]{0,3}9 seventy[\\W_]{0,3}1 seventy[\\W_]{0,3}2 seventy[\\W_]{0,3}3 seventy[\\W_]{0,3}4 seventy[\\W_]{0,3}5 seventy[\\W_]{0,3}6 seventy[\\W_]{0,3}7 seventy[\\W_]{0,3}8 seventy[\\W_]{0,3}9 eighty[\\W_]{0,3}1 eighty[\\W_]{0,3}2 eighty[\\W_]{0,3}3 eighty[\\W_]{0,3}4 eighty[\\W_]{0,3}5 eighty[\\W_]{0,3}6 eighty[\\W_]{0,3}7 eighty[\\W_]{0,3}8 eighty[\\W_]{0,3}9 ninety[\\W_]{0,3}1 ninety[\\W_]{0,3}2 ninety[\\W_]{0,3}3 ninety[\\W_]{0,3}4 ninety[\\W_]{0,3}5 ninety[\\W_]{0,3}6 ninety[\\W_]{0,3}7 
ninety[\\W_]{0,3}8 ninety[\\W_]{0,3}9 # now resolve compound numeral words # allow twenty-one, twentyone, twenty_one, twenty one twenty[ _-]?one twenty[ _-]?two twenty[ _-]?three twenty[ _-]?four twenty[ _-]?five twenty[ _-]?six twenty[ _-]?seven twenty[ _-]?eight twenty[ _-]?nine thirty[ _-]?one thirty[ _-]?two thirty[ _-]?three thirty[ _-]?four thirty[ _-]?five thirty[ _-]?six thirty[ _-]?seven thirty[ _-]?eight thirty[ _-]?nine forty[ _-]?one forty[ _-]?two forty[ _-]?three forty[ _-]?four forty[ _-]?five forty[ _-]?six forty[ _-]?seven forty[ _-]?eight forty[ _-]?nine fifty[ _-]?one fifty[ _-]?two fifty[ _-]?three fifty[ _-]?four fifty[ _-]?five fifty[ _-]?six fifty[ _-]?seven fifty[ _-]?eight fifty[ _-]?nine sixty[ _-]?one sixty[ _-]?two sixty[ _-]?three sixty[ _-]?four sixty[ _-]?five sixty[ _-]?six sixty[ _-]?seven sixty[ _-]?eight sixty[ _-]?nine seventy[ _-]?one seventy[ _-]?two seventy[ _-]?three seventy[ _-]?four seventy[ _-]?five seventy[ _-]?six seventy[ _-]?seven seventy[ _-]?eight seventy[ _-]?nine eighty[ _-]?one eighty[ _-]?two eighty[ _-]?three eighty[ _-]?four eighty[ _-]?five eighty[ _-]?six eighty[ _-]?seven eighty[ _-]?eight eighty[ _-]?nine ninety[ _-]?one ninety[ _-]?two ninety[ _-]?three ninety[ _-]?four ninety[ _-]?five ninety[ _-]?six ninety[ _-]?seven ninety[ _-]?eight ninety[ _-]?nine # larger units function as suffixes now # assume never have three hundred four, three hundred and four hundred thousand # single numeral words now # some would have been ambiguous seventeen thirteen fourteen eighteen nineteen fifteen sixteen seventy eleven twelve twenty thirty eighty ninety three seven eight forty fifty sixty zero four five nine one two six ten # now do letter for digit substitutions oh o i l ([[{(<]{0,3}[2-9][\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,6}[2-9][\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,6}\d[\W_]{0,3}\d[\W_]{0,3}\d[\W_]{0,3}\d) # 3 May 2012 # new strategy: skip finditer, do the indexing ourselves \D+ # this number overlaps with a *82 sequence # 
probably a price, height, etc. # seems good this is called if run from command line # call main() if this is run as standalone
| 3.043629
| 3
|
Programiz/Calculate the Area of a Triangle/Sol.py
|
Pandz18/C-Programs
| 0
|
6625392
|
<filename>Programiz/Calculate the Area of a Triangle/Sol.py
# Read the triangle's dimensions.  float() accepts both integer and
# decimal input (the original int() rejected values such as "2.5");
# integer input still yields the same printed result because / is
# true division either way.
h = float(input("Enter height"))
w = float(input("Enter width"))
# Area of a triangle: (base * height) / 2.
area = (h * w) / 2
print("Area of the triangle: " + str(area))
|
<filename>Programiz/Calculate the Area of a Triangle/Sol.py
# Read the triangle's dimensions.  float() accepts both integer and
# decimal input (the original int() rejected values such as "2.5");
# integer input still yields the same printed result because / is
# true division either way.
h = float(input("Enter height"))
w = float(input("Enter width"))
# Area of a triangle: (base * height) / 2.
area = (h * w) / 2
print("Area of the triangle: " + str(area))
|
none
| 1
| 4.0988
| 4
|
|
tests/test_notes.py
|
danilakritsky/pt1-43-21-14
| 0
|
6625393
|
"""This modules contains tests for the api routes."""
import json
from pydantic import ValidationError
import pytest
import unittest.mock as mock
from app.api import crud
from bson.objectid import ObjectId
def test_create_note(test_client, test_note_id, monkeypatch):
test_request_body = {"title": "New Note", "body": "Hello, World!"}
test_response_body = {
"_id": test_note_id,
"title": "New Note",
"body": "Hello, World!",
}
async def mock_create(data):
return test_response_body
monkeypatch.setattr(crud, "create", mock_create)
response = test_client.post(
"/notes", data=json.dumps(test_request_body)
) # jsonifies the body
print(response.json()) # jsonifies response
assert response.status_code == 200
assert response.json() == test_response_body
def test_create_note_invalid(test_client, monkeypatch):
response = test_client.post("/notes", data=json.dumps({"title": 1}))
assert response.status_code == 422
def test_read_note(test_client, test_note_id, monkeypatch):
test_data = {
"_id": test_note_id,
"title": "Note to be read.",
"body": "Read!",
}
async def mock_read(id):
return test_data
monkeypatch.setattr(crud, "read", mock_read)
response = test_client.get(f"/notes/{test_note_id}")
assert response.status_code == 200
assert response.json() == test_data
def test_read_note_invalid_id(test_client, monkeypatch):
async def mock_read(id):
return None
monkeypatch.setattr(crud, "read", mock_read)
response = test_client.get("/notes/42")
assert response.status_code == 404
assert response.json()["detail"] == "Note '42' not found"
def test_read_all_notes(test_client, test_note_id, monkeypatch):
test_data = [
{
"_id": test_note_id,
"title": "something",
"body": "something else",
},
{
"_id": test_note_id.replace("1", "2"),
"title": "someone",
"body": "someone else",
},
]
async def mock_read_all():
return test_data
monkeypatch.setattr(crud, "read_all", mock_read_all)
response = test_client.get("/notes")
assert response.status_code == 200
assert response.json() == test_data
def test_update_note(test_client, test_note_id, monkeypatch):
test_update_data = {"title": "Set this new title", "body": "Set this new body"}
async def mock_update(id, new_data):
return {**{"_id": test_note_id}, **test_update_data}
monkeypatch.setattr(crud, "update", mock_update)
response = test_client.put(f"/notes/{test_note_id}", data=json.dumps(test_update_data))
# TODO
assert response.status_code == 200
assert response.json() == {**{"_id": test_note_id}, **test_update_data}
@pytest.mark.parametrize(
"id, new_data",
[
["0", {}],
["1", {"title": "Set this title"}],
["2", {"title": "Set this title", "body": "Set this body"}],
],
)
def test_update_note_indalid_id(test_client, monkeypatch, id, new_data):
async def mock_update(id, new_data):
return None
monkeypatch.setattr(crud, "update", mock_update)
response = test_client.put(f"/notes/{id}", data=json.dumps(new_data))
assert response.status_code == 404
def test_delete_note(test_client, monkeypatch, test_note_id):
# create a mock DeleteResult object
mock_resp = mock.MagicMock()
mock_resp.__bool__.return_value = True
mock_resp.deleted_count = 1
async def mock_delete(id):
return mock_resp
monkeypatch.setattr(crud, "delete", mock_delete)
response = test_client.delete(f"/notes/{test_note_id}")
assert response.status_code == 204
def test_delete_note_invalid_id(test_client, monkeypatch, test_note_id):
    """Deleting a note whose id is not found must yield 404 with a detail message."""
    async def mock_delete(id):
        return False
    # BUG FIX: the mock was defined but never installed, so the route hit
    # the real crud.delete instead of this stub returning False.
    monkeypatch.setattr(crud, "delete", mock_delete)
    response = test_client.delete(f"/notes/{test_note_id}")
    assert response.status_code == 404
    assert response.json()["detail"] == f"Note {test_note_id!r} not found"
|
"""This modules contains tests for the api routes."""
import json
from pydantic import ValidationError
import pytest
import unittest.mock as mock
from app.api import crud
from bson.objectid import ObjectId
def test_create_note(test_client, test_note_id, monkeypatch):
test_request_body = {"title": "New Note", "body": "Hello, World!"}
test_response_body = {
"_id": test_note_id,
"title": "New Note",
"body": "Hello, World!",
}
async def mock_create(data):
return test_response_body
monkeypatch.setattr(crud, "create", mock_create)
response = test_client.post(
"/notes", data=json.dumps(test_request_body)
) # jsonifies the body
print(response.json()) # jsonifies response
assert response.status_code == 200
assert response.json() == test_response_body
def test_create_note_invalid(test_client, monkeypatch):
response = test_client.post("/notes", data=json.dumps({"title": 1}))
assert response.status_code == 422
def test_read_note(test_client, test_note_id, monkeypatch):
test_data = {
"_id": test_note_id,
"title": "Note to be read.",
"body": "Read!",
}
async def mock_read(id):
return test_data
monkeypatch.setattr(crud, "read", mock_read)
response = test_client.get(f"/notes/{test_note_id}")
assert response.status_code == 200
assert response.json() == test_data
def test_read_note_invalid_id(test_client, monkeypatch):
async def mock_read(id):
return None
monkeypatch.setattr(crud, "read", mock_read)
response = test_client.get("/notes/42")
assert response.status_code == 404
assert response.json()["detail"] == "Note '42' not found"
def test_read_all_notes(test_client, test_note_id, monkeypatch):
test_data = [
{
"_id": test_note_id,
"title": "something",
"body": "something else",
},
{
"_id": test_note_id.replace("1", "2"),
"title": "someone",
"body": "someone else",
},
]
async def mock_read_all():
return test_data
monkeypatch.setattr(crud, "read_all", mock_read_all)
response = test_client.get("/notes")
assert response.status_code == 200
assert response.json() == test_data
def test_update_note(test_client, test_note_id, monkeypatch):
test_update_data = {"title": "Set this new title", "body": "Set this new body"}
async def mock_update(id, new_data):
return {**{"_id": test_note_id}, **test_update_data}
monkeypatch.setattr(crud, "update", mock_update)
response = test_client.put(f"/notes/{test_note_id}", data=json.dumps(test_update_data))
# TODO
assert response.status_code == 200
assert response.json() == {**{"_id": test_note_id}, **test_update_data}
@pytest.mark.parametrize(
"id, new_data",
[
["0", {}],
["1", {"title": "Set this title"}],
["2", {"title": "Set this title", "body": "Set this body"}],
],
)
def test_update_note_indalid_id(test_client, monkeypatch, id, new_data):
async def mock_update(id, new_data):
return None
monkeypatch.setattr(crud, "update", mock_update)
response = test_client.put(f"/notes/{id}", data=json.dumps(new_data))
assert response.status_code == 404
def test_delete_note(test_client, monkeypatch, test_note_id):
# create a mock DeleteResult object
mock_resp = mock.MagicMock()
mock_resp.__bool__.return_value = True
mock_resp.deleted_count = 1
async def mock_delete(id):
return mock_resp
monkeypatch.setattr(crud, "delete", mock_delete)
response = test_client.delete(f"/notes/{test_note_id}")
assert response.status_code == 204
def test_delete_note_invalid_id(test_client, monkeypatch, test_note_id):
    """Deleting a note whose id is not found must yield 404 with a detail message."""
    async def mock_delete(id):
        return False
    # BUG FIX: the mock was defined but never installed, so the route hit
    # the real crud.delete instead of this stub returning False.
    monkeypatch.setattr(crud, "delete", mock_delete)
    response = test_client.delete(f"/notes/{test_note_id}")
    assert response.status_code == 404
    assert response.json()["detail"] == f"Note {test_note_id!r} not found"
|
en
| 0.560004
|
This modules contains tests for the api routes. # jsonifies the body # jsonifies response # TODO # create a mock DeleteResult object
| 2.650434
| 3
|
sources/rnt/mediane/algorithms/ailon/RepeatChoice.py
|
bryan-brancotte/rank-aggregation-with-ties
| 0
|
6625394
|
from typing import List
from random import shuffle
from mediane.algorithms.median_ranking import MedianRanking, DistanceNotHandledException
from mediane.distances.enumeration import GENERALIZED_KENDALL_TAU_DISTANCE, GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE, \
GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION
from mediane.normalizations.unification import Unification
from numpy import zeros, argmax, asarray, array_equal, array
from functools import cmp_to_key
class RepeatChoice(MedianRanking):
    """Rank-aggregation heuristic: elements are ordered by comparing, over a
    shuffled copy of the input rankings, the first ranking in which one
    element is placed in a strictly earlier bucket than the other."""

    def compute_median_rankings(
            self,
            rankings: List[List[List[int]]],
            distance,
            return_at_most_one_ranking: bool = False)-> List[List[List[int]]]:
        """
        :param rankings: A set of rankings
        :type rankings: list
        :param distance: The distance to use/consider
        :type distance: Distance
        :param return_at_most_one_ranking: the algorithm should not return more than one ranking
        :type return_at_most_one_ranking: bool
        :return one or more consensus if the underlying algorithm can find multiple solution as good as each other.
        If the algorithm is not able to provide multiple consensus, or if return_at_most_one_ranking is True then, it
        should return a list made of the only / the first consensus found
        :raise DistanceNotHandledException when the algorithm cannot compute the consensus following the
        distance given as parameter
        """
        # Map the distance's scoring scheme onto one of the three handled
        # distances (see get_handled_distances); anything else is rejected.
        scoring_scheme = asarray(distance.scoring_scheme)
        if array_equal(scoring_scheme, array([[0, 1, 1, 0, 1, 1], [1, 1, 0, 1, 1, 0]])):
            dst = 0
        elif array_equal(scoring_scheme, array([[0, 1, 1, 1, 1, 1], [1, 1, 0, 1, 1, 1]])):
            dst = 1
        elif array_equal(scoring_scheme, array([[0, 1, 1, 0, 0, 0], [1, 1, 0, 0, 0, 0]])):
            dst = 2
        else:
            raise DistanceNotHandledException
        if dst == 0:
            # this scheme works on unified rankings (every element present
            # in every ranking); normalize first
            rankings_to_use = Unification.rankings_to_rankings(rankings)
        else:
            rankings_to_use = rankings
        nb_rankings = len(rankings_to_use)
        rankings_copy = list(rankings_to_use)
        shuffle(rankings_copy)  # randomizes the ranking order used for tie-breaking
        # h maps element -> array of bucket indices, one slot per shuffled
        # ranking; -1 marks "absent from that ranking"
        h = {}
        id_ranking = 0
        for ranking in rankings_copy:
            id_bucket = 0
            for bucket in ranking:
                for element in bucket:
                    if element not in h:
                        h[element] = zeros(nb_rankings, dtype=int) - 1
                    h[element][id_ranking] = id_bucket
                id_bucket += 1
            id_ranking += 1
        # Sort elements with the pairwise comparator; each element becomes
        # its own singleton bucket in the consensus.
        res = []
        for el in sorted(h.items(), key=cmp_to_key(RepeatChoice.__compare)):
            res.append([el[0]])
        # kem = KemenyComputingFactory(scoring_scheme=self.scoring_scheme)
        # kem = KendallTauGeneralizedNlogN()
        return [res]

    @staticmethod
    def __compare(e1: tuple, e2: tuple) -> int:
        """Compare two (element, positions-array) pairs: the element placed
        strictly earlier in the first ranking where the two differ sorts
        first.  NOTE(review): numpy argmax over an all-False boolean array
        returns 0, so equal position arrays compare as equal through the
        two indices both being 0."""
        first_ind_array_e1_inf_array_e2 = argmax(e1[1] < e2[1])
        first_ind_array_e2_inf_array_e1 = argmax(e2[1] < e1[1])
        if first_ind_array_e1_inf_array_e2 < first_ind_array_e2_inf_array_e1:
            return -1
        elif first_ind_array_e2_inf_array_e1 < first_ind_array_e1_inf_array_e2:
            return 1
        return 0

    def is_breaking_ties_arbitrarily(self):
        # Ties are broken by the random shuffle of the input rankings.
        return True

    def is_using_random_value(self):
        return True

    def get_full_name(self):
        return "Repeat Choice"

    def get_handled_distances(self):
        """
        :return: a list of distances from distance_enumeration
        """
        return (
            GENERALIZED_KENDALL_TAU_DISTANCE,
            GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE,
            GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION
        )
|
from typing import List
from random import shuffle
from mediane.algorithms.median_ranking import MedianRanking, DistanceNotHandledException
from mediane.distances.enumeration import GENERALIZED_KENDALL_TAU_DISTANCE, GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE, \
GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION
from mediane.normalizations.unification import Unification
from numpy import zeros, argmax, asarray, array_equal, array
from functools import cmp_to_key
class RepeatChoice(MedianRanking):
def compute_median_rankings(
self,
rankings: List[List[List[int]]],
distance,
return_at_most_one_ranking: bool = False)-> List[List[List[int]]]:
"""
:param rankings: A set of rankings
:type rankings: list
:param distance: The distance to use/consider
:type distance: Distance
:param return_at_most_one_ranking: the algorithm should not return more than one ranking
:type return_at_most_one_ranking: bool
:return one or more consensus if the underlying algorithm can find multiple solution as good as each other.
If the algorithm is not able to provide multiple consensus, or if return_at_most_one_ranking is True then, it
should return a list made of the only / the first consensus found
:raise DistanceNotHandledException when the algorit
hm cannot compute the consensus following the distance given
as parameter
"""
scoring_scheme = asarray(distance.scoring_scheme)
if array_equal(scoring_scheme, array([[0, 1, 1, 0, 1, 1], [1, 1, 0, 1, 1, 0]])):
dst = 0
elif array_equal(scoring_scheme, array([[0, 1, 1, 1, 1, 1], [1, 1, 0, 1, 1, 1]])):
dst = 1
elif array_equal(scoring_scheme, array([[0, 1, 1, 0, 0, 0], [1, 1, 0, 0, 0, 0]])):
dst = 2
else:
raise DistanceNotHandledException
if dst == 0:
rankings_to_use = Unification.rankings_to_rankings(rankings)
else:
rankings_to_use = rankings
nb_rankings = len(rankings_to_use)
rankings_copy = list(rankings_to_use)
shuffle(rankings_copy)
h = {}
id_ranking = 0
for ranking in rankings_copy:
id_bucket = 0
for bucket in ranking:
for element in bucket:
if element not in h:
h[element] = zeros(nb_rankings, dtype=int) - 1
h[element][id_ranking] = id_bucket
id_bucket += 1
id_ranking += 1
res = []
for el in sorted(h.items(), key=cmp_to_key(RepeatChoice.__compare)):
res.append([el[0]])
# kem = KemenyComputingFactory(scoring_scheme=self.scoring_scheme)
# kem = KendallTauGeneralizedNlogN()
return [res]
@staticmethod
def __compare(e1: tuple, e2: tuple) -> int:
first_ind_array_e1_inf_array_e2 = argmax(e1[1] < e2[1])
first_ind_array_e2_inf_array_e1 = argmax(e2[1] < e1[1])
if first_ind_array_e1_inf_array_e2 < first_ind_array_e2_inf_array_e1:
return -1
elif first_ind_array_e2_inf_array_e1 < first_ind_array_e1_inf_array_e2:
return 1
return 0
def is_breaking_ties_arbitrarily(self):
return True
def is_using_random_value(self):
return True
def get_full_name(self):
return "Repeat Choice"
def get_handled_distances(self):
"""
:return: a list of distances from distance_enumeration
"""
return (
GENERALIZED_KENDALL_TAU_DISTANCE,
GENERALIZED_INDUCED_KENDALL_TAU_DISTANCE,
GENERALIZED_KENDALL_TAU_DISTANCE_WITH_UNIFICATION
)
|
en
| 0.76928
|
:param rankings: A set of rankings :type rankings: list :param distance: The distance to use/consider :type distance: Distance :param return_at_most_one_ranking: the algorithm should not return more than one ranking :type return_at_most_one_ranking: bool :return one or more consensus if the underlying algorithm can find multiple solution as good as each other. If the algorithm is not able to provide multiple consensus, or if return_at_most_one_ranking is True then, it should return a list made of the only / the first consensus found :raise DistanceNotHandledException when the algorit hm cannot compute the consensus following the distance given as parameter # kem = KemenyComputingFactory(scoring_scheme=self.scoring_scheme) # kem = KendallTauGeneralizedNlogN() :return: a list of distances from distance_enumeration
| 3.002695
| 3
|
spk/subliminal/src/app/scanner.py
|
BKSteve/spksrc
| 2,211
|
6625395
|
#!/usr/local/subliminal/env/bin/python
from application.db import Session, Directory
from application.direct import Subliminal, scan, notify
import os
import sys
import argparse
class Scanner(object):
    """One-shot background scanner (Python 2): daemonizes via double fork,
    then runs subliminal's scan() on a single Directory row looked up by id."""

    def __init__(self, directory_id):
        # Database id of the Directory row to scan (string from argv).
        self.directory_id = directory_id
        self.session = Session()

    def daemonize(self):
        """Do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:  # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write('Fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir('/')
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:  # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write('Fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
            sys.exit(1)

    def start(self):
        """Start the application and daemonize"""
        self.daemonize()
        self.run()

    def run(self):
        """Scan the directory's path for subtitles; optionally send a DSM
        notification summarizing how many subtitles were downloaded."""
        directory = self.session.query(Directory).get(self.directory_id)
        if not os.path.exists(directory.path):
            # the registered path no longer exists; nothing to do
            return 0
        s = Subliminal()
        results = scan(directory.path, s.config, temp_cache=True)
        if s.config['General']['dsm_notifications']:
            notify('Downloaded %d subtitle(s) for %d video(s) in directory %s' % (sum([len(s) for s in results.itervalues()]), len(results), directory.name))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory scanner')
parser.add_argument('id', help='directory id to scan', metavar='ID')
args = parser.parse_args()
scanner = Scanner(args.id)
scanner.start()
|
#!/usr/local/subliminal/env/bin/python
from application.db import Session, Directory
from application.direct import Subliminal, scan, notify
import os
import sys
import argparse
class Scanner(object):
def __init__(self, directory_id):
self.directory_id = directory_id
self.session = Session()
def daemonize(self):
"""Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0: # exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write('Fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0: # exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write('Fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
def start(self):
"""Start the application and daemonize"""
self.daemonize()
self.run()
def run(self):
directory = self.session.query(Directory).get(self.directory_id)
if not os.path.exists(directory.path):
return 0
s = Subliminal()
results = scan(directory.path, s.config, temp_cache=True)
if s.config['General']['dsm_notifications']:
notify('Downloaded %d subtitle(s) for %d video(s) in directory %s' % (sum([len(s) for s in results.itervalues()]), len(results), directory.name))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory scanner')
parser.add_argument('id', help='directory id to scan', metavar='ID')
args = parser.parse_args()
scanner = Scanner(args.id)
scanner.start()
|
en
| 0.50331
|
#!/usr/local/subliminal/env/bin/python Do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 # exit first parent #1 failed: %d (%s)\n' % (e.errno, e.strerror)) # decouple from parent environment # do second fork # exit from second parent #2 failed: %d (%s)\n' % (e.errno, e.strerror)) Start the application and daemonize
| 2.550938
| 3
|
src/test_train_model.py
|
iDataist/Deploying-a-Scalable-ML-Pipeline-in-Production
| 0
|
6625396
|
<reponame>iDataist/Deploying-a-Scalable-ML-Pipeline-in-Production<filename>src/test_train_model.py
import os
import numpy as np
import logging
import pytest
import constants
from ml.data import (
import_data,
perform_eda,
scaler,
encoder,
perform_train_test_split,
)
from ml.model import train_models, inference, compute_model_metrics
logging.basicConfig(
level=logging.INFO,
filename=constants.log_path,
format="%(asctime)-15s %(message)s",
)
logger = logging.getLogger()
def test_import_data():
    """
    test data import: the file at constants.data_path must load into a
    non-empty dataframe, cached on the pytest module for the later tests
    """
    try:
        df = import_data(constants.data_path)
        pytest.df = df
        logger.info("Testing import_data: SUCCESS")
    except Exception as err:
        # BUG FIX: the message was a plain literal ("{err}" was never
        # interpolated) and named the wrong function; use lazy %-args.
        logger.error("Testing import_data: %s", err)
        raise err
    try:
        assert df.shape[0] > 0
        assert df.shape[1] > 0
    except AssertionError as err:
        logger.error(
            "Testing import_data: \
            the file doesn't appear to have rows and columns"
        )
        raise err
def test_eda():
    """
    test perform eda function: running perform_eda on the cached dataframe
    must leave a report file at constants.eda_output_path
    """
    try:
        perform_eda(pytest.df, constants.eda_output_path)
        assert os.path.isfile(constants.eda_output_path)
        logger.info("Testing perform_eda: SUCCESS")
    except AssertionError as err:
        logger.error(
            "Testing perform_eda: the eda_output report can't be found"
        )
        raise err
def test_scaler():
    """
    test scaler: scaling the quantitative columns must preserve the
    dataframe's shape; the result is cached for the encoder test
    """
    try:
        scaled_df = scaler(pytest.df, constants.quant_columns)
        pytest.scaled_df = scaled_df
        logger.info("Testing scaler: SUCCESS")
    except ValueError as err:
        # BUG FIX: "{err}" was a plain literal, never interpolated;
        # use lazy %-style logging arguments instead.
        logger.error("Testing scaler: %s", err)
        raise err
    try:
        assert scaled_df.shape[0] == pytest.df.shape[0]
        assert scaled_df.shape[1] == pytest.df.shape[1]
    except AssertionError as err:
        logger.error("Testing scaler: the shape of the dataframe changed")
        raise err
def test_encoder():
    """
    test encoder: encoding the categorical columns must leave no
    non-numerical columns; the result is cached for the split test
    """
    try:
        encoded_df = encoder(
            pytest.scaled_df, constants.cat_columns
        )
        pytest.encoded_df = encoded_df
        logger.info("Testing encoder: SUCCESS")
    except Exception as err:
        # BUG FIX: "{err}" was a plain literal, never interpolated;
        # use lazy %-style logging arguments instead.
        logger.error("Testing encoder: %s", err)
        raise err
    try:
        assert encoded_df.select_dtypes(exclude=[np.number]).shape[1] == 0
    except AssertionError as err:
        logger.error("Testing encoder: there are still non-numerical values")
        raise err
def test_perform_train_test_split():
    """
    test perform_train_test_split: splitting the encoded dataframe must
    partition all rows and respect the configured test_size ratio
    (docstring fixed -- it previously said "test encoder")
    """
    try:
        split = perform_train_test_split(
            pytest.encoded_df,
            constants.target,
            constants.test_size,
            constants.random_state,
        )
        pytest.split = split
        X_train, X_test, y_train, y_test = split
        logger.info("Testing perform_train_test_split: SUCCESS")
    except Exception as err:
        logger.error(f"Testing perform_train_test_split: {err}")
        raise err
    try:
        assert len(X_train) + len(X_test) == len(pytest.df)
        assert len(y_train) + len(y_test) == len(pytest.df)
        assert round(len(X_test) / len(pytest.df), 1) == constants.test_size
    except AssertionError as err:
        # BUG FIX: "{err}" was a plain literal, never interpolated;
        # use lazy %-style logging arguments instead.
        logger.error("Testing perform_train_test_split: %s", err)
        raise err
def test_train_models():
    """
    test train_models: training on the cached split must write a model
    artifact to constants.model_output_path
    """
    X_train, X_test, y_train, y_test = pytest.split
    try:
        train_models(
            X_train,
            X_test,
            y_train,
            y_test,
            constants.image_output_path,
            constants.model_output_path,
        )
        logger.info("Testing train_models: SUCCESS")
    except Exception as err:
        logger.error(err)
        raise err
    try:
        assert os.path.isfile(constants.model_output_path)
    except AssertionError as err:
        # BUG FIX: "{err}" was a plain literal, never interpolated;
        # use lazy %-style logging arguments instead.
        logger.error("Testing train_models: %s", err)
        raise err
if __name__ == "__main__":
logger.info("############################################################")
logger.info("start tests")
pytest.main(args=['-s', os.path.abspath(__file__)])
|
import os
import numpy as np
import logging
import pytest
import constants
from ml.data import (
import_data,
perform_eda,
scaler,
encoder,
perform_train_test_split,
)
from ml.model import train_models, inference, compute_model_metrics
logging.basicConfig(
level=logging.INFO,
filename=constants.log_path,
format="%(asctime)-15s %(message)s",
)
logger = logging.getLogger()
def test_import_data():
"""
test data import
"""
try:
df = import_data(constants.data_path)
pytest.df = df
logger.info("Testing import_data: SUCCESS")
except Exception as err:
logger.error("Testing import_eda: {err}")
raise err
try:
assert df.shape[0] > 0
assert df.shape[1] > 0
except AssertionError as err:
logger.error(
"Testing import_data: \
the file doesn't appear to have rows and columns"
)
raise err
def test_eda():
"""
test perform eda function
"""
try:
perform_eda(pytest.df, constants.eda_output_path)
assert os.path.isfile(constants.eda_output_path)
logger.info("Testing perform_eda: SUCCESS")
except AssertionError as err:
logger.error(
"Testing perform_eda: the eda_output report can't be found"
)
raise err
def test_scaler():
"""
test scaler
"""
try:
scaled_df = scaler(pytest.df, constants.quant_columns)
pytest.scaled_df = scaled_df
logger.info("Testing scaler: SUCCESS")
except ValueError as err:
logger.error("Testing scaler: {err}")
raise err
try:
assert scaled_df.shape[0] == pytest.df.shape[0]
assert scaled_df.shape[1] == pytest.df.shape[1]
except AssertionError as err:
logger.error("Testing scaler: the shape of the dataframe changed")
raise err
def test_encoder():
"""
test encoder
"""
try:
encoded_df = encoder(
pytest.scaled_df, constants.cat_columns
)
pytest.encoded_df = encoded_df
logger.info("Testing encoder: SUCCESS")
except Exception as err:
logger.error("Testing encoder: {err}")
raise err
try:
assert encoded_df.select_dtypes(exclude=[np.number]).shape[1] == 0
except AssertionError as err:
logger.error("Testing encoder: there are still non-numerical values")
raise err
def test_perform_train_test_split():
"""
test encoder
"""
try:
split = perform_train_test_split(
pytest.encoded_df,
constants.target,
constants.test_size,
constants.random_state,
)
pytest.split = split
X_train, X_test, y_train, y_test = split
logger.info("Testing perform_train_test_split: SUCCESS")
except Exception as err:
logger.error(f"Testing perform_train_test_split: {err}")
raise err
try:
assert len(X_train) + len(X_test) == len(pytest.df)
assert len(y_train) + len(y_test) == len(pytest.df)
assert round(len(X_test) / len(pytest.df), 1) == constants.test_size
except AssertionError as err:
logger.error("Testing perform_train_test_split: {err}")
raise err
def test_train_models():
"""
test train_models
"""
X_train, X_test, y_train, y_test = pytest.split
try:
train_models(
X_train,
X_test,
y_train,
y_test,
constants.image_output_path,
constants.model_output_path,
)
logger.info("Testing train_models: SUCCESS")
except Exception as err:
logger.error(err)
raise err
try:
assert os.path.isfile(constants.model_output_path)
except AssertionError as err:
logger.error("Testing train_models: {err}")
raise err
if __name__ == "__main__":
logger.info("############################################################")
logger.info("start tests")
pytest.main(args=['-s', os.path.abspath(__file__)])
|
de
| 0.421695
|
test data import test perform eda function test scaler test encoder test encoder test train_models ###########################################################")
| 2.394765
| 2
|
setup.py
|
invenia/GitLabChangelog
| 0
|
6625397
|
import codecs
from os.path import abspath, dirname, join
from setuptools import find_packages, setup
# Dependency groups surfaced through extras_require below.
TEST_DEPS = ["coverage[toml]", "pytest", "pytest-cov"]
DOCS_DEPS = ["sphinx", "sphinx-rtd-theme", "sphinx-autoapi", "recommonmark"]
CHECK_DEPS = [
    "isort",
    "flake8",
    "flake8-quotes",
    "pep8-naming",
    "mypy",
    "black",
    "types-python-dateutil",
]
EXTRAS = {
    "test": TEST_DEPS,
    "docs": DOCS_DEPS,
    "check": CHECK_DEPS,
    "dev": TEST_DEPS + DOCS_DEPS + CHECK_DEPS,
}
# Runtime requirements.
REQUIREMENTS = [
    "Jinja2>=2.10.3",
    "python-dateutil>=2.8.1",
    "python-gitlab>=1.8.0",
    "semver>=2.9.0",
]
# Read in the version
with open(join(dirname(abspath(__file__)), "VERSION")) as version_file:
    version = version_file.read().strip()
description = "Produces GitLab changelog release notes for projects that follow SemVer"
# BUG FIX: read the README through a context manager; the original passed
# codecs.open(...).read() inline and left the handle unclosed.
with codecs.open("README.md", "r", "utf-8") as readme_file:
    long_description = readme_file.read()
setup(
    name="GitLabChangelog",
    version=version,
    description=description,
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Invenia Technical Computing",
    url="https://github.com/invenia/gitlabchangelog",
    packages=find_packages(exclude=["tests"]),
    install_requires=REQUIREMENTS,
    classifiers=[
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    platforms=["any"],
    include_package_data=True,
    tests_require=TEST_DEPS,
    extras_require=EXTRAS,
)
|
import codecs
from os.path import abspath, dirname, join
from setuptools import find_packages, setup
TEST_DEPS = ["coverage[toml]", "pytest", "pytest-cov"]
DOCS_DEPS = ["sphinx", "sphinx-rtd-theme", "sphinx-autoapi", "recommonmark"]
CHECK_DEPS = [
"isort",
"flake8",
"flake8-quotes",
"pep8-naming",
"mypy",
"black",
"types-python-dateutil",
]
EXTRAS = {
"test": TEST_DEPS,
"docs": DOCS_DEPS,
"check": CHECK_DEPS,
"dev": TEST_DEPS + DOCS_DEPS + CHECK_DEPS,
}
REQUIREMENTS = [
"Jinja2>=2.10.3",
"python-dateutil>=2.8.1",
"python-gitlab>=1.8.0",
"semver>=2.9.0",
]
# Read in the version
with open(join(dirname(abspath(__file__)), "VERSION")) as version_file:
version = version_file.read().strip()
description = "Produces GitLab changelog release notes for projects that follow SemVer"
setup(
name="GitLabChangelog",
version=version,
description=description,
long_description=codecs.open("README.md", "r", "utf-8").read(),
long_description_content_type="text/markdown",
author="Invenia Technical Computing",
url="https://github.com/invenia/gitlabchangelog",
packages=find_packages(exclude=["tests"]),
install_requires=REQUIREMENTS,
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
platforms=["any"],
include_package_data=True,
tests_require=TEST_DEPS,
extras_require=EXTRAS,
)
|
en
| 0.893044
|
# Read in the version
| 1.382054
| 1
|
zerver/webhooks/statuspage/view.py
|
ricardoteixeiraduarte/zulip
| 3
|
6625398
|
<reponame>ricardoteixeiraduarte/zulip
# Webhooks for external integrations.
from django.utils.translation import ugettext as _
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import get_client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Dict, Any
INCIDENT_TEMPLATE = u'**{name}** \n * State: **{state}** \n * Description: {content}'
COMPONENT_TEMPLATE = u'**{name}** has changed status from **{old_status}** to **{new_status}**'
TOPIC_TEMPLATE = u'{name}: {description}'
def get_incident_events_body(payload: Dict[str, Any]) -> str:
    """Render the incident message body from a Statuspage payload."""
    incident = payload["incident"]
    return INCIDENT_TEMPLATE.format(
        name=incident["name"],
        state=incident["status"],
        content=incident["incident_updates"][0]["body"],
    )
def get_components_update_body(payload: Dict[str, Any]) -> str:
    """Render the component status-change message body from the payload."""
    change = payload["component_update"]
    return COMPONENT_TEMPLATE.format(
        name=payload["component"]["name"],
        old_status=change["old_status"],
        new_status=change["new_status"],
    )
def get_incident_topic(payload: Dict[str, Any]) -> str:
    """Build the topic string for an incident notification."""
    fields = {
        "name": payload["incident"]["name"],
        "description": payload["page"]["status_description"],
    }
    return TOPIC_TEMPLATE.format(**fields)
def get_component_topic(payload: Dict[str, Any]) -> str:
    """Build the topic string for a component notification."""
    component_name = payload["component"]["name"]
    page_description = payload["page"]["status_description"]
    return TOPIC_TEMPLATE.format(name=component_name, description=page_description)
@api_key_only_webhook_view('Statuspage')
@has_request_variables
def api_statuspage_webhook(request: HttpRequest, user_profile: UserProfile,
                           payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Dispatch a Statuspage webhook to the incident or component handler."""
    # A "none" status indicator marks an incident event; anything else is
    # a component status change.
    if payload["page"]["status_indicator"] == "none":
        topic = get_incident_topic(payload)
        body = get_incident_events_body(payload)
    else:
        topic = get_component_topic(payload)
        body = get_components_update_body(payload)
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
|
# Webhooks for external integrations.
from django.utils.translation import ugettext as _
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import get_client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Dict, Any
INCIDENT_TEMPLATE = u'**{name}** \n * State: **{state}** \n * Description: {content}'
COMPONENT_TEMPLATE = u'**{name}** has changed status from **{old_status}** to **{new_status}**'
TOPIC_TEMPLATE = u'{name}: {description}'
def get_incident_events_body(payload: Dict[str, Any]) -> str:
return INCIDENT_TEMPLATE.format(
name = payload["incident"]["name"],
state = payload["incident"]["status"],
content = payload["incident"]["incident_updates"][0]["body"],
)
def get_components_update_body(payload: Dict[str, Any]) -> str:
return COMPONENT_TEMPLATE.format(
name = payload["component"]["name"],
old_status = payload["component_update"]["old_status"],
new_status = payload["component_update"]["new_status"],
)
def get_incident_topic(payload: Dict[str, Any]) -> str:
return TOPIC_TEMPLATE.format(
name = payload["incident"]["name"],
description = payload["page"]["status_description"],
)
def get_component_topic(payload: Dict[str, Any]) -> str:
return TOPIC_TEMPLATE.format(
name = payload["component"]["name"],
description = payload["page"]["status_description"],
)
@api_key_only_webhook_view('Statuspage')
@has_request_variables
def api_statuspage_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
status = payload["page"]["status_indicator"]
if status == "none":
topic = get_incident_topic(payload)
body = get_incident_events_body(payload)
else:
topic = get_component_topic(payload)
body = get_components_update_body(payload)
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
|
en
| 0.818381
|
# Webhooks for external integrations.
| 1.984127
| 2
|
devconf/ast/literal.py
|
everclear72216/ucapi
| 0
|
6625399
|
<filename>devconf/ast/literal.py<gh_stars>0
import ast.mixins.expression
class Literal(ast.mixins.expression.RValueExpression):
    """An r-value expression whose type mirrors the value assigned to it."""
    def __init__(self):
        super().__init__()
    def set_value(self, value):
        # Keep the literal's type in sync with the value before storing it.
        self.set_type(value.get_type())
        super().set_value(value)
|
<filename>devconf/ast/literal.py<gh_stars>0
import ast.mixins.expression
class Literal(ast.mixins.expression.RValueExpression):
def __init__(self):
super().__init__()
def set_value(self, value):
self.set_type(value.get_type())
super().set_value(value)
|
none
| 1
| 2.112157
| 2
|
|
supervisor/hassos.py
|
srdjanrosic/supervisor
| 1
|
6625400
|
<reponame>srdjanrosic/supervisor
"""HassOS support on supervisor."""
import asyncio
import logging
from pathlib import Path
from typing import Awaitable, Optional
import aiohttp
from awesomeversion import AwesomeVersion, AwesomeVersionException
from cpe import CPE
from .coresys import CoreSys, CoreSysAttributes
from .dbus.rauc import RaucState
from .exceptions import DBusError, HassOSJobError, HassOSUpdateError
from .jobs.const import JobCondition, JobExecutionLimit
from .jobs.decorator import Job
_LOGGER: logging.Logger = logging.getLogger(__name__)
class HassOS(CoreSysAttributes):
    """HassOS interface inside supervisor."""
    def __init__(self, coresys: CoreSys):
        """Initialize HassOS handler."""
        self.coresys: CoreSys = coresys
        # Detection state; populated by load() when a HAOS host is found.
        self._available: bool = False
        self._version: Optional[AwesomeVersion] = None
        self._board: Optional[str] = None
        self._os_name: Optional[str] = None
    @property
    def available(self) -> bool:
        """Return True, if HassOS on host."""
        return self._available
    @property
    def version(self) -> Optional[AwesomeVersion]:
        """Return installed version of HassOS (None if not detected)."""
        return self._version
    @property
    def latest_version(self) -> Optional[AwesomeVersion]:
        """Return latest known HassOS version from the updater."""
        return self.sys_updater.version_hassos
    @property
    def need_update(self) -> bool:
        """Return true if a HassOS update is available."""
        try:
            return self.version < self.latest_version
        except (AwesomeVersionException, TypeError):
            # TypeError covers comparison against None (unknown version);
            # treat "unknown" as "no update available".
            return False
    @property
    def board(self) -> Optional[str]:
        """Return board name."""
        return self._board
    @property
    def os_name(self) -> Optional[str]:
        """Return OS name."""
        return self._os_name
    def _get_download_url(self, version: AwesomeVersion) -> str:
        """Build the OTA download URL for the given OS version.

        Raises HassOSUpdateError if the updater has no OTA URL template.
        """
        raw_url = self.sys_updater.ota_url
        if raw_url is None:
            raise HassOSUpdateError("Don't have an URL for OTA updates!", _LOGGER.error)
        update_board = self.board
        update_os_name = self.os_name
        # OS version 6 and later renamed intel-nuc to generic-x86-64...
        if update_board == "intel-nuc" and version >= 6.0:
            update_board = "generic-x86-64"
        # The OS name used to be hassos before renaming to haos...
        if version < 6.0:
            update_os_name = "hassos"
        else:
            update_os_name = "haos"
        url = raw_url.format(
            version=str(version), board=update_board, os_name=update_os_name
        )
        return url
    async def _download_raucb(self, url: str, raucb: Path) -> None:
        """Download rauc bundle (OTA) from URL."""
        _LOGGER.info("Fetch OTA update from %s", url)
        try:
            # Large image download: allow up to an hour total, 3 min to connect.
            timeout = aiohttp.ClientTimeout(total=60 * 60, connect=180)
            async with self.sys_websession.get(url, timeout=timeout) as request:
                if request.status != 200:
                    raise HassOSUpdateError(
                        f"Error raised from OTA Webserver: {request.status}",
                        _LOGGER.error,
                    )
                # Stream the RAUCB file to disk in 1 MiB chunks to bound memory use.
                with raucb.open("wb") as ota_file:
                    while True:
                        chunk = await request.content.read(1_048_576)
                        if not chunk:
                            break
                        ota_file.write(chunk)
            _LOGGER.info("Completed download of OTA update file %s", raucb)
        except (aiohttp.ClientError, asyncio.TimeoutError) as err:
            # A failed fetch may indicate lost connectivity; flag it for the system.
            self.sys_supervisor.connectivity = False
            raise HassOSUpdateError(
                f"Can't fetch OTA update from {url}: {err!s}", _LOGGER.error
            ) from err
        except OSError as err:
            raise HassOSUpdateError(
                f"Can't write OTA file: {err!s}", _LOGGER.error
            ) from err
    async def load(self) -> None:
        """Load HassOS data.

        Detects a Home Assistant OS host via its CPE string; on any other
        host this is a no-op and `available` stays False.
        """
        try:
            if not self.sys_host.info.cpe:
                raise NotImplementedError()
            cpe = CPE(self.sys_host.info.cpe)
            os_name = cpe.get_product()[0]
            if os_name not in ("hassos", "haos"):
                raise NotImplementedError()
        except NotImplementedError:
            _LOGGER.info("No Home Assistant Operating System found")
            return
        # Store meta data
        self._available = True
        self.sys_host.supported_features.cache_clear()
        self._version = AwesomeVersion(cpe.get_version()[0])
        self._board = cpe.get_target_hardware()[0]
        self._os_name = cpe.get_product()[0]
        await self.sys_dbus.rauc.update()
        _LOGGER.info(
            "Detect Home Assistant Operating System %s / BootSlot %s",
            self.version,
            self.sys_dbus.rauc.boot_slot,
        )
    @Job(
        conditions=[JobCondition.HAOS],
        on_condition=HassOSJobError,
    )
    async def config_sync(self) -> Awaitable[None]:
        """Trigger a host config reload from usb.
        Return a coroutine.
        """
        _LOGGER.info(
            "Synchronizing configuration from USB with Home Assistant Operating System."
        )
        await self.sys_host.services.restart("hassos-config.service")
    @Job(
        conditions=[
            JobCondition.HAOS,
            JobCondition.INTERNET_SYSTEM,
            JobCondition.RUNNING,
        ],
        limit=JobExecutionLimit.ONCE,
        on_condition=HassOSJobError,
    )
    async def update(self, version: Optional[AwesomeVersion] = None) -> None:
        """Update HassOS system.

        Downloads the OTA bundle, installs it via rauc and schedules a
        reboot on success. Raises HassOSUpdateError on any failure.
        """
        version = version or self.latest_version
        # Check installed version
        if version == self.version:
            raise HassOSUpdateError(
                f"Version {version!s} is already installed", _LOGGER.warning
            )
        # Fetch files from internet
        ota_url = self._get_download_url(version)
        int_ota = Path(self.sys_config.path_tmp, f"hassos-{version!s}.raucb")
        await self._download_raucb(ota_url, int_ota)
        # NOTE(review): ext_ota appears to be the same bundle addressed via the
        # external/host path so rauc can see it — confirm against sys_config.
        ext_ota = Path(self.sys_config.path_extern_tmp, int_ota.name)
        try:
            await self.sys_dbus.rauc.install(ext_ota)
            completed = await self.sys_dbus.rauc.signal_completed()
        except DBusError as err:
            raise HassOSUpdateError("Rauc communication error", _LOGGER.error) from err
        finally:
            # Always remove the downloaded bundle, success or failure.
            int_ota.unlink()
        # Update success
        if 0 in completed:
            _LOGGER.info(
                "Install of Home Assistant Operating System %s success", version
            )
            self.sys_create_task(self.sys_host.control.reboot())
            return
        # Update failed
        await self.sys_dbus.rauc.update()
        _LOGGER.error(
            "Home Assistant Operating System update failed with: %s",
            self.sys_dbus.rauc.last_error,
        )
        raise HassOSUpdateError()
    @Job(conditions=[JobCondition.HAOS])
    async def mark_healthy(self) -> None:
        """Set booted partition as good for rauc."""
        try:
            response = await self.sys_dbus.rauc.mark(RaucState.GOOD, "booted")
        except DBusError:
            _LOGGER.error("Can't mark booted partition as healthy!")
        else:
            _LOGGER.info("Rauc: %s - %s", self.sys_dbus.rauc.boot_slot, response[1])
|
"""HassOS support on supervisor."""
import asyncio
import logging
from pathlib import Path
from typing import Awaitable, Optional
import aiohttp
from awesomeversion import AwesomeVersion, AwesomeVersionException
from cpe import CPE
from .coresys import CoreSys, CoreSysAttributes
from .dbus.rauc import RaucState
from .exceptions import DBusError, HassOSJobError, HassOSUpdateError
from .jobs.const import JobCondition, JobExecutionLimit
from .jobs.decorator import Job
_LOGGER: logging.Logger = logging.getLogger(__name__)
class HassOS(CoreSysAttributes):
"""HassOS interface inside supervisor."""
def __init__(self, coresys: CoreSys):
"""Initialize HassOS handler."""
self.coresys: CoreSys = coresys
self._available: bool = False
self._version: Optional[AwesomeVersion] = None
self._board: Optional[str] = None
self._os_name: Optional[str] = None
@property
def available(self) -> bool:
"""Return True, if HassOS on host."""
return self._available
@property
def version(self) -> Optional[AwesomeVersion]:
"""Return version of HassOS."""
return self._version
@property
def latest_version(self) -> Optional[AwesomeVersion]:
"""Return version of HassOS."""
return self.sys_updater.version_hassos
@property
def need_update(self) -> bool:
"""Return true if a HassOS update is available."""
try:
return self.version < self.latest_version
except (AwesomeVersionException, TypeError):
return False
@property
def board(self) -> Optional[str]:
"""Return board name."""
return self._board
@property
def os_name(self) -> Optional[str]:
"""Return OS name."""
return self._os_name
def _get_download_url(self, version: AwesomeVersion) -> str:
raw_url = self.sys_updater.ota_url
if raw_url is None:
raise HassOSUpdateError("Don't have an URL for OTA updates!", _LOGGER.error)
update_board = self.board
update_os_name = self.os_name
# OS version 6 and later renamed intel-nuc to generic-x86-64...
if update_board == "intel-nuc" and version >= 6.0:
update_board = "generic-x86-64"
# The OS name used to be hassos before renaming to haos...
if version < 6.0:
update_os_name = "hassos"
else:
update_os_name = "haos"
url = raw_url.format(
version=str(version), board=update_board, os_name=update_os_name
)
return url
async def _download_raucb(self, url: str, raucb: Path) -> None:
"""Download rauc bundle (OTA) from URL."""
_LOGGER.info("Fetch OTA update from %s", url)
try:
timeout = aiohttp.ClientTimeout(total=60 * 60, connect=180)
async with self.sys_websession.get(url, timeout=timeout) as request:
if request.status != 200:
raise HassOSUpdateError(
f"Error raised from OTA Webserver: {request.status}",
_LOGGER.error,
)
# Download RAUCB file
with raucb.open("wb") as ota_file:
while True:
chunk = await request.content.read(1_048_576)
if not chunk:
break
ota_file.write(chunk)
_LOGGER.info("Completed download of OTA update file %s", raucb)
except (aiohttp.ClientError, asyncio.TimeoutError) as err:
self.sys_supervisor.connectivity = False
raise HassOSUpdateError(
f"Can't fetch OTA update from {url}: {err!s}", _LOGGER.error
) from err
except OSError as err:
raise HassOSUpdateError(
f"Can't write OTA file: {err!s}", _LOGGER.error
) from err
async def load(self) -> None:
"""Load HassOS data."""
try:
if not self.sys_host.info.cpe:
raise NotImplementedError()
cpe = CPE(self.sys_host.info.cpe)
os_name = cpe.get_product()[0]
if os_name not in ("hassos", "haos"):
raise NotImplementedError()
except NotImplementedError:
_LOGGER.info("No Home Assistant Operating System found")
return
# Store meta data
self._available = True
self.sys_host.supported_features.cache_clear()
self._version = AwesomeVersion(cpe.get_version()[0])
self._board = cpe.get_target_hardware()[0]
self._os_name = cpe.get_product()[0]
await self.sys_dbus.rauc.update()
_LOGGER.info(
"Detect Home Assistant Operating System %s / BootSlot %s",
self.version,
self.sys_dbus.rauc.boot_slot,
)
@Job(
conditions=[JobCondition.HAOS],
on_condition=HassOSJobError,
)
async def config_sync(self) -> Awaitable[None]:
"""Trigger a host config reload from usb.
Return a coroutine.
"""
_LOGGER.info(
"Synchronizing configuration from USB with Home Assistant Operating System."
)
await self.sys_host.services.restart("hassos-config.service")
@Job(
conditions=[
JobCondition.HAOS,
JobCondition.INTERNET_SYSTEM,
JobCondition.RUNNING,
],
limit=JobExecutionLimit.ONCE,
on_condition=HassOSJobError,
)
async def update(self, version: Optional[AwesomeVersion] = None) -> None:
"""Update HassOS system."""
version = version or self.latest_version
# Check installed version
if version == self.version:
raise HassOSUpdateError(
f"Version {version!s} is already installed", _LOGGER.warning
)
# Fetch files from internet
ota_url = self._get_download_url(version)
int_ota = Path(self.sys_config.path_tmp, f"hassos-{version!s}.raucb")
await self._download_raucb(ota_url, int_ota)
ext_ota = Path(self.sys_config.path_extern_tmp, int_ota.name)
try:
await self.sys_dbus.rauc.install(ext_ota)
completed = await self.sys_dbus.rauc.signal_completed()
except DBusError as err:
raise HassOSUpdateError("Rauc communication error", _LOGGER.error) from err
finally:
int_ota.unlink()
# Update success
if 0 in completed:
_LOGGER.info(
"Install of Home Assistant Operating System %s success", version
)
self.sys_create_task(self.sys_host.control.reboot())
return
# Update failed
await self.sys_dbus.rauc.update()
_LOGGER.error(
"Home Assistant Operating System update failed with: %s",
self.sys_dbus.rauc.last_error,
)
raise HassOSUpdateError()
@Job(conditions=[JobCondition.HAOS])
async def mark_healthy(self) -> None:
"""Set booted partition as good for rauc."""
try:
response = await self.sys_dbus.rauc.mark(RaucState.GOOD, "booted")
except DBusError:
_LOGGER.error("Can't mark booted partition as healthy!")
else:
_LOGGER.info("Rauc: %s - %s", self.sys_dbus.rauc.boot_slot, response[1])
|
en
| 0.660204
|
HassOS support on supervisor. HassOS interface inside supervisor. Initialize HassOS handler. Return True, if HassOS on host. Return version of HassOS. Return version of HassOS. Return true if a HassOS update is available. Return board name. Return OS name. # OS version 6 and later renamed intel-nuc to generic-x86-64... # The OS name used to be hassos before renaming to haos... Download rauc bundle (OTA) from URL. # Download RAUCB file Load HassOS data. # Store meta data Trigger a host config reload from usb. Return a coroutine. Update HassOS system. # Check installed version # Fetch files from internet # Update success # Update failed Set booted partition as good for rauc.
| 2.133447
| 2
|
IPython/quarantine/ipy_fsops.py
|
dchichkov/ipython
| 1
|
6625401
|
""" File system operations
Contains: Simple variants of normal unix shell commands (icp, imv, irm,
imkdir, igrep).
Some "otherwise handy" utils ('collect' for gathering files to
~/_ipython/collect, 'inote' for collecting single note lines to
~/_ipython/note.txt)
Mostly of use for bare windows installations where cygwin/equivalent is not
installed and you would otherwise need to deal with dos versions of the
commands (that e.g. don't understand / as path separator). These can
do some useful tricks on their own, though (like use 'mglob' patterns).
Not to be confused with ipipe commands (ils etc.) that also start with i.
"""
from IPython.core import ipapi
from IPython.core.error import TryNext
ip = ipapi.get()
import shutil,os,shlex
from IPython.external import mglob
from IPython.external.path import path
from IPython.core.error import UsageError
import IPython.utils.generics
def parse_args(args):
    """ Given arg string 'CMD files... target', return ([files], target) """
    parts = args.split(None, 1)
    if len(parts) == 1:
        # Only the command name was given.
        raise UsageError("Expected arguments for " + parts[0])
    tokens = shlex.split(parts[1])
    sources, target = tokens[0:-1], tokens[-1]
    expanded = mglob.expand(sources)
    if not expanded:
        raise UsageError("No files found:" + str(sources))
    return expanded, target
def icp(ip, arg):
    """ icp files... targetdir
    Copy all files to target, creating dirs for target if necessary
    icp srcdir dstdir
    Copy srcdir to distdir
    """
    import distutils.dir_util
    sources, targetdir = parse_args(arg)
    # When copying several items, make sure the target directory tree exists.
    if len(sources) > 1 and not os.path.isdir(targetdir):
        distutils.dir_util.mkpath(targetdir, verbose=1)
    for src in sources:
        copier = shutil.copytree if os.path.isdir(src) else shutil.copy2
        copier(src, targetdir)
    return sources
ip.define_alias("icp",icp)
def imv(ip, arg):
    """ imv src tgt
    Move source to target.
    """
    sources, target = parse_args(arg)
    # Moving several items only makes sense into an existing directory.
    if len(sources) > 1:
        assert os.path.isdir(target)
    for src in sources:
        shutil.move(src, target)
    return sources
ip.define_alias("imv",imv)
def irm(ip,arg):
    """ irm path[s]...
    Remove file[s] or dir[s] path. Dirs are deleted recursively.
    """
    try:
        paths = mglob.expand(arg.split(None,1)[1])
    except IndexError:
        # Nothing after the command name.
        raise UsageError("%irm paths...")
    import distutils.dir_util
    for p in paths:
        print "rm",p
        if os.path.isdir(p):
            # Directories are removed recursively.
            distutils.dir_util.remove_tree(p, verbose = 1)
        else:
            os.remove(p)
ip.define_alias("irm",irm)
def imkdir(ip, arg):
    """ imkdir path
    Creates dir path, and all dirs on the road
    """
    import distutils.dir_util
    # Everything after the command name is the target path.
    target = arg.split(None, 1)[1]
    distutils.dir_util.mkpath(target, verbose=1)
ip.define_alias("imkdir",imkdir)
def igrep(ip,arg):
    """ igrep PAT files...
    Very dumb file scan, case-insensitive.
    e.g.
    igrep "test this" rec:*.py
    """
    elems = shlex.split(arg)
    # elems[0] is the command name itself; the pattern follows it.
    dummy, pat, fs = elems[0], elems[1], mglob.expand(elems[2:])
    res = []
    for f in fs:
        found = False
        for l in open(f):
            # Plain case-insensitive substring match, no regex.
            if pat.lower() in l.lower():
                if not found:
                    # Print the file header once, before its first match.
                    print "[[",f,"]]"
                    found = True
                    res.append(f)
                print l.rstrip()
    return res
ip.define_alias("igrep",igrep)
def collect(ip,arg):
    """ collect foo/a.txt rec:bar=*.py
    Copies foo/a.txt to ~/_ipython/collect/foo/a.txt and *.py from bar,
    likewise
    Without args, try to open ~/_ipython/collect dir (in win32 at least).
    """
    from IPython.external.path import path
    basedir = path(ip.ipython_dir + '/collect')
    try:
        fs = mglob.expand(arg.split(None,1)[1])
    except IndexError:
        # No arguments: just open the collect directory (win32 only API).
        os.startfile(basedir)
        return
    for f in fs:
        f = path(f)
        # Strip drive letter and leading separators so the source path is
        # re-rooted under basedir.
        trg = basedir / f.splitdrive()[1].lstrip('/\\')
        if f.isdir():
            print "mkdir",trg
            trg.makedirs()
            continue
        dname = trg.dirname()
        if not dname.isdir():
            dname.makedirs()
        print f,"=>",trg
        shutil.copy2(f,trg)
ip.define_alias("collect",collect)
def inote(ip, arg):
    """ inote Hello world
    Adds timestamp and Hello world to ~/_ipython/notes.txt
    Without args, opens notes.txt for editing.
    """
    import time
    fname = ip.ipython_dir + '/notes.txt'
    try:
        # IndexError (no note text after the command name) opens the editor.
        text = arg.split(None, 1)[1]
    except IndexError:
        ip.hooks.editor(fname)
        return
    entry = " === " + time.asctime() + ': ===\n' + text + '\n'
    # Context manager closes the handle promptly; the original leaked it
    # via `f = open(fname, 'a').write(entry)`.
    with open(fname, 'a') as notes_file:
        notes_file.write(entry)
ip.define_alias("inote",inote)
def pathobj_mangle(p):
    """Mangle a filename into identifier-friendly form (' '->'__', '.'->'DOT')."""
    for plain, mangled in ((' ', '__'), ('.', 'DOT')):
        p = p.replace(plain, mangled)
    return p
def pathobj_unmangle(s):
    """Reverse pathobj_mangle ('__'->' ', 'DOT'->'.')."""
    for mangled, plain in (('__', ' '), ('DOT', '.')):
        s = s.replace(mangled, plain)
    return s
class PathObj(path):
    """Path wrapper whose directory entries appear as attributes.

    Entry names are mangled (spaces -> '__', dots -> 'DOT') so they form
    valid Python identifiers for tab completion.
    """
    def __init__(self,p):
        self.path = p
        if p != '.':
            self.ents = [pathobj_mangle(ent) for ent in os.listdir(p)]
        else:
            # Current directory: listed lazily in __complete__ so entries
            # reflect the cwd at completion time.
            self.ents = None
    def __complete__(self):
        if self.path != '.':
            return self.ents
        self.ents = [pathobj_mangle(ent) for ent in os.listdir('.')]
        return self.ents
    def __getattr__(self,name):
        # Attribute access descends into the directory entry of that name:
        # subdirectories become PathObj, files become plain path objects.
        if name in self.ents:
            if self.path.endswith('/'):
                sep = ''
            else:
                sep = '/'
            tgt = self.path + sep + pathobj_unmangle(name)
            #print "tgt",tgt
            if os.path.isdir(tgt):
                return PathObj(tgt)
            if os.path.isfile(tgt):
                return path(tgt)
        raise AttributeError, name # <<< DON'T FORGET THIS LINE !!
    def __str__(self):
        return self.path
    def __repr__(self):
        return "<PathObj to %s>" % self.path
    def __call__(self):
        # Calling the object changes the current working directory to it.
        print "cd:",self.path
        os.chdir(self.path)
def complete_pathobj(obj, prev_completions):
    """Complete via obj.__complete__() when the object provides it."""
    if not hasattr(obj, '__complete__'):
        raise TryNext
    entries = obj.__complete__()
    if not entries:
        # just return normal attributes of 'path' object if the dir is empty
        raise TryNext
    return entries
complete_pathobj = IPython.utils.generics.complete_object.when_type(PathObj)(complete_pathobj)
def test_pathobj():
    """Manual smoke test; uses hard-coded local paths, not run automatically."""
    #p = PathObj('c:/prj')
    #p2 = p.cgi
    #print p,p2
    rootdir = PathObj("/")
    startmenu = PathObj("d:/Documents and Settings/All Users/Start Menu/Programs")
    cwd = PathObj('.')
    ip.push("rootdir startmenu cwd")
#test_pathobj()
|
""" File system operations
Contains: Simple variants of normal unix shell commands (icp, imv, irm,
imkdir, igrep).
Some "otherwise handy" utils ('collect' for gathering files to
~/_ipython/collect, 'inote' for collecting single note lines to
~/_ipython/note.txt)
Mostly of use for bare windows installations where cygwin/equivalent is not
installed and you would otherwise need to deal with dos versions of the
commands (that e.g. don't understand / as path separator). These can
do some useful tricks on their own, though (like use 'mglob' patterns).
Not to be confused with ipipe commands (ils etc.) that also start with i.
"""
from IPython.core import ipapi
from IPython.core.error import TryNext
ip = ipapi.get()
import shutil,os,shlex
from IPython.external import mglob
from IPython.external.path import path
from IPython.core.error import UsageError
import IPython.utils.generics
def parse_args(args):
""" Given arg string 'CMD files... target', return ([files], target) """
tup = args.split(None, 1)
if len(tup) == 1:
raise UsageError("Expected arguments for " + tup[0])
tup2 = shlex.split(tup[1])
flist, trg = mglob.expand(tup2[0:-1]), tup2[-1]
if not flist:
raise UsageError("No files found:" + str(tup2[0:-1]))
return flist, trg
def icp(ip,arg):
""" icp files... targetdir
Copy all files to target, creating dirs for target if necessary
icp srcdir dstdir
Copy srcdir to distdir
"""
import distutils.dir_util
fs, targetdir = parse_args(arg)
if not os.path.isdir(targetdir) and len(fs) > 1:
distutils.dir_util.mkpath(targetdir,verbose =1)
for f in fs:
if os.path.isdir(f):
shutil.copytree(f, targetdir)
else:
shutil.copy2(f,targetdir)
return fs
ip.define_alias("icp",icp)
def imv(ip,arg):
""" imv src tgt
Move source to target.
"""
fs, target = parse_args(arg)
if len(fs) > 1:
assert os.path.isdir(target)
for f in fs:
shutil.move(f, target)
return fs
ip.define_alias("imv",imv)
def irm(ip,arg):
""" irm path[s]...
Remove file[s] or dir[s] path. Dirs are deleted recursively.
"""
try:
paths = mglob.expand(arg.split(None,1)[1])
except IndexError:
raise UsageError("%irm paths...")
import distutils.dir_util
for p in paths:
print "rm",p
if os.path.isdir(p):
distutils.dir_util.remove_tree(p, verbose = 1)
else:
os.remove(p)
ip.define_alias("irm",irm)
def imkdir(ip,arg):
""" imkdir path
Creates dir path, and all dirs on the road
"""
import distutils.dir_util
targetdir = arg.split(None,1)[1]
distutils.dir_util.mkpath(targetdir,verbose =1)
ip.define_alias("imkdir",imkdir)
def igrep(ip,arg):
""" igrep PAT files...
Very dumb file scan, case-insensitive.
e.g.
igrep "test this" rec:*.py
"""
elems = shlex.split(arg)
dummy, pat, fs = elems[0], elems[1], mglob.expand(elems[2:])
res = []
for f in fs:
found = False
for l in open(f):
if pat.lower() in l.lower():
if not found:
print "[[",f,"]]"
found = True
res.append(f)
print l.rstrip()
return res
ip.define_alias("igrep",igrep)
def collect(ip,arg):
""" collect foo/a.txt rec:bar=*.py
Copies foo/a.txt to ~/_ipython/collect/foo/a.txt and *.py from bar,
likewise
Without args, try to open ~/_ipython/collect dir (in win32 at least).
"""
from IPython.external.path import path
basedir = path(ip.ipython_dir + '/collect')
try:
fs = mglob.expand(arg.split(None,1)[1])
except IndexError:
os.startfile(basedir)
return
for f in fs:
f = path(f)
trg = basedir / f.splitdrive()[1].lstrip('/\\')
if f.isdir():
print "mkdir",trg
trg.makedirs()
continue
dname = trg.dirname()
if not dname.isdir():
dname.makedirs()
print f,"=>",trg
shutil.copy2(f,trg)
ip.define_alias("collect",collect)
def inote(ip,arg):
""" inote Hello world
Adds timestamp and Hello world to ~/_ipython/notes.txt
Without args, opens notes.txt for editing.
"""
import time
fname = ip.ipython_dir + '/notes.txt'
try:
entry = " === " + time.asctime() + ': ===\n' + arg.split(None,1)[1] + '\n'
f= open(fname, 'a').write(entry)
except IndexError:
ip.hooks.editor(fname)
ip.define_alias("inote",inote)
def pathobj_mangle(p):
return p.replace(' ', '__').replace('.','DOT')
def pathobj_unmangle(s):
return s.replace('__',' ').replace('DOT','.')
class PathObj(path):
def __init__(self,p):
self.path = p
if p != '.':
self.ents = [pathobj_mangle(ent) for ent in os.listdir(p)]
else:
self.ents = None
def __complete__(self):
if self.path != '.':
return self.ents
self.ents = [pathobj_mangle(ent) for ent in os.listdir('.')]
return self.ents
def __getattr__(self,name):
if name in self.ents:
if self.path.endswith('/'):
sep = ''
else:
sep = '/'
tgt = self.path + sep + pathobj_unmangle(name)
#print "tgt",tgt
if os.path.isdir(tgt):
return PathObj(tgt)
if os.path.isfile(tgt):
return path(tgt)
raise AttributeError, name # <<< DON'T FORGET THIS LINE !!
def __str__(self):
return self.path
def __repr__(self):
return "<PathObj to %s>" % self.path
def __call__(self):
print "cd:",self.path
os.chdir(self.path)
def complete_pathobj(obj, prev_completions):
if hasattr(obj,'__complete__'):
res = obj.__complete__()
if res:
return res
# just return normal attributes of 'path' object if the dir is empty
raise TryNext
complete_pathobj = IPython.utils.generics.complete_object.when_type(PathObj)(complete_pathobj)
def test_pathobj():
#p = PathObj('c:/prj')
#p2 = p.cgi
#print p,p2
rootdir = PathObj("/")
startmenu = PathObj("d:/Documents and Settings/All Users/Start Menu/Programs")
cwd = PathObj('.')
ip.push("rootdir startmenu cwd")
#test_pathobj()
|
en
| 0.739703
|
File system operations Contains: Simple variants of normal unix shell commands (icp, imv, irm, imkdir, igrep). Some "otherwise handy" utils ('collect' for gathering files to ~/_ipython/collect, 'inote' for collecting single note lines to ~/_ipython/note.txt) Mostly of use for bare windows installations where cygwin/equivalent is not installed and you would otherwise need to deal with dos versions of the commands (that e.g. don't understand / as path separator). These can do some useful tricks on their own, though (like use 'mglob' patterns). Not to be confused with ipipe commands (ils etc.) that also start with i. Given arg string 'CMD files... target', return ([files], target) icp files... targetdir Copy all files to target, creating dirs for target if necessary icp srcdir dstdir Copy srcdir to distdir imv src tgt Move source to target. irm path[s]... Remove file[s] or dir[s] path. Dirs are deleted recursively. imkdir path Creates dir path, and all dirs on the road igrep PAT files... Very dumb file scan, case-insensitive. e.g. igrep "test this" rec:*.py collect foo/a.txt rec:bar=*.py Copies foo/a.txt to ~/_ipython/collect/foo/a.txt and *.py from bar, likewise Without args, try to open ~/_ipython/collect dir (in win32 at least). inote Hello world Adds timestamp and Hello world to ~/_ipython/notes.txt Without args, opens notes.txt for editing. #print "tgt",tgt # <<< DON'T FORGET THIS LINE !! # just return normal attributes of 'path' object if the dir is empty #p = PathObj('c:/prj') #p2 = p.cgi #print p,p2 #test_pathobj()
| 2.88274
| 3
|
benchexec/resources.py
|
blizzard4591/benchexec
| 0
|
6625402
|
<filename>benchexec/resources.py
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 <NAME> <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
"""
This module contains functions for computing assignments of resources to runs.
"""
import collections
import itertools
import logging
import math
import os
import sys
from benchexec import cgroups
from benchexec import util
__all__ = [
"check_memory_size",
"get_cpu_cores_per_run",
"get_memory_banks_per_run",
"get_cpu_package_for_core",
]
def get_cpu_cores_per_run(
    coreLimit, num_of_threads, use_hyperthreading, my_cgroups, coreSet=None
):
    """
    Calculate an assignment of the available CPU cores to a number
    of parallel benchmark executions such that each run gets its own cores
    without overlapping of cores between runs.
    In case the machine has hyper-threading, this method tries to avoid
    putting two different runs on the same physical core
    (but it does not guarantee this if the number of parallel runs is too high to avoid it).
    In case the machine has multiple CPUs, this method avoids
    splitting a run across multiple CPUs if the number of cores per run
    is lower than the number of cores per CPU
    (splitting a run over multiple CPUs provides worse performance).
    It will also try to split the runs evenly across all available CPUs.
    A few theoretically-possible cases are not implemented,
    for example assigning three 10-core runs on a machine
    with two 16-core CPUs (this would have unfair core assignment
    and thus undesirable performance characteristics anyway).
    The list of available cores is read from the cgroup file system,
    such that the assigned cores are a subset of the cores
    that the current process is allowed to use.
    This script does currently not support situations
    where the available cores are asymmetrically split over CPUs,
    e.g. 3 cores on one CPU and 5 on another.
    @param coreLimit: the number of cores for each run
    @param num_of_threads: the number of parallel benchmark executions
    @param use_hyperthreading: if False, hyper-threading sibling cores are
        excluded so each run uses only one virtual core per physical core
    @param my_cgroups: cgroup accessor used to read the set of CPU cores
        that this process is allowed to use (cpuset controller)
    @param coreSet: the list of CPU cores identifiers provided by a user, None makes benchexec using all cores
    @return a list of lists, where each inner list contains the cores for one run
    """
    try:
        # read list of available CPU cores
        allCpus = util.parse_int_list(my_cgroups.get_value(cgroups.CPUSET, "cpus"))
        # Filter CPU cores according to the list of identifiers provided by a user
        if coreSet:
            invalid_cores = sorted(set(coreSet).difference(set(allCpus)))
            if len(invalid_cores) > 0:
                raise ValueError(
                    "The following provided CPU cores are not available: {}".format(
                        ", ".join(map(str, invalid_cores))
                    )
                )
            allCpus = [core for core in allCpus if core in coreSet]
        logging.debug("List of available CPU cores is %s.", allCpus)
        # read mapping of core to memory region
        cores_of_memory_region = collections.defaultdict(list)
        for core in allCpus:
            coreDir = "/sys/devices/system/cpu/cpu{0}/".format(core)
            memory_regions = _get_memory_banks_listed_in_dir(coreDir)
            if memory_regions:
                cores_of_memory_region[memory_regions[0]].append(core)
            else:
                # If some cores do not have NUMA information, skip using it completely
                logging.warning(
                    "Kernel does not have NUMA support. Use benchexec at your own risk."
                )
                cores_of_memory_region = {}
                break
        logging.debug("Memory regions of cores are %s.", cores_of_memory_region)
        # read mapping of core to CPU ("physical package")
        cores_of_package = collections.defaultdict(list)
        for core in allCpus:
            package = get_cpu_package_for_core(core)
            cores_of_package[package].append(core)
        logging.debug("Physical packages of cores are %s.", cores_of_package)
        # select the more fine grained division among memory regions and physical package
        # (more units means a finer-grained split of the available cores)
        if len(cores_of_memory_region) >= len(cores_of_package):
            cores_of_unit = cores_of_memory_region
            logging.debug("Using memory regions as the basis for cpu core division")
        else:
            cores_of_unit = cores_of_package
            logging.debug("Using physical packages as the basis for cpu core division")
        # read hyper-threading information (sibling cores sharing the same physical core)
        siblings_of_core = {}
        for core in allCpus:
            siblings = util.parse_int_list(
                util.read_file(
                    "/sys/devices/system/cpu/cpu{0}/topology/thread_siblings_list".format(
                        core
                    )
                )
            )
            siblings_of_core[core] = siblings
        logging.debug("Siblings of cores are %s.", siblings_of_core)
    except ValueError as e:
        # Any malformed kernel/cgroup data is fatal for benchmarking reliability.
        sys.exit("Could not read CPU information from kernel: {0}".format(e))
    return _get_cpu_cores_per_run0(
        coreLimit,
        num_of_threads,
        use_hyperthreading,
        allCpus,
        cores_of_unit,
        siblings_of_core,
    )
def _get_cpu_cores_per_run0(
    coreLimit,
    num_of_threads,
    use_hyperthreading,
    allCpus,
    cores_of_unit,
    siblings_of_core,
):
    """This method does the actual work of get_cpu_cores_per_run
    without reading the machine architecture from the file system
    in order to be testable. For description, c.f. above.
    Note that this method might change the input parameters!
    Do not call it directly, call get_cpu_cores_per_run()!
    @param use_hyperthreading: A boolean to check if no-hyperthreading method is being used
    @param allCpus: the list of all available cores
    @param cores_of_unit: a mapping from logical unit (can be memory region (NUMA node) or physical package(CPU), depending on the architecture of system)
    to lists of cores that belong to this unit
    @param siblings_of_core: a mapping from each core to a list of sibling cores including the core itself (a sibling is a core sharing the same physical core)
    @return a list of lists, where each inner list contains the cores for one run
    """
    # First, do some checks whether this algorithm has a chance to work.
    if coreLimit > len(allCpus):
        sys.exit(
            "Cannot run benchmarks with {0} CPU cores, only {1} CPU cores available.".format(
                coreLimit, len(allCpus)
            )
        )
    if coreLimit * num_of_threads > len(allCpus):
        sys.exit(
            "Cannot run {0} benchmarks in parallel with {1} CPU cores each, only {2} CPU cores available. Please reduce the number of threads to {3}.".format(
                num_of_threads, coreLimit, len(allCpus), len(allCpus) // coreLimit
            )
        )
    if not use_hyperthreading:
        # No-hyperthreading mode: keep only one virtual core per physical core
        # and drop its siblings from all bookkeeping structures.
        unit_of_core = {}
        unused_cores = []
        for unit, cores in cores_of_unit.items():
            for core in cores:
                unit_of_core[core] = unit
        for core, siblings in siblings_of_core.items():
            if core in allCpus:
                siblings.remove(core)
                cores_of_unit[unit_of_core[core]] = [
                    c for c in cores_of_unit[unit_of_core[core]] if c not in siblings
                ]
                siblings_of_core[core] = [core]
                allCpus = [c for c in allCpus if c not in siblings]
            else:
                unused_cores.append(core)
        for core in unused_cores:
            siblings_of_core.pop(core)
        logging.debug(
            "Running in no-hyperthreading mode, avoiding the use of CPU cores {}".format(
                unused_cores
            )
        )
    unit_size = None  # Number of cores per unit
    for unit, cores in cores_of_unit.items():
        if unit_size is None:
            unit_size = len(cores)
        elif unit_size != len(cores):
            sys.exit(
                "Asymmetric machine architecture not supported: CPU/memory region {0} has {1} cores, but other CPU/memory region has {2} cores.".format(
                    unit, len(cores), unit_size
                )
            )
    core_size = None  # Number of threads per core
    for core, siblings in siblings_of_core.items():
        if core_size is None:
            core_size = len(siblings)
        elif core_size != len(siblings):
            sys.exit(
                "Asymmetric machine architecture not supported: CPU core {0} has {1} siblings, but other core has {2} siblings.".format(
                    core, len(siblings), core_size
                )
            )
    all_cpus_set = set(allCpus)
    for core, siblings in siblings_of_core.items():
        siblings_set = set(siblings)
        if not siblings_set.issubset(all_cpus_set):
            sys.exit(
                "Core assignment is unsupported because siblings {0} of core {1} are not usable. Please always make all virtual cores of a physical core available.".format(
                    siblings_set.difference(all_cpus_set), core
                )
            )
    # Second, compute some values we will need.
    unit_count = len(cores_of_unit)
    units = sorted(cores_of_unit.keys())
    # Round the core limit up to a multiple of the physical-core size, because
    # all siblings of a used physical core must be blocked together.
    coreLimit_rounded_up = int(math.ceil(coreLimit / core_size) * core_size)
    assert coreLimit <= coreLimit_rounded_up < (coreLimit + core_size)
    units_per_run = int(math.ceil(coreLimit_rounded_up / unit_size))
    if units_per_run > 1 and units_per_run * num_of_threads > unit_count:
        sys.exit(
            "Cannot split runs over multiple CPUs/memory regions and at the same time assign multiple runs to the same CPU/memory region. Please reduce the number of threads to {0}.".format(
                unit_count // units_per_run
            )
        )
    runs_per_unit = int(math.ceil(num_of_threads / unit_count))
    assert units_per_run == 1 or runs_per_unit == 1
    if units_per_run == 1 and runs_per_unit * coreLimit > unit_size:
        sys.exit(
            "Cannot run {} benchmarks with {} cores on {} CPUs/memory regions with {} cores, because runs would need to be split across multiple CPUs/memory regions. Please reduce the number of threads.".format(
                num_of_threads, coreLimit, unit_count, unit_size
            )
        )
    # Warn on misuse of hyper-threading
    need_HT = False
    if units_per_run == 1:
        # Checking whether the total amount of usable physical cores is not enough,
        # there might be some cores we cannot use, e.g. when scheduling with coreLimit=3 on quad-core machines.
        # Thus we check per unit.
        assert coreLimit * runs_per_unit <= unit_size
        if coreLimit_rounded_up * runs_per_unit > unit_size:
            need_HT = True
            logging.warning(
                "The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.",
                (unit_size // coreLimit_rounded_up) * unit_count,
            )
    else:
        if coreLimit_rounded_up * num_of_threads > len(allCpus):
            assert coreLimit_rounded_up * runs_per_unit > unit_size
            need_HT = True
            logging.warning(
                "The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.",
                len(allCpus) // coreLimit_rounded_up,
            )
    logging.debug(
        "Going to assign at most %s runs per CPU/memory region, each one using %s cores and blocking %s cores on %s CPUs/memory regions.",
        runs_per_unit,
        coreLimit,
        coreLimit_rounded_up,
        units_per_run,
    )
    # Third, do the actual core assignment.
    result = []
    used_cores = set()
    for run in range(num_of_threads):
        # this calculation ensures that runs are split evenly across units
        start_unit = (run * units_per_run) % unit_count
        cores = []
        cores_with_siblings = set()
        for unit_nr in range(start_unit, start_unit + units_per_run):
            assert len(cores) < coreLimit
            # Some systems have non-contiguous unit numbers,
            # so we take the i'th unit out of the list of available units.
            # On normal system this is the identity mapping.
            unit = units[unit_nr]
            for core in cores_of_unit[unit]:
                if core not in cores:
                    cores.extend(
                        c for c in siblings_of_core[core] if c not in used_cores
                    )
                if len(cores) >= coreLimit:
                    break
            cores_with_siblings.update(cores)
            cores = cores[:coreLimit]  # shrink if we got more cores than necessary
            # remove used cores such that we do not try to use them again
            cores_of_unit[unit] = [
                core for core in cores_of_unit[unit] if core not in cores
            ]
        assert (
            len(cores) == coreLimit
        ), "Wrong number of cores for run {} of {} - previous results: {}, remaining cores per CPU/memory region: {}, current cores: {}".format(
            run + 1, num_of_threads, result, cores_of_unit, cores
        )
        blocked_cores = cores if need_HT else cores_with_siblings
        assert not used_cores.intersection(blocked_cores)
        used_cores.update(blocked_cores)
        result.append(sorted(cores))
    assert len(result) == num_of_threads
    assert all(len(cores) == coreLimit for cores in result)
    # Bug fix: the assertion message must be a string; the previous
    # 'str + result' (a list) raised TypeError instead of showing the
    # intended AssertionError message when the assertion failed.
    assert len(set(itertools.chain(*result))) == num_of_threads * coreLimit, (
        "Cores are not uniquely assigned to runs: {}".format(result)
    )
    logging.debug("Final core assignment: %s.", result)
    return result
def get_memory_banks_per_run(coreAssignment, cgroups):
    """Get an assignment of memory banks to runs that fits to the given coreAssignment,
    i.e., no run is allowed to use memory that is not local (on the same NUMA node)
    to one of its CPU cores.
    @param coreAssignment: a list of lists of core ids, one inner list per run
    @param cgroups: cgroup accessor providing read_allowed_memory_banks()
        (NOTE(review): this parameter shadows the module-level `cgroups` import;
        consider renaming, but callers may pass it by keyword, so left unchanged)
    @return a list with one sorted list of memory-bank ids per run, or None if
        the system has no NUMA support (then no memory restriction is needed)
    """
    try:
        # read list of available memory banks
        allMems = set(cgroups.read_allowed_memory_banks())
        result = []
        for cores in coreAssignment:
            mems = set()
            for core in cores:
                # Each core directory lists the memory bank(s) local to that core.
                coreDir = "/sys/devices/system/cpu/cpu{0}/".format(core)
                mems.update(_get_memory_banks_listed_in_dir(coreDir))
            allowedMems = sorted(mems.intersection(allMems))
            logging.debug(
                "Memory banks for cores %s are %s, of which we can use %s.",
                cores,
                list(mems),
                allowedMems,
            )
            result.append(allowedMems)
        assert len(result) == len(coreAssignment)
        if any(result) and os.path.isdir("/sys/devices/system/node/"):
            return result
        else:
            # All runs get the empty list of memory regions
            # because this system has no NUMA support
            return None
    except ValueError as e:
        sys.exit("Could not read memory information from kernel: {0}".format(e))
def _get_memory_banks_listed_in_dir(path):
    """Return the ids of all memory banks the kernel lists in *path*.

    Such a directory can be /sys/devices/system/node/ (all memory banks)
    or /sys/devices/system/cpu/cpu*/ (the banks on the same NUMA node
    as that core). The kernel names each bank "node<id>"."""
    banks = []
    for entry in os.listdir(path):
        if entry.startswith("node"):
            banks.append(int(entry[len("node"):]))
    return banks
def check_memory_size(memLimit, num_of_threads, memoryAssignment, my_cgroups):
    """Check whether the desired amount of parallel benchmarks fits in the memory.
    Implemented are checks for memory limits via cgroup controller "memory" and
    memory bank restrictions via cgroup controller "cpuset",
    as well as whether the system actually has enough memory installed.
    Terminates the process via sys.exit() if any check fails.
    @param memLimit: the memory limit in bytes per run
    @param num_of_threads: the number of parallel benchmark executions
    @param memoryAssignment: the allocation of memory banks to runs (if not present, all banks are assigned to all runs)
    @param my_cgroups: cgroup accessor used to read memory limits and allowed memory banks
    """
    try:
        # Check amount of memory allowed via cgroups.
        def check_limit(actualLimit):
            # Closure over memLimit/num_of_threads; exits if the given cgroup
            # limit cannot accommodate one run resp. all parallel runs.
            if actualLimit < memLimit:
                sys.exit(
                    "Cgroups allow only {} bytes of memory to be used, cannot execute runs with {} bytes of memory.".format(
                        actualLimit, memLimit
                    )
                )
            elif actualLimit < memLimit * num_of_threads:
                sys.exit(
                    "Cgroups allow only {} bytes of memory to be used, not enough for {} benchmarks with {} bytes each. Please reduce the number of threads".format(
                        actualLimit, num_of_threads, memLimit
                    )
                )
        if not os.path.isdir("/sys/devices/system/node/"):
            logging.debug(
                "System without NUMA support in Linux kernel, ignoring memory assignment."
            )
            return
        if cgroups.MEMORY in my_cgroups:
            # We use the entries hierarchical_*_limit in memory.stat and not memory.*limit_in_bytes
            # because the former may be lower if memory.use_hierarchy is enabled.
            for key, value in my_cgroups.get_key_value_pairs(cgroups.MEMORY, "stat"):
                if (
                    key == "hierarchical_memory_limit"
                    or key == "hierarchical_memsw_limit"
                ):
                    check_limit(int(value))
        # Get list of all memory banks, either from memory assignment or from system.
        if not memoryAssignment:
            if cgroups.CPUSET in my_cgroups:
                allMems = my_cgroups.read_allowed_memory_banks()
            else:
                allMems = _get_memory_banks_listed_in_dir("/sys/devices/system/node/")
            memoryAssignment = [
                allMems
            ] * num_of_threads  # "fake" memory assignment: all threads on all banks
        else:
            allMems = set(itertools.chain(*memoryAssignment))
        memSizes = {mem: _get_memory_bank_size(mem) for mem in allMems}
    except ValueError as e:
        sys.exit("Could not read memory information from kernel: {0}".format(e))
    # Check whether enough memory is allocatable on the assigned memory banks.
    # As the sum of the sizes of the memory banks is at most the total size of memory in the system,
    # and we do this check always even if the banks are not restricted,
    # this also checks whether the system has actually enough memory installed.
    usedMem = collections.Counter()
    for mems_of_run in memoryAssignment:
        totalSize = sum(memSizes[mem] for mem in mems_of_run)
        if totalSize < memLimit:
            sys.exit(
                "Memory banks {} do not have enough memory for one run, only {} bytes available.".format(
                    mems_of_run, totalSize
                )
            )
        usedMem[tuple(mems_of_run)] += memLimit
        if usedMem[tuple(mems_of_run)] > totalSize:
            sys.exit(
                "Memory banks {} do not have enough memory for all runs, only {} bytes available. Please reduce the number of threads.".format(
                    mems_of_run, totalSize
                )
            )
def _get_memory_bank_size(memBank):
    """Return the total size in bytes of the given memory bank (NUMA node).

    Raises ValueError if the kernel's meminfo file is missing the
    MemTotal entry or reports it in an unexpected format."""
    fileName = "/sys/devices/system/node/node{0}/meminfo".format(memBank)
    with open(fileName) as meminfo:
        for line in meminfo:
            if "MemTotal" not in line:
                continue
            value = line.split(":")[1].strip()
            if not value.endswith(" kB"):
                raise ValueError(
                    '"{}" in file {} is not a memory size.'.format(value, fileName)
                )
            # kernel uses KiB but names them kB, convert to Byte
            size = int(value[: -len(" kB")]) * 1024
            logging.debug("Memory bank %s has size %s bytes.", memBank, size)
            return size
    raise ValueError("Failed to read total memory from {}.".format(fileName))
def get_cpu_package_for_core(core):
    """Return the id of the physical package (socket) that *core* belongs to."""
    topology_file = (
        "/sys/devices/system/cpu/cpu{0}/topology/physical_package_id".format(core)
    )
    return int(util.read_file(topology_file))
def get_cores_of_same_package_as(core):
    """Return the list of all cores on the same physical package as *core*."""
    topology_file = (
        "/sys/devices/system/cpu/cpu{0}/topology/core_siblings_list".format(core)
    )
    return util.parse_int_list(util.read_file(topology_file))
|
<filename>benchexec/resources.py
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 <NAME> <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
"""
This module contains functions for computing assignments of resources to runs.
"""
import collections
import itertools
import logging
import math
import os
import sys
from benchexec import cgroups
from benchexec import util
__all__ = [
"check_memory_size",
"get_cpu_cores_per_run",
"get_memory_banks_per_run",
"get_cpu_package_for_core",
]
def get_cpu_cores_per_run(
coreLimit, num_of_threads, use_hyperthreading, my_cgroups, coreSet=None
):
"""
Calculate an assignment of the available CPU cores to a number
of parallel benchmark executions such that each run gets its own cores
without overlapping of cores between runs.
In case the machine has hyper-threading, this method tries to avoid
putting two different runs on the same physical core
(but it does not guarantee this if the number of parallel runs is too high to avoid it).
In case the machine has multiple CPUs, this method avoids
splitting a run across multiple CPUs if the number of cores per run
is lower than the number of cores per CPU
(splitting a run over multiple CPUs provides worse performance).
It will also try to split the runs evenly across all available CPUs.
A few theoretically-possible cases are not implemented,
for example assigning three 10-core runs on a machine
with two 16-core CPUs (this would have unfair core assignment
and thus undesirable performance characteristics anyway).
The list of available cores is read from the cgroup file system,
such that the assigned cores are a subset of the cores
that the current process is allowed to use.
This script does currently not support situations
where the available cores are asymmetrically split over CPUs,
e.g. 3 cores on one CPU and 5 on another.
@param coreLimit: the number of cores for each run
@param num_of_threads: the number of parallel benchmark executions
@param coreSet: the list of CPU cores identifiers provided by a user, None makes benchexec using all cores
@return a list of lists, where each inner list contains the cores for one run
"""
try:
# read list of available CPU cores
allCpus = util.parse_int_list(my_cgroups.get_value(cgroups.CPUSET, "cpus"))
# Filter CPU cores according to the list of identifiers provided by a user
if coreSet:
invalid_cores = sorted(set(coreSet).difference(set(allCpus)))
if len(invalid_cores) > 0:
raise ValueError(
"The following provided CPU cores are not available: {}".format(
", ".join(map(str, invalid_cores))
)
)
allCpus = [core for core in allCpus if core in coreSet]
logging.debug("List of available CPU cores is %s.", allCpus)
# read mapping of core to memory region
cores_of_memory_region = collections.defaultdict(list)
for core in allCpus:
coreDir = "/sys/devices/system/cpu/cpu{0}/".format(core)
memory_regions = _get_memory_banks_listed_in_dir(coreDir)
if memory_regions:
cores_of_memory_region[memory_regions[0]].append(core)
else:
# If some cores do not have NUMA information, skip using it completely
logging.warning(
"Kernel does not have NUMA support. Use benchexec at your own risk."
)
cores_of_memory_region = {}
break
logging.debug("Memory regions of cores are %s.", cores_of_memory_region)
# read mapping of core to CPU ("physical package")
cores_of_package = collections.defaultdict(list)
for core in allCpus:
package = get_cpu_package_for_core(core)
cores_of_package[package].append(core)
logging.debug("Physical packages of cores are %s.", cores_of_package)
# select the more fine grained division among memory regions and physical package
if len(cores_of_memory_region) >= len(cores_of_package):
cores_of_unit = cores_of_memory_region
logging.debug("Using memory regions as the basis for cpu core division")
else:
cores_of_unit = cores_of_package
logging.debug("Using physical packages as the basis for cpu core division")
# read hyper-threading information (sibling cores sharing the same physical core)
siblings_of_core = {}
for core in allCpus:
siblings = util.parse_int_list(
util.read_file(
"/sys/devices/system/cpu/cpu{0}/topology/thread_siblings_list".format(
core
)
)
)
siblings_of_core[core] = siblings
logging.debug("Siblings of cores are %s.", siblings_of_core)
except ValueError as e:
sys.exit("Could not read CPU information from kernel: {0}".format(e))
return _get_cpu_cores_per_run0(
coreLimit,
num_of_threads,
use_hyperthreading,
allCpus,
cores_of_unit,
siblings_of_core,
)
def _get_cpu_cores_per_run0(
    coreLimit,
    num_of_threads,
    use_hyperthreading,
    allCpus,
    cores_of_unit,
    siblings_of_core,
):
    """This method does the actual work of get_cpu_cores_per_run
    without reading the machine architecture from the file system
    in order to be testable. For description, c.f. above.
    Note that this method might change the input parameters!
    Do not call it directly, call get_cpu_cores_per_run()!
    @param use_hyperthreading: A boolean to check if no-hyperthreading method is being used
    @param allCpus: the list of all available cores
    @param cores_of_unit: a mapping from logical unit (can be memory region (NUMA node) or physical package(CPU), depending on the architecture of system)
    to lists of cores that belong to this unit
    @param siblings_of_core: a mapping from each core to a list of sibling cores including the core itself (a sibling is a core sharing the same physical core)
    @return a list of lists, where each inner list contains the cores for one run
    """
    # First, do some checks whether this algorithm has a chance to work.
    if coreLimit > len(allCpus):
        sys.exit(
            "Cannot run benchmarks with {0} CPU cores, only {1} CPU cores available.".format(
                coreLimit, len(allCpus)
            )
        )
    if coreLimit * num_of_threads > len(allCpus):
        sys.exit(
            "Cannot run {0} benchmarks in parallel with {1} CPU cores each, only {2} CPU cores available. Please reduce the number of threads to {3}.".format(
                num_of_threads, coreLimit, len(allCpus), len(allCpus) // coreLimit
            )
        )
    if not use_hyperthreading:
        # No-hyperthreading mode: keep only one virtual core per physical core
        # and drop its siblings from all bookkeeping structures.
        unit_of_core = {}
        unused_cores = []
        for unit, cores in cores_of_unit.items():
            for core in cores:
                unit_of_core[core] = unit
        for core, siblings in siblings_of_core.items():
            if core in allCpus:
                siblings.remove(core)
                cores_of_unit[unit_of_core[core]] = [
                    c for c in cores_of_unit[unit_of_core[core]] if c not in siblings
                ]
                siblings_of_core[core] = [core]
                allCpus = [c for c in allCpus if c not in siblings]
            else:
                unused_cores.append(core)
        for core in unused_cores:
            siblings_of_core.pop(core)
        logging.debug(
            "Running in no-hyperthreading mode, avoiding the use of CPU cores {}".format(
                unused_cores
            )
        )
    unit_size = None  # Number of cores per unit
    for unit, cores in cores_of_unit.items():
        if unit_size is None:
            unit_size = len(cores)
        elif unit_size != len(cores):
            sys.exit(
                "Asymmetric machine architecture not supported: CPU/memory region {0} has {1} cores, but other CPU/memory region has {2} cores.".format(
                    unit, len(cores), unit_size
                )
            )
    core_size = None  # Number of threads per core
    for core, siblings in siblings_of_core.items():
        if core_size is None:
            core_size = len(siblings)
        elif core_size != len(siblings):
            sys.exit(
                "Asymmetric machine architecture not supported: CPU core {0} has {1} siblings, but other core has {2} siblings.".format(
                    core, len(siblings), core_size
                )
            )
    all_cpus_set = set(allCpus)
    for core, siblings in siblings_of_core.items():
        siblings_set = set(siblings)
        if not siblings_set.issubset(all_cpus_set):
            sys.exit(
                "Core assignment is unsupported because siblings {0} of core {1} are not usable. Please always make all virtual cores of a physical core available.".format(
                    siblings_set.difference(all_cpus_set), core
                )
            )
    # Second, compute some values we will need.
    unit_count = len(cores_of_unit)
    units = sorted(cores_of_unit.keys())
    # Round the core limit up to a multiple of the physical-core size, because
    # all siblings of a used physical core must be blocked together.
    coreLimit_rounded_up = int(math.ceil(coreLimit / core_size) * core_size)
    assert coreLimit <= coreLimit_rounded_up < (coreLimit + core_size)
    units_per_run = int(math.ceil(coreLimit_rounded_up / unit_size))
    if units_per_run > 1 and units_per_run * num_of_threads > unit_count:
        sys.exit(
            "Cannot split runs over multiple CPUs/memory regions and at the same time assign multiple runs to the same CPU/memory region. Please reduce the number of threads to {0}.".format(
                unit_count // units_per_run
            )
        )
    runs_per_unit = int(math.ceil(num_of_threads / unit_count))
    assert units_per_run == 1 or runs_per_unit == 1
    if units_per_run == 1 and runs_per_unit * coreLimit > unit_size:
        sys.exit(
            "Cannot run {} benchmarks with {} cores on {} CPUs/memory regions with {} cores, because runs would need to be split across multiple CPUs/memory regions. Please reduce the number of threads.".format(
                num_of_threads, coreLimit, unit_count, unit_size
            )
        )
    # Warn on misuse of hyper-threading
    need_HT = False
    if units_per_run == 1:
        # Checking whether the total amount of usable physical cores is not enough,
        # there might be some cores we cannot use, e.g. when scheduling with coreLimit=3 on quad-core machines.
        # Thus we check per unit.
        assert coreLimit * runs_per_unit <= unit_size
        if coreLimit_rounded_up * runs_per_unit > unit_size:
            need_HT = True
            logging.warning(
                "The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.",
                (unit_size // coreLimit_rounded_up) * unit_count,
            )
    else:
        if coreLimit_rounded_up * num_of_threads > len(allCpus):
            assert coreLimit_rounded_up * runs_per_unit > unit_size
            need_HT = True
            logging.warning(
                "The number of threads is too high and hyper-threading sibling cores need to be split among different runs, which makes benchmarking unreliable. Please reduce the number of threads to %s.",
                len(allCpus) // coreLimit_rounded_up,
            )
    logging.debug(
        "Going to assign at most %s runs per CPU/memory region, each one using %s cores and blocking %s cores on %s CPUs/memory regions.",
        runs_per_unit,
        coreLimit,
        coreLimit_rounded_up,
        units_per_run,
    )
    # Third, do the actual core assignment.
    result = []
    used_cores = set()
    for run in range(num_of_threads):
        # this calculation ensures that runs are split evenly across units
        start_unit = (run * units_per_run) % unit_count
        cores = []
        cores_with_siblings = set()
        for unit_nr in range(start_unit, start_unit + units_per_run):
            assert len(cores) < coreLimit
            # Some systems have non-contiguous unit numbers,
            # so we take the i'th unit out of the list of available units.
            # On normal system this is the identity mapping.
            unit = units[unit_nr]
            for core in cores_of_unit[unit]:
                if core not in cores:
                    cores.extend(
                        c for c in siblings_of_core[core] if c not in used_cores
                    )
                if len(cores) >= coreLimit:
                    break
            cores_with_siblings.update(cores)
            cores = cores[:coreLimit]  # shrink if we got more cores than necessary
            # remove used cores such that we do not try to use them again
            cores_of_unit[unit] = [
                core for core in cores_of_unit[unit] if core not in cores
            ]
        assert (
            len(cores) == coreLimit
        ), "Wrong number of cores for run {} of {} - previous results: {}, remaining cores per CPU/memory region: {}, current cores: {}".format(
            run + 1, num_of_threads, result, cores_of_unit, cores
        )
        blocked_cores = cores if need_HT else cores_with_siblings
        assert not used_cores.intersection(blocked_cores)
        used_cores.update(blocked_cores)
        result.append(sorted(cores))
    assert len(result) == num_of_threads
    assert all(len(cores) == coreLimit for cores in result)
    # Bug fix: the assertion message must be a string; the previous
    # 'str + result' (a list) raised TypeError instead of showing the
    # intended AssertionError message when the assertion failed.
    assert len(set(itertools.chain(*result))) == num_of_threads * coreLimit, (
        "Cores are not uniquely assigned to runs: {}".format(result)
    )
    logging.debug("Final core assignment: %s.", result)
    return result
def get_memory_banks_per_run(coreAssignment, cgroups):
"""Get an assignment of memory banks to runs that fits to the given coreAssignment,
i.e., no run is allowed to use memory that is not local (on the same NUMA node)
to one of its CPU cores."""
try:
# read list of available memory banks
allMems = set(cgroups.read_allowed_memory_banks())
result = []
for cores in coreAssignment:
mems = set()
for core in cores:
coreDir = "/sys/devices/system/cpu/cpu{0}/".format(core)
mems.update(_get_memory_banks_listed_in_dir(coreDir))
allowedMems = sorted(mems.intersection(allMems))
logging.debug(
"Memory banks for cores %s are %s, of which we can use %s.",
cores,
list(mems),
allowedMems,
)
result.append(allowedMems)
assert len(result) == len(coreAssignment)
if any(result) and os.path.isdir("/sys/devices/system/node/"):
return result
else:
# All runs get the empty list of memory regions
# because this system has no NUMA support
return None
except ValueError as e:
sys.exit("Could not read memory information from kernel: {0}".format(e))
def _get_memory_banks_listed_in_dir(path):
"""Get all memory banks the kernel lists in a given directory.
Such a directory can be /sys/devices/system/node/ (contains all memory banks)
or /sys/devices/system/cpu/cpu*/ (contains all memory banks on the same NUMA node as that core)."""
# Such directories contain entries named "node<id>" for each memory bank
return [int(entry[4:]) for entry in os.listdir(path) if entry.startswith("node")]
def check_memory_size(memLimit, num_of_threads, memoryAssignment, my_cgroups):
"""Check whether the desired amount of parallel benchmarks fits in the memory.
Implemented are checks for memory limits via cgroup controller "memory" and
memory bank restrictions via cgroup controller "cpuset",
as well as whether the system actually has enough memory installed.
@param memLimit: the memory limit in bytes per run
@param num_of_threads: the number of parallel benchmark executions
@param memoryAssignment: the allocation of memory banks to runs (if not present, all banks are assigned to all runs)
"""
try:
# Check amount of memory allowed via cgroups.
def check_limit(actualLimit):
if actualLimit < memLimit:
sys.exit(
"Cgroups allow only {} bytes of memory to be used, cannot execute runs with {} bytes of memory.".format(
actualLimit, memLimit
)
)
elif actualLimit < memLimit * num_of_threads:
sys.exit(
"Cgroups allow only {} bytes of memory to be used, not enough for {} benchmarks with {} bytes each. Please reduce the number of threads".format(
actualLimit, num_of_threads, memLimit
)
)
if not os.path.isdir("/sys/devices/system/node/"):
logging.debug(
"System without NUMA support in Linux kernel, ignoring memory assignment."
)
return
if cgroups.MEMORY in my_cgroups:
# We use the entries hierarchical_*_limit in memory.stat and not memory.*limit_in_bytes
# because the former may be lower if memory.use_hierarchy is enabled.
for key, value in my_cgroups.get_key_value_pairs(cgroups.MEMORY, "stat"):
if (
key == "hierarchical_memory_limit"
or key == "hierarchical_memsw_limit"
):
check_limit(int(value))
# Get list of all memory banks, either from memory assignment or from system.
if not memoryAssignment:
if cgroups.CPUSET in my_cgroups:
allMems = my_cgroups.read_allowed_memory_banks()
else:
allMems = _get_memory_banks_listed_in_dir("/sys/devices/system/node/")
memoryAssignment = [
allMems
] * num_of_threads # "fake" memory assignment: all threads on all banks
else:
allMems = set(itertools.chain(*memoryAssignment))
memSizes = {mem: _get_memory_bank_size(mem) for mem in allMems}
except ValueError as e:
sys.exit("Could not read memory information from kernel: {0}".format(e))
# Check whether enough memory is allocatable on the assigned memory banks.
# As the sum of the sizes of the memory banks is at most the total size of memory in the system,
# and we do this check always even if the banks are not restricted,
# this also checks whether the system has actually enough memory installed.
usedMem = collections.Counter()
for mems_of_run in memoryAssignment:
totalSize = sum(memSizes[mem] for mem in mems_of_run)
if totalSize < memLimit:
sys.exit(
"Memory banks {} do not have enough memory for one run, only {} bytes available.".format(
mems_of_run, totalSize
)
)
usedMem[tuple(mems_of_run)] += memLimit
if usedMem[tuple(mems_of_run)] > totalSize:
sys.exit(
"Memory banks {} do not have enough memory for all runs, only {} bytes available. Please reduce the number of threads.".format(
mems_of_run, totalSize
)
)
def _get_memory_bank_size(memBank):
    """Return the total size of NUMA memory bank *memBank* in bytes.

    Parses the "MemTotal" entry of the kernel's per-node meminfo file.
    Raises ValueError when the entry is missing or not a "<n> kB" value.
    """
    meminfo_path = "/sys/devices/system/node/node{0}/meminfo".format(memBank)
    with open(meminfo_path) as meminfo:
        for entry in meminfo:
            if "MemTotal" not in entry:
                continue
            raw_value = entry.split(":")[1].strip()
            if raw_value[-3:] != " kB":
                raise ValueError(
                    '"{}" in file {} is not a memory size.'.format(raw_value, meminfo_path)
                )
            # The kernel reports KiB but labels them "kB"; convert to bytes.
            byte_size = int(raw_value[:-3]) * 1024
            logging.debug("Memory bank %s has size %s bytes.", memBank, byte_size)
            return byte_size
    raise ValueError("Failed to read total memory from {}.".format(meminfo_path))
def get_cpu_package_for_core(core):
    """Get the number of the physical package (socket) a core belongs to."""
    topology_path = (
        "/sys/devices/system/cpu/cpu{0}/topology/physical_package_id".format(core)
    )
    return int(util.read_file(topology_path))
def get_cores_of_same_package_as(core):
    """Return the list of cores sharing *core*'s physical package (kernel sibling list)."""
    siblings_path = (
        "/sys/devices/system/cpu/cpu{0}/topology/core_siblings_list".format(core)
    )
    return util.parse_int_list(util.read_file(siblings_path))
|
en
| 0.86213
|
# This file is part of BenchExec, a framework for reliable benchmarking: # https://github.com/sosy-lab/benchexec # # SPDX-FileCopyrightText: 2007-2020 <NAME> <https://www.sosy-lab.org> # # SPDX-License-Identifier: Apache-2.0 This module contains functions for computing assignments of resources to runs. Calculate an assignment of the available CPU cores to a number of parallel benchmark executions such that each run gets its own cores without overlapping of cores between runs. In case the machine has hyper-threading, this method tries to avoid putting two different runs on the same physical core (but it does not guarantee this if the number of parallel runs is too high to avoid it). In case the machine has multiple CPUs, this method avoids splitting a run across multiple CPUs if the number of cores per run is lower than the number of cores per CPU (splitting a run over multiple CPUs provides worse performance). It will also try to split the runs evenly across all available CPUs. A few theoretically-possible cases are not implemented, for example assigning three 10-core runs on a machine with two 16-core CPUs (this would have unfair core assignment and thus undesirable performance characteristics anyway). The list of available cores is read from the cgroup file system, such that the assigned cores are a subset of the cores that the current process is allowed to use. This script does currently not support situations where the available cores are asymmetrically split over CPUs, e.g. 3 cores on one CPU and 5 on another. 
@param coreLimit: the number of cores for each run @param num_of_threads: the number of parallel benchmark executions @param coreSet: the list of CPU cores identifiers provided by a user, None makes benchexec using all cores @return a list of lists, where each inner list contains the cores for one run # read list of available CPU cores # Filter CPU cores according to the list of identifiers provided by a user # read mapping of core to memory region # If some cores do not have NUMA information, skip using it completely # read mapping of core to CPU ("physical package") # select the more fine grained division among memory regions and physical package # read hyper-threading information (sibling cores sharing the same physical core) This method does the actual work of _get_cpu_cores_per_run without reading the machine architecture from the file system in order to be testable. For description, c.f. above. Note that this method might change the input parameters! Do not call it directly, call getCpuCoresPerRun()! @param use_hyperthreading: A boolean to check if no-hyperthreading method is being used @param allCpus: the list of all available cores @param cores_of_unit: a mapping from logical unit (can be memory region (NUMA node) or physical package(CPU), depending on the architecture of system) to lists of cores that belong to this unit @param siblings_of_core: a mapping from each core to a list of sibling cores including the core itself (a sibling is a core sharing the same physical core) # First, do some checks whether this algorithm has a chance to work. # Number of cores per unit # Number of threads per core # Second, compute some values we will need. # Warn on misuse of hyper-threading # Checking whether the total amount of usable physical cores is not enough, # there might be some cores we cannot use, e.g. when scheduling with coreLimit=3 on quad-core machines. # Thus we check per unit. # Third, do the actual core assignment. 
# this calculation ensures that runs are split evenly across units # Some systems have non-contiguous unit numbers, # so we take the i'th unit out of the list of available units. # On normal system this is the identity mapping. # shrink if we got more cores than necessary # remove used cores such that we do not try to use them again Get an assignment of memory banks to runs that fits to the given coreAssignment, i.e., no run is allowed to use memory that is not local (on the same NUMA node) to one of its CPU cores. # read list of available memory banks # All runs get the empty list of memory regions # because this system has no NUMA support Get all memory banks the kernel lists in a given directory. Such a directory can be /sys/devices/system/node/ (contains all memory banks) or /sys/devices/system/cpu/cpu*/ (contains all memory banks on the same NUMA node as that core). # Such directories contain entries named "node<id>" for each memory bank Check whether the desired amount of parallel benchmarks fits in the memory. Implemented are checks for memory limits via cgroup controller "memory" and memory bank restrictions via cgroup controller "cpuset", as well as whether the system actually has enough memory installed. @param memLimit: the memory limit in bytes per run @param num_of_threads: the number of parallel benchmark executions @param memoryAssignment: the allocation of memory banks to runs (if not present, all banks are assigned to all runs) # Check amount of memory allowed via cgroups. # We use the entries hierarchical_*_limit in memory.stat and not memory.*limit_in_bytes # because the former may be lower if memory.use_hierarchy is enabled. # Get list of all memory banks, either from memory assignment or from system. # "fake" memory assignment: all threads on all banks # Check whether enough memory is allocatable on the assigned memory banks. 
# As the sum of the sizes of the memory banks is at most the total size of memory in the system, # and we do this check always even if the banks are not restricted, # this also checks whether the system has actually enough memory installed. Get the size of a memory bank in bytes. # kernel uses KiB but names them kB, convert to Byte Get the number of the physical package (socket) a core belongs to.
| 2.877533
| 3
|
setup.py
|
qamplus/qamplus-pythonsdk
| 0
|
6625403
|
# setuptools packaging script for the QAMPlus SDK.
import sys
from os import path
from setuptools import setup, find_packages

# Directories excluded from the built distribution.
EXCLUDE_FROM_PACKAGES = ['tests']
# unittest.mock entered the standard library in Python 3.3; older
# interpreters need the third-party `mock` backport for the test suite.
needs_mock = sys.version_info < (3, 3)
mock = ['mock'] if needs_mock else []
# Absolute path of this script's directory.
# NOTE(review): `here` is computed but never used below.
here = path.abspath(path.dirname(__file__))
version = "2.2.1"
setup(name='qamplus',
      version=version,
      description="QAMPlus SDK",
      license="MIT",
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Natural Language :: English",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: 3.4",
          "Programming Language :: Python :: 3.5",
          "Programming Language :: Python :: 3.6",
      ],
      keywords='qamplus, sms, voice, mobile, emailing, api, messaging',
      author='QAMPlus Corp.',
      author_email='<EMAIL>',
      url="https://github.com/jeniaoo/qamplus_python",
      install_requires=['requests'],
      test_suite='nose.collector',
      tests_require=['nose', 'pytz'] + mock,
      packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
      )
|
import sys
from os import path
from setuptools import setup, find_packages
EXCLUDE_FROM_PACKAGES = ['tests']
needs_mock = sys.version_info < (3, 3)
mock = ['mock'] if needs_mock else []
here = path.abspath(path.dirname(__file__))
version = "2.2.1"
setup(name='qamplus',
version=version,
description="QAMPlus SDK",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
keywords='qamplus, sms, voice, mobile, emailing, api, messaging',
author='QAMPlus Corp.',
author_email='<EMAIL>',
url="https://github.com/jeniaoo/qamplus_python",
install_requires=['requests'],
test_suite='nose.collector',
tests_require=['nose', 'pytz'] + mock,
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
)
|
none
| 1
| 1.705091
| 2
|
|
CollisionDetection.py
|
Zel9689/Typehero
| 0
|
6625404
|
import numpy as np
dimension = 2
def SameDirection(direction, ao):
    """Return True when *ao* lies within 90 degrees of *direction*."""
    # A strictly positive dot product means the angle between them is < 90 deg.
    projection = np.dot(direction, ao)
    return projection > 0
class Collider:
    """Convex shape described by its vertex list, queried via support points."""

    def __init__(self, vertices):
        self.vertices = vertices

    def FindFurthestPoint(self, direction):
        """Return the vertex with the largest dot product against *direction*."""
        best_score = np.finfo(float).min
        for candidate in self.vertices:
            score = np.dot(candidate, direction)
            if score > best_score:
                best_score = score
                best_point = candidate
        return best_point
def Support(colliderA, colliderB, direction):
    """Minkowski-difference support point of the two shapes along *direction*.

    The support points traced over all directions outline a shape that
    contains the origin exactly when the two colliders overlap.
    """
    farthest_a = colliderA.FindFurthestPoint(direction)
    farthest_b = colliderB.FindFurthestPoint(np.multiply(direction, -1))
    return np.subtract(farthest_a, farthest_b)
class Simplex:
    """Fixed-capacity (4-point) simplex used by the GJK iteration."""

    def __init__(self):
        # Slots are pre-filled with 0 placeholders; `size` tracks how many are live.
        self.points = (0, 0, 0, 0)
        self.size = 0
        self.direction = (1, 0, 0)

    def push_front(self, point):
        """Insert *point* as the newest vertex, dropping the oldest of four."""
        previous = self.points
        self.points = (point, previous[0], previous[1], previous[2])
        self.size = min(self.size + 1, 4)
def GJK(colliderA, colliderB):
    """Return True when the two convex shapes intersect (GJK algorithm)."""
    simplex = Simplex()
    # Seed the simplex with one support point and search back toward the origin.
    first_support = Support(colliderA, colliderB, simplex.direction)
    simplex.push_front(first_support)
    simplex.direction = np.multiply(first_support, -1)
    while True:
        candidate = Support(colliderA, colliderB, simplex.direction)
        # If no support point crosses the origin, the shapes cannot overlap.
        if np.dot(candidate, simplex.direction) <= 0:
            return False
        simplex.push_front(candidate)
        if NextSimplex(simplex, simplex.direction):
            return True
def NextSimplex(points, direction):
    """Dispatch to the simplex-size-specific handler; True means origin enclosed."""
    handlers = {2: Line, 3: Triangle, 4: Tetrahedron}
    handler = handlers.get(points.size)
    if handler is None:
        return False
    return handler(points, direction)
def Line(points, direction):
    """Update search direction from a 2-point simplex; never encloses the origin."""
    a, b = points.points[0], points.points[1]
    ab = np.subtract(b, a)
    ao = np.multiply(a, -1)  # vector from the newest vertex toward the origin
    if not SameDirection(ab, ao):
        # Origin lies behind a: shrink the simplex to just a and aim at the origin.
        points.points = (a, 0, 0, 0)
        points.size = 1
        points.direction = ao
    else:
        # Perpendicular to ab, in the plane of ab and ao, pointing at the origin.
        points.direction = np.cross(np.cross(ab, ao), ab)
    return False
def Triangle(points, direction):
    """Process a 3-point simplex: pick the Voronoi region containing the origin.

    Mutates `points` (vertices, size, search direction). Edge regions delegate
    to Line(); the remaining branches fall through to a `dimension`-dependent
    result (True in 2-D, False in 3-D where the search must continue toward a
    tetrahedron).
    """
    a = points.points[0]
    b = points.points[1]
    c = points.points[2]
    ab = np.subtract(b, a)
    ac = np.subtract(c, a)
    ao = np.multiply(a, -1)  # from newest vertex a toward the origin
    abc = np.cross(ab, ac)   # triangle face normal
    # Is the origin outside the triangle, beyond edge AC?
    if(SameDirection(np.cross(abc, ac), ao)):
        if(SameDirection(ac, ao)):
            # Keep edge AC; search perpendicular to AC, toward the origin.
            points.points = (a, c, 0, 0)
            points.size = 2
            direction = np.cross(ac, ao)
            points.direction = np.cross(direction, ac)
        else:
            # Origin closer to edge AB: fall back to the line case.
            points.points = (a, b, 0, 0)
            return Line(points, direction)
    else:
        # Is the origin outside the triangle, beyond edge AB?
        if(SameDirection(np.cross(ab, abc), ao)):
            points.points = (a, b, 0, 0)
            return Line(points, direction)
        else:
            # Origin projects inside the triangle: pick the face normal that
            # points toward it (re-winding the triangle when needed).
            if(SameDirection(abc, ao)):
                points.direction = abc
            else:
                points.points = (a, c, b, 0)
                points.direction = np.multiply(abc, -1)
    # 2-D: reaching this point is treated as containment. 3-D: keep iterating.
    # NOTE(review): for any other `dimension` value this implicitly returns None.
    if(dimension == 2):
        return True
    if(dimension == 3):
        return False
def Tetrahedron(points, direction):
    """Process a 4-point simplex: test the origin against the three faces at a.

    If the origin is outside one of the faces, shrink the simplex to that
    triangle and recurse into the triangle case; otherwise the origin is
    enclosed by the tetrahedron and the shapes intersect (returns True).
    """
    a = points.points[0]
    b = points.points[1]
    c = points.points[2]
    d = points.points[3]
    ab = np.subtract(b, a)
    ac = np.subtract(c, a)
    ad = np.subtract(d, a)
    ao = np.multiply(a, -1)  # from newest vertex a toward the origin
    # Outward normals of the three faces that contain vertex a.
    abc = np.cross(ab, ac)
    acd = np.cross(ac, ad)
    adb = np.cross(ad, ab)
    # BUG FIX: the original passed bare tuples to Triangle(), which expects the
    # simplex object (it reads .points/.size and assigns .direction), raising
    # AttributeError. Shrink the simplex in place instead, then recurse.
    if SameDirection(abc, ao):
        points.points = (a, b, c, 0)
        points.size = 3
        return Triangle(points, direction)
    if SameDirection(acd, ao):
        points.points = (a, c, d, 0)
        points.size = 3
        return Triangle(points, direction)
    if SameDirection(adb, ao):
        points.points = (a, d, b, 0)
        points.size = 3
        return Triangle(points, direction)
    # Origin is on the inner side of all three face planes -> enclosed.
    return True
|
import numpy as np
dimension = 2
# 判斷是否跟方向D同方向(+-90度)
def SameDirection(direction, ao):
return np.dot(direction, ao) > 0
class Collider:
def __init__(self, vertices):
self.vertices = vertices
# 找到跟方向D內積最大的頂點
def FindFurthestPoint(self, direction):
maxDistance = np.finfo(float).min
for vertex in self.vertices:
distance = np.dot(vertex, direction)
if(distance > maxDistance):
maxDistance = distance
maxPoint = vertex
return maxPoint
# 兩個形狀找出的頂點所形成的向量(這些向量末點可以構成圖形,如果圖形包含原點在內則代表碰撞)
def Support(colliderA, colliderB, direction):
return np.subtract(colliderA.FindFurthestPoint(direction), colliderB.FindFurthestPoint(np.multiply(direction, -1)))
class Simplex:
def __init__(self):
self.points = (0,0,0,0)
self.size = 0
self.direction = (1, 0, 0)
def push_front(self, point):
self.points = (point, self.points[0], self.points[1], self.points[2])
self.size = min(self.size + 1, 4)
def GJK(colliderA, colliderB):
points = Simplex()
support = Support(colliderA, colliderB, points.direction)
points.push_front(support)
points.direction = np.multiply(support, -1)
while(True):
support = Support(colliderA, colliderB, points.direction)
if(np.dot(support, points.direction) <= 0):
return False
points.push_front(support)
if(NextSimplex(points, points.direction)):
return True
def NextSimplex(points, direction):
x = points.size
if(x == 2):
return Line(points, direction)
elif(x == 3):
return Triangle(points, direction)
elif(x == 4):
return Tetrahedron(points, direction)
return False
def Line(points, direction):
a = points.points[0]
b = points.points[1]
ab = np.subtract(b, a)
ao = np.multiply(a, -1)
if(SameDirection(ab, ao)):
direction = np.cross(ab, ao)
points.direction = np.cross(direction, ab)
else:
points.points = (a,0,0,0)
points.size = 1
points.direction = ao
return False
def Triangle(points, direction):
a = points.points[0]
b = points.points[1]
c = points.points[2]
ab = np.subtract(b, a)
ac = np.subtract(c, a)
ao = np.multiply(a, -1)
abc = np.cross(ab, ac)
if(SameDirection(np.cross(abc, ac), ao)):
if(SameDirection(ac, ao)):
points.points = (a, c, 0, 0)
points.size = 2
direction = np.cross(ac, ao)
points.direction = np.cross(direction, ac)
else:
points.points = (a, b, 0, 0)
return Line(points, direction)
else:
if(SameDirection(np.cross(ab, abc), ao)):
points.points = (a, b, 0, 0)
return Line(points, direction)
else:
if(SameDirection(abc, ao)):
points.direction = abc
else:
points.points = (a, c, b, 0)
points.direction = np.multiply(abc, -1)
if(dimension == 2):
return True
if(dimension == 3):
return False
def Tetrahedron(points, direction):
a = points.points[0]
b = points.points[1]
c = points.points[2]
d = points.points[3]
ab = np.subtract(b, a)
ac = np.subtract(c, a)
ad = np.subtract(d, a)
ao = np.multiply(a, -1)
abc = np.cross(ab, ac)
acd = np.cross(ac, ad)
adb = np.cross(ad, ab)
if(SameDirection(abc, ao)):
return Triangle((a, b, c, 0), direction)
if(SameDirection(acd, ao)):
return Triangle((a, c, d, 0), direction)
if(SameDirection(adb, ao)):
return Triangle((a, d, b, 0), direction)
return True
|
zh
| 0.980351
|
# 判斷是否跟方向D同方向(+-90度) # 找到跟方向D內積最大的頂點 # 兩個形狀找出的頂點所形成的向量(這些向量末點可以構成圖形,如果圖形包含原點在內則代表碰撞)
| 3.062836
| 3
|
scripts/reactor/undefined.py
|
G00dBye/YYMS
| 54
|
6625405
|
<filename>scripts/reactor/undefined.py<gh_stars>10-100
# Fallback reactor script: no dedicated handler exists for this reactor.
# Presumably `sm` is the server-injected script manager and `parentID` is the
# triggering reactor's ID -- TODO confirm against the script engine API.
sm.chat("Unhandled reactor script, Please update us in MapleEllinel forums. ID: " + str(parentID))
sm.dispose()
|
<filename>scripts/reactor/undefined.py<gh_stars>10-100
sm.chat("Unhandled reactor script, Please update us in MapleEllinel forums. ID: " + str(parentID))
sm.dispose()
|
none
| 1
| 1.250584
| 1
|
|
automlib.py
|
nitishkthakur/automl
| 2
|
6625406
|
<filename>automlib.py<gh_stars>1-10
import pandas as pd
import numpy as np
from sklearn import preprocessing, model_selection, metrics, ensemble
from imblearn import over_sampling, under_sampling, combine
import pyswarm
import lightgbm as lgb
class psoregressor:
    """LightGBM regressor with hyperparameters tuned by particle swarm optimization.

    `fit` minimizes K-fold mean squared error over six LGBM hyperparameters
    using pyswarm's PSO, then refits the best model on the full data.
    """

    def __init__(self, population = 30, omega=0.5, phip=0.5, phig=0.5, maxiter=100, minstep=1e-3,
                 minfunc=1e-3, debug=True, params = {'n_estimators': [50, 2500], 'max_depth': [2, 10],
                                                     'max_features': [.1, 1], 'subsample': [.1, 1],
                                                     'learning_rate': [.01, .90], 'min_samples_leaf': [1, 400]}, cv = 5):
        # PSO settings, passed straight through to pyswarm.pso in fit():
        # swarm size, inertia, cognitive/social weights, iteration cap,
        # and convergence thresholds.
        self.pop = population
        self.omega = omega
        self.phip = phip
        self.phig = phig
        self.maxiter = maxiter
        self.minstep = minstep
        self.minfunc = minfunc
        self.debug = debug
        # NOTE(review): `params` is stored but never read -- the actual search
        # space is the hard-coded lb/ub below; keep them in sync manually.
        # NOTE(review): the mutable default dict is shared across instances.
        self.param_dictionary = params
        # Lower/upper bounds for the six tuned hyperparameters, in the same
        # order as `self.hyperparameters`.
        self.lb = [50, 2, .1, .1, .01, 1]
        self.ub = [2500, 10, 1, 1, .90, 400]
        self.hyperparameters = ['n_estimators', 'max_depth', 'max_features', 'subsample', 'learning_rate', 'min_samples_leaf']
        self.cv = cv

    def fit(self, X, y):
        """Tune hyperparameters by PSO (minimizing K-fold MSE), then fit on all data."""
        #X_train_inner, X_test_inner, y_train_inner, y_test_inner = model_selection.train_test_split(X, y, test_size = 0.2, random_state = 40, shuffle = True)
        X = np.array(X)
        y = np.array(y)
        # Define Objective function: mean K-fold MSE for one candidate position.
        def obj(x):
            # Decode the PSO position vector into LGBM keyword arguments
            # (integer-valued hyperparameters are truncated).
            params = {self.hyperparameters[0]: int(x[0]), self.hyperparameters[1]: int(x[1]), self.hyperparameters[2]: x[2],
                      self.hyperparameters[3]: x[3], self.hyperparameters[4]: x[4], self.hyperparameters[5]: int(x[5])}
            kf = model_selection.KFold(n_splits=self.cv)
            kf.get_n_splits(X)
            rmse = []  # NOTE(review): despite the name, these are MSE values, not RMSE
            for train_index, test_index in kf.split(np.array(X)):
                X_train_inner, X_test_inner = np.array(X)[train_index], np.array(X)[test_index]
                y_train_inner, y_test_inner = np.array(y)[train_index], np.array(y)[test_index]
                model = lgb.LGBMRegressor(**params).fit(X_train_inner, y_train_inner)
                rmse.append(metrics.mean_squared_error(y_test_inner, model.predict(X_test_inner)))
            rmse = np.mean(rmse)
            return rmse
        # Perform Model Optimization: PSO search over the [lb, ub] box.
        xopt, fopt = pyswarm.pso(func = obj, lb = self.lb, ub = self.ub, swarmsize = self.pop, phip = self.phip, phig = self.phig,
                                 omega = self.omega, maxiter =self.maxiter, minstep = self.minstep, minfunc = self.minfunc,
                                 debug = self.debug)
        # Fit the best model on the full data with the optimized hyperparameters.
        hyperpara_optimized = {self.hyperparameters[0]: int(xopt[0]), self.hyperparameters[1]: int(xopt[1]), self.hyperparameters[2]: xopt[2],
                               self.hyperparameters[3]: xopt[3], self.hyperparameters[4]: xopt[4], self.hyperparameters[5]: int(xopt[5])}
        self.fitted_model = lgb.LGBMRegressor(**hyperpara_optimized).fit(X, y)
        return self

    def predict(self, X):
        """Predict with the model fitted by the last `fit` call."""
        return self.fitted_model.predict(X)
#############################################################################################################################################
###### PSO based Classifier ########
## Lightgbm
class psoclassifier:
    """LightGBM classifier tuned per-fold by particle swarm optimization.

    `fit` runs a stratified K-fold loop: on the first fold it tunes the LGBM
    hyperparameters with pyswarm's PSO, then trains one model per fold with
    the tuned settings and combines all fold models in a soft-voting ensemble.

    @param params: dict mapping hyperparameter name -> [lower, upper] bound
    @param sample: per-fold resampling: 'oversample' (SMOTE), 'undersample'
        (EditedNearestNeighbours), 'balance' (SMOTEENN); anything else = none
    @param top_n: number of fold models kept in `top_n_model_list`
    @param cv: number of stratified folds
    """

    def __init__(self, params = {'n_estimators': [20, 2500], 'max_depth': [2, 10], 'min_data_in_leaf': [3, 200],
                                 'learning_rate': [0.01, 0.9], 'subsample': [0.1, 1], 'feature_fraction': [.01, 1],
                                 'reg_lambda': [0.1, 5], 'num_leaves': [2, 700]},
                 swarmsize = 25, omega=0.5, phip=0.5, phig=0.5, maxiter=100, minstep=1e-1,
                 minfunc=1e-1, debug=True, cv = 5, top_n = 3, sample = 'oversample'):
        print('classifier imported')
        # Initialize hyperparameters of Optimizer (passed to pyswarm.pso).
        self.swarmsize = swarmsize
        self.maxiter = maxiter
        self.omega = omega
        self.phip = phip
        self.phig = phig
        self.minstep = minstep
        self.minfunc = minfunc
        self.debug = debug
        self.bounds = list(params.values())
        self.top_n = top_n
        self.fitted_status = False
        # Initialize model related parameters.
        self.cv = cv
        # BUG FIX: copy the bounds dict. The original stored the (mutable)
        # default argument itself, so set_params() on one instance silently
        # rewrote the defaults shared by every future instance.
        self.params = dict(params)
        self.param_names = list(params.keys())
        self.sample = sample
        # Print parameter details
        print('Parameters to tune and bounds: \n')
        print(pd.DataFrame(params, index = ['LB', 'UB']))
        # initialize model bookkeeping
        self.model_list = []
        self.score = []
        self.upper_uncertainty = []
        self.lower_uncertainty = []

    def set_params(self, params_update):
        """Update the search bounds for a subset of hyperparameters in place."""
        bounds_temp = list(params_update.values())
        param_names = list(params_update.keys())
        # Print parameter details
        print('Parameters to Update: ', param_names)
        print('New Parameter bounds: ', bounds_temp)
        for key in self.params:
            if key in param_names:
                print('Updating: ', key, ' from ', self.params[key], ' to ', params_update[key])
                self.params[key] = params_update[key]
        # BUG FIX: fit() reads self.bounds, which the original never refreshed,
        # making set_params() a no-op for the actual optimization.
        self.bounds = list(self.params.values())

    def get_params(self):
        """Return a copy of the current hyperparameter bounds dict."""
        return self.params.copy()

    def get_scores(self):
        """Return a copy of the per-fold accuracy scores from the last fit."""
        return self.score.copy()

    def fit(self, X, y):
        """Tune on fold 1, train one model per stratified fold, build a voting ensemble."""
        # Get split indices
        # NOTE(review): random_state without shuffle=True raises on scikit-learn >= 0.24.
        kf = model_selection.StratifiedKFold(n_splits=self.cv, random_state = 0)
        kf.get_n_splits(X)
        # reset model bookkeeping for this fit
        self.model_list = []
        self.score = []
        self.model_name = []
        print('\n\n Tuning Models \n\n')
        split_index = 1
        # Split into train test
        for train_index, test_index in kf.split(X,y):
            X_train, X_test = np.array(X)[train_index], np.array(X)[test_index]
            y_train, y_test = np.array(y).ravel()[train_index], np.array(y).ravel()[test_index]
            # Optional class rebalancing, applied to the training fold only.
            if self.sample == 'oversample':
                X_train, y_train = over_sampling.SMOTE(random_state = 12).fit_resample(X_train, y_train)
            if self.sample == 'undersample':
                X_train, y_train = under_sampling.EditedNearestNeighbours(random_state = 12).fit_resample(X_train, y_train)
            if self.sample == 'balance':
                X_train, y_train = combine.SMOTEENN(random_state = 12).fit_resample(X_train, y_train)
            # Objective: negated accuracy on this fold's test split (pyswarm minimizes).
            def obj(x):
                params = {'n_estimators': int(x[0]), 'max_depth': int(x[1]), 'min_data_in_leaf': int(x[2]),
                          'learning_rate': x[3], 'subsample': x[4], 'feature_fraction': x[5],
                          'reg_lambda': x[6], 'num_leaves': int(x[7])}
                # Fit required model:
                model_sel = lgb.LGBMClassifier(**params).fit(X_train, y_train, eval_set = [(X_test, y_test)],
                                                             eval_metric = 'multi_logloss', verbose = False)
                # Evaluate negated accuracy
                score = -metrics.accuracy_score(y_test, model_sel.predict(X_test))
                return score
            if split_index == 1:
                ## Optimize on the first fold only; later folds reuse the tuned params.
                pso = pyswarm.pso(func = obj, lb = [val[0] for val in self.bounds], ub = [val[1] for val in self.bounds],
                                  swarmsize=self.swarmsize, omega=self.omega, phip=self.phip, phig=self.phig,
                                  maxiter=self.maxiter, minstep=self.minstep, minfunc=self.minfunc, debug=self.debug)
                # Decode the best swarm position into LGBM keyword arguments.
                x = pso[0]
                params_tuned = {'n_estimators': int(x[0]), 'max_depth': int(x[1]), 'min_data_in_leaf': int(x[2]),
                                'learning_rate': x[3], 'subsample': x[4], 'feature_fraction': x[5],
                                'reg_lambda': x[6], 'num_leaves': int(x[7])}
            # Get Fitted, tuned model for this fold
            fitted_model = lgb.LGBMClassifier(**params_tuned).fit(X_train, y_train, eval_set = [(X_test, y_test)],
                                                                  eval_metric = 'multi_logloss', verbose = False)
            self.model_name.append('model' + str(split_index))
            # Calculate fold accuracy
            score_temp = metrics.accuracy_score(y_test, fitted_model.predict(X_test))
            self.score.append(score_temp)
            print('Metric: ', score_temp, '\n\n')
            # Append model to final list of models
            self.model_list.append(fitted_model)
            # Print fold scores collected so far
            print(self.score)
            split_index = split_index + 1
        model_rank = pd.DataFrame(data = list(zip(self.model_list, self.score)),
                                  index = self.model_name, columns = ['model', 'score'])
        # NOTE(review): sort_values('score') is ascending, so this keeps the
        # LOWEST-accuracy fold models; confirm whether descending was intended.
        self.top_n_model_list = model_rank.sort_values('score')[:self.top_n].model.values.tolist()
        print('\n All Models trained: \n', model_rank['score'])
        #print('\n \n Models Selected by voting: \n \n', model_rank.sort_values('score')[:self.top_n]['score'])
        self.voting_classifier = ensemble.VotingClassifier(estimators = list(zip(self.model_name, self.model_list)), voting = 'soft').fit(X,y)
        self.fitted_status = True
        return self

    def predict(self, X):
        """Predict with the soft-voting ensemble built by `fit`."""
        return self.voting_classifier.predict(X)
|
<filename>automlib.py<gh_stars>1-10
import pandas as pd
import numpy as np
from sklearn import preprocessing, model_selection, metrics, ensemble
from imblearn import over_sampling, under_sampling, combine
import pyswarm
import lightgbm as lgb
class psoregressor:
def __init__(self, population = 30, omega=0.5, phip=0.5, phig=0.5, maxiter=100, minstep=1e-3,
minfunc=1e-3, debug=True, params = {'n_estimators': [50, 2500], 'max_depth': [2, 10],
'max_features': [.1, 1], 'subsample': [.1, 1],
'learning_rate': [.01, .90], 'min_samples_leaf': [1, 400]}, cv = 5):
self.pop = population
self.omega = omega
self.phip = phip
self.phig = phig
self.maxiter = maxiter
self.minstep = minstep
self.minfunc = minfunc
self.debug = debug
self.param_dictionary = params
self.lb = [50, 2, .1, .1, .01, 1]
self.ub = [2500, 10, 1, 1, .90, 400]
self.hyperparameters = ['n_estimators', 'max_depth', 'max_features', 'subsample', 'learning_rate', 'min_samples_leaf']
self.cv = cv
def fit(self, X, y):
#X_train_inner, X_test_inner, y_train_inner, y_test_inner = model_selection.train_test_split(X, y, test_size = 0.2, random_state = 40, shuffle = True)
X = np.array(X)
y = np.array(y)
# Define Objective function
def obj(x):
params = {self.hyperparameters[0]: int(x[0]), self.hyperparameters[1]: int(x[1]), self.hyperparameters[2]: x[2],
self.hyperparameters[3]: x[3], self.hyperparameters[4]: x[4], self.hyperparameters[5]: int(x[5])}
kf = model_selection.KFold(n_splits=self.cv)
kf.get_n_splits(X)
rmse = []
for train_index, test_index in kf.split(np.array(X)):
X_train_inner, X_test_inner = np.array(X)[train_index], np.array(X)[test_index]
y_train_inner, y_test_inner = np.array(y)[train_index], np.array(y)[test_index]
model = lgb.LGBMRegressor(**params).fit(X_train_inner, y_train_inner)
rmse.append(metrics.mean_squared_error(y_test_inner, model.predict(X_test_inner)))
rmse = np.mean(rmse)
return rmse
# Perform Model Optimization
xopt, fopt = pyswarm.pso(func = obj, lb = self.lb, ub = self.ub, swarmsize = self.pop, phip = self.phip, phig = self.phig,
omega = self.omega, maxiter =self.maxiter, minstep = self.minstep, minfunc = self.minfunc,
debug = self.debug)
# Fit the best model
hyperpara_optimized = {self.hyperparameters[0]: int(xopt[0]), self.hyperparameters[1]: int(xopt[1]), self.hyperparameters[2]: xopt[2],
self.hyperparameters[3]: xopt[3], self.hyperparameters[4]: xopt[4], self.hyperparameters[5]: int(xopt[5])}
self.fitted_model = lgb.LGBMRegressor(**hyperpara_optimized).fit(X, y)
return self
def predict(self, X):
return self.fitted_model.predict(X)
#############################################################################################################################################
###### PSO based Classifier ########
## Lightgbm
class psoclassifier:
def __init__(self, params = {'n_estimators': [20, 2500], 'max_depth': [2, 10], 'min_data_in_leaf': [3, 200],
'learning_rate': [0.01, 0.9], 'subsample': [0.1, 1], 'feature_fraction': [.01, 1],
'reg_lambda': [0.1, 5], 'num_leaves': [2, 700]},
swarmsize = 25, omega=0.5, phip=0.5, phig=0.5, maxiter=100, minstep=1e-1,
minfunc=1e-1, debug=True, cv = 5, top_n = 3, sample = 'oversample'):
print('classifier imported')
# Initialize hyperparameters of Optimizer
self.swarmsize = swarmsize
self.maxiter = maxiter
self.omega = omega
self.phip = phip
self.phig = phig
self.maxiter = maxiter
self.minstep = minstep
self.minfunc = minfunc
self.debug = debug
self.bounds = list(params.values())
self.top_n = top_n
self.fitted_status = False
# Initialize model related parameters
self.cv = cv
self.params = params;
self.param_names = list(params.keys())
self.sample = sample
# Print parameter details
print('Parameters to tune and bounds: \n')
print(pd.DataFrame(params, index = ['LB', 'UB']))
# initialize model list
self.model_list = []
self.score = []
self.upper_uncertainty = []
self.lower_uncertainty = []
def set_params(self, params_update):
bounds_temp = list(params_update.values())
param_names = list(params_update.keys())
# Print parameter details
print('Parameters to Update: ', param_names)
print('New Parameter bounds: ', bounds_temp)
for key in self.params:
if key in param_names:
print('Updating: ', key, ' from ', self.params[key], ' to ', params_update[key])
self.params[key] = params_update[key]
def get_params(self):
return self.params.copy()
def get_scores(self):
return self.score.copy()
def fit(self, X, y):
# Get split indices
kf = model_selection.StratifiedKFold(n_splits=self.cv, random_state = 0)
kf.get_n_splits(X)
# initialize model list
self.model_list = []
self.score = []
self.model_name = []
print('\n\n Tuning Models \n\n')
split_index = 1
# Split into train test
for train_index, test_index in kf.split(X,y):
X_train, X_test = np.array(X)[train_index], np.array(X)[test_index]
y_train, y_test = np.array(y).ravel()[train_index], np.array(y).ravel()[test_index]
if self.sample == 'oversample':
X_train, y_train = over_sampling.SMOTE(random_state = 12).fit_resample(X_train, y_train)
if self.sample == 'undersample':
X_train, y_train = under_sampling.EditedNearestNeighbours(random_state = 12).fit_resample(X_train, y_train)
if self.sample == 'balance':
X_train, y_train = combine.SMOTEENN(random_state = 12).fit_resample(X_train, y_train)
# Define objective function
def obj(x):
params = {'n_estimators': int(x[0]), 'max_depth': int(x[1]), 'min_data_in_leaf': int(x[2]),
'learning_rate': x[3], 'subsample': x[4], 'feature_fraction': x[5],
'reg_lambda': x[6], 'num_leaves': int(x[7])}
# Fit required model:
model_sel = lgb.LGBMClassifier(**params).fit(X_train, y_train, eval_set = [(X_test, y_test)],
eval_metric = 'multi_logloss', verbose = False)
# Evaluate rmse
score = -metrics.accuracy_score(y_test, model_sel.predict(X_test))
return score
if split_index == 1:
## Optimize
pso = pyswarm.pso(func = obj, lb = [val[0] for val in self.bounds], ub = [val[1] for val in self.bounds],
swarmsize=self.swarmsize, omega=self.omega, phip=self.phip, phig=self.phig,
maxiter=self.maxiter, minstep=self.minstep, minfunc=self.minfunc, debug=self.debug)
# Get tuned hyperparameters
x = pso[0]
params_tuned = {'n_estimators': int(x[0]), 'max_depth': int(x[1]), 'min_data_in_leaf': int(x[2]),
'learning_rate': x[3], 'subsample': x[4], 'feature_fraction': x[5],
'reg_lambda': x[6], 'num_leaves': int(x[7])}
# Get Fitted, tuned model
fitted_model = lgb.LGBMClassifier(**params_tuned).fit(X_train, y_train, eval_set = [(X_test, y_test)],
eval_metric = 'multi_logloss', verbose = False)
self.model_name.append('model' + str(split_index))
# Calculate Score
score_temp = metrics.accuracy_score(y_test, fitted_model.predict(X_test))
self.score.append(score_temp)
print('Metric: ', score_temp, '\n\n')
# Append model to final list of models
self.model_list.append(fitted_model)
# Print fold errors
print(self.score)
split_index = split_index + 1
model_rank = pd.DataFrame(data = list(zip(self.model_list, self.score)),
index = self.model_name, columns = ['model', 'score'])
self.top_n_model_list = model_rank.sort_values('score')[:self.top_n].model.values.tolist()
print('\n All Models trained: \n', model_rank['score'])
#print('\n \n Models Selected by voting: \n \n', model_rank.sort_values('score')[:self.top_n]['score'])
self.voting_classifier = ensemble.VotingClassifier(estimators = list(zip(self.model_name, self.model_list)), voting = 'soft').fit(X,y)
self.fitted_status = True
return self
def predict(self, X):
return self.voting_classifier.predict(X)
|
en
| 0.404044
|
#X_train_inner, X_test_inner, y_train_inner, y_test_inner = model_selection.train_test_split(X, y, test_size = 0.2, random_state = 40, shuffle = True) # Define Objective function # Perform Model Optimization # Fit the best model ############################################################################################################################################# ###### PSO based Classifier ######## ## Lightgbm # Initialize hyperparameters of Optimizer # Initialize model related parameters # Print parameter details # initialize model list # Print parameter details # Get split indices # initialize model list # Split into train test # Define objective function # Fit required model: # Evaluate rmse ## Optimize # Get tuned hyperparameters # Get Fitted, tuned model # Calculate Score # Append model to final list of models # Print fold errors #print('\n \n Models Selected by voting: \n \n', model_rank.sort_values('score')[:self.top_n]['score'])
| 2.543703
| 3
|
NippoKun/report/models.py
|
KIKUYA-Takumi/Nippokun
| 0
|
6625407
|
from django.contrib.auth.models import User
from django.db import models


class Report(models.Model):
    """A report written by a user, with creation/update timestamps."""

    # on_delete is mandatory from Django 2.0 onward; CASCADE matches the
    # implicit default of Django < 2.0, so existing behavior is preserved.
    report_author = models.ForeignKey(User, related_name='report_author',
                                      on_delete=models.CASCADE)
    report_title = models.CharField(max_length=50)
    report_content = models.TextField(max_length=999)
    created_at = models.DateTimeField(auto_now_add=True)  # set once, on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save


class Score(models.Model):
    """A 1-5 rating (with optional comment) that a user gives to a Report."""

    report = models.ForeignKey(Report, related_name='score',
                               on_delete=models.CASCADE)
    score_author = models.ForeignKey(User, related_name='score_author',
                                     on_delete=models.CASCADE)
    # Rating restricted to 1..5 via choices; defaults to the midpoint.
    score = models.IntegerField(
        default=3,
        choices=(
            (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)
        )
    )
    evaluate_point = models.TextField(max_length=30)
    comment = models.TextField(max_length=999, blank=True)
    scored_at = models.DateTimeField(auto_now=True)
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Report(models.Model):
report_author = models.ForeignKey(User, related_name='report_author')
report_title = models.CharField(max_length=50)
report_content = models.TextField(max_length=999)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Score(models.Model):
report = models.ForeignKey(Report, related_name='score')
score_author = models.ForeignKey(User, related_name='score_author')
score = models.IntegerField(
default=3,
choices=(
(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)
)
)
evaluate_point = models.TextField(max_length=30)
comment = models.TextField(max_length=999, blank=True)
scored_at = models.DateTimeField(auto_now=True)
|
en
| 0.963489
|
# Create your models here.
| 2.265966
| 2
|
src/pbn_api/migrations/0014_auto_20210713_2229.py
|
iplweb/django-bpp
| 1
|
6625408
|
<filename>src/pbn_api/migrations/0014_auto_20210713_2229.py
# Generated by Django 3.0.14 on 2021-07-13 20:29
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated: make PublikacjaInstytucji.publicationVersion optional."""

    dependencies = [
        ("pbn_api", "0013_oswiadczenieinstytucji_publikacjainstytucji"),
    ]

    operations = [
        # Allow NULL/blank on the UUID field; no data migration is needed.
        migrations.AlterField(
            model_name="publikacjainstytucji",
            name="publicationVersion",
            field=models.UUIDField(blank=True, null=True),
        ),
    ]
|
<filename>src/pbn_api/migrations/0014_auto_20210713_2229.py
# Generated by Django 3.0.14 on 2021-07-13 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("pbn_api", "0013_oswiadczenieinstytucji_publikacjainstytucji"),
]
operations = [
migrations.AlterField(
model_name="publikacjainstytucji",
name="publicationVersion",
field=models.UUIDField(blank=True, null=True),
),
]
|
en
| 0.823596
|
# Generated by Django 3.0.14 on 2021-07-13 20:29
| 1.386224
| 1
|
comet/convergence.py
|
raphael-group/comet
| 20
|
6625409
|
import math
import numpy as np
from scipy.stats import *
def tv(p, q):
    """Total variation distance: the largest pointwise |p[i] - q[i]|.

    Indexed over p; raises ValueError for empty p and IndexError if q is
    shorter than p (same as the original list-based implementation).
    """
    return max(abs(p[i] - q[i]) for i in range(len(p)))
def discrete_convergence_eqb_plot(filelist, num_genes, ks_set, outprefix):
    """Estimate convergence of sampled gene-set collections across runs.

    For every run file ``f`` in *filelist*, reads ``f + '.key'`` (tab-separated
    ``"geneset:weight"`` entries), then, at growing prefixes of the sample
    sequence, computes the total variation distance between each run's
    empirical gene-set frequency distribution and the pooled (union)
    distribution.  Returns the mean TV distance at the final checkpoint.

    ``num_genes`` and ``ks_set`` are kept for interface compatibility but are
    unused.  Print statements were converted to Python 3 print functions
    (the rest of the function is already 2/3-compatible).
    """
    klets_seq, gset2weight = dict(), dict()
    for f in filelist:
        klets_seq[f] = list()
        for l in open(f + '.key'):
            v = l.rstrip().split("\t")
            for i in range(len(v)):
                gset, w = v[i].split(":")
                # Flatten "a,b c,d"-style groupings into one sorted gene key.
                allg = set()
                for gg in gset.split(" "):
                    for g in gg.split(","):
                        allg.add(g)
                key = ",".join(sorted(allg))
                klets_seq[f].append(dict(G=key, W=float(w)))
                if key not in gset2weight:
                    gset2weight[key] = float(w)
    # Sample-sequence length taken from the last run read; runs are assumed
    # to be of equal length.
    s_length = len(klets_seq[f])
    tv_list = dict()
    tv_w_list = dict()
    # Checkpoint spacing: about 0.5% of the sequence, at least 1.
    last_eqb = int(math.ceil(s_length * 0.005))
    interval_point = int(last_eqb * 2) if int(last_eqb * 2) > 1 else 1
    plot_x = list()
    y2, y2_err_h, y2_err_l = list(), list(), list()
    start_plot_index = last_eqb
    start_plot_index += start_plot_index % interval_point
    print("Interval point:", interval_point)
    print("Last n:", last_eqb)
    print("Start plot index:", start_plot_index)
    for i in range(start_plot_index, s_length, interval_point):
        tv_list[i] = list()
        tv_w_list[i] = list()
        range_start = 1
        range_end = i
        sum_f = dict()
        union_f = dict()
        union_p = list()
        union_p_w = list()
        # Tally per-run and pooled gene-set counts over samples [1, i].
        for f in klets_seq.keys():
            sum_f[f] = dict()
            for j in range(range_start, range_end + 1):
                g = klets_seq[f][j]['G']
                if g not in sum_f[f]:
                    sum_f[f][g] = 0
                sum_f[f][g] += 1
                if g not in union_f:
                    union_f[g] = 0
                union_f[g] += 1
        sum_union_w = sum([gset2weight[gset] for gset in union_f.keys()])
        for gset in sorted(union_f.keys()):
            union_p.append(union_f[gset] / (len(klets_seq.keys()) * float(range_end - range_start + 1)))
            union_p_w.append(gset2weight[gset] / sum_union_w)
        # TV distance of each run's empirical distribution from the pooled one,
        # both frequency-weighted (tv_list) and score-weighted (tv_w_list).
        for f in klets_seq.keys():
            p1_distrib = list()
            p1_distrib_w = list()
            sum_p1 = range_end - range_start + 1
            sum_p1_w = sum([gset2weight[gset] if gset in sum_f[f] else 0 for gset in union_f.keys()])
            for gset in sorted(union_f.keys()):
                if gset in sum_f[f]:
                    p1_distrib.append(sum_f[f][gset] / float(sum_p1))
                    p1_distrib_w.append(gset2weight[gset] / sum_p1_w)
                else:
                    p1_distrib.append(0)
                    p1_distrib_w.append(0)
            tv_list[i].append(tv(p1_distrib, union_p))
            tv_w_list[i].append(tv(p1_distrib_w, union_p_w))
        a2 = mean_confidence_interval(tv_list[i])
        # Plotting is currently disabled; the checkpoint series is still kept.
        plot_x.append(i)
        y2.append(a2[0])
        y2_err_h.append(a2[0] - a2[1])
        y2_err_l.append(a2[2] - a2[0])
    return y2[-1]
def discrete_convergence(klets_seq, iter_num):
    """Mean TV distance between each run's gene-set frequency distribution
    and the distribution pooled over all runs.

    ``klets_seq`` is a sequence of per-run dicts mapping gene-set key ->
    ``{'freq': count}``; ``iter_num`` is the number of sampling iterations
    per run.
    """
    run_count = len(klets_seq)
    per_run_freq = {}   # run index -> {key -> count}
    pooled_freq = {}    # key -> count summed over all runs
    for run_idx in range(run_count):
        counts = {}
        for key in klets_seq[run_idx].keys():
            freq = klets_seq[run_idx][key]['freq']
            counts[key] = freq
            if key not in pooled_freq:
                pooled_freq[key] = 0
            pooled_freq[key] += freq
        per_run_freq[run_idx] = counts
    ordered_keys = sorted(pooled_freq.keys())
    pooled_dist = [pooled_freq[k] / (run_count * float(iter_num))
                   for k in ordered_keys]
    distances = []
    for run_idx in range(run_count):
        counts = per_run_freq[run_idx]
        run_dist = [counts[k] / float(iter_num) if k in counts else 0
                    for k in ordered_keys]
        distances.append(tv(run_dist, pooled_dist))
    return mean_confidence_interval(distances)[0]
def discrete_convergence_check(klets_seq, s_length, conv_start):
    """Mean TV distance between each run's empirical gene-set distribution
    and the distribution pooled over all runs.

    ``klets_seq`` maps run-id -> list of sampled gene-set keys.  ``sum_num``
    ends up as the length of the *last* run iterated, so all runs are assumed
    to have equal length.  ``s_length`` and ``conv_start`` are accepted but
    not used by the current implementation.
    """
    tv_list = list()
    sum_num = 0
    sum_f = dict()      # run -> {gene-set key -> count}
    union_f = dict()    # gene-set key -> pooled count over all runs
    union_p = list()    # pooled empirical distribution (sorted key order)
    for f in klets_seq.keys():
        sum_f[f] = dict()
        for j in range(len(klets_seq[f])):
            if klets_seq[f][j] not in sum_f[f]:
                sum_f[f][klets_seq[f][j]] = 0
            sum_f[f][klets_seq[f][j]] += 1
            if klets_seq[f][j] not in union_f:
                union_f[klets_seq[f][j]] = 0
            union_f[klets_seq[f][j]] += 1
        sum_num = len(klets_seq[f])
    for gset in sorted(union_f.keys()):
        union_p.append(union_f[gset]/(len(klets_seq.keys())*float(sum_num)))
    for f in klets_seq.keys():
        p1_dict = sum_f[f]  # NOTE(review): unused local, kept for fidelity
        p1_distrib = list()
        for gset in sorted(union_f.keys()):
            if gset in sum_f[f]:
                p1_distrib.append(sum_f[f][gset]/float(sum_num))
            else:
                p1_distrib.append(0)
        tv_value = tv(p1_distrib, union_p)
        tv_list.append(tv_value)
    a2 = mean_confidence_interval(tv_list)
    return a2[0]
def plot_errorbar(x, y2, y2_err_l, y2_err_h, outprefix):
    """Save an error-bar plot of the convergence curve to <outprefix>.freq.run.png.

    matplotlib is imported lazily with the non-interactive Agg backend so the
    function works on machines without a display.
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.figure()
    plt.errorbar(x, y2 , [y2_err_l, y2_err_h], marker='x')
    plt.savefig(outprefix + '.freq.run.png')
def mean_confidence_interval(data, confidence=0.75):
    """Return (mean, lower, upper): a Student-t confidence interval for the mean.

    Fix: uses the public ``t.ppf``; the original called the private ``t._ppf``,
    which has been removed from modern SciPy releases.
    """
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), sem(a)
    # Two-sided interval: the (1+confidence)/2 quantile of t with n-1 dof.
    h = se * t.ppf((1 + confidence) / 2., n - 1)
    return m, m - h, m + h
|
import math
import numpy as np
from scipy.stats import *
def tv(p, q):
""" Total variance distance """
return max([abs(p[i] - q[i]) for i in range(len(p))])
def discrete_convergence_eqb_plot(filelist, num_genes, ks_set, outprefix):
klets_seq, gset2weight = dict(), dict()
for f in filelist:
klets_seq[f] = list()
for l in open(f+'.key'):
v = l.rstrip().split("\t")
for i in range(len(v)):
gset, w = v[i].split(":")
allg = set()
for gg in gset.split(" "):
for g in gg.split(","):
allg.add(g)
klets_seq[f].append(dict(G=",".join(sorted(allg)), W=float(w)))
if ",".join(sorted(allg)) not in gset2weight:
gset2weight[",".join(sorted(allg))] = float(w)
s_length = len(klets_seq[f])
pq_list = dict()
tv_list = dict()
tv_w_list = dict()
last_eqb = int(math.ceil(s_length * 0.005))
interval_point = int(last_eqb*2) if int(last_eqb*2) > 1 else 1
plot_x, plot_y, plot_yerr_low, plot_yerr_high = list(), list(), list(), list()
y2, y2_err_h, y2_err_l = list(), list(), list()
start_plot_index = int(s_length * 0.0) + last_eqb
start_interval = start_plot_index % interval_point
start_plot_index += start_interval
print "Interval point:", interval_point
print "Last n:", last_eqb
print "Start plot index:", start_plot_index
for i in range(start_plot_index, s_length, interval_point):
#for i in range(last_eqb, s_length):
tv_list[i] = list()
tv_w_list[i] = list()
#range_start = i - last_eqb + 1
range_start = 1
range_end = i
dict_f = dict()
sum_f = dict()
union_f = dict()
union_p = list()
union_p_w = list()
for f in klets_seq.keys():
#sum_f[f] -= klets_seq[f][i - last_eqb]['W']
#sum_f[f] += klets_seq[f][i]['W']
dict_f[f] = dict()
sum_f[f] = dict()
for j in range(range_start, range_end+1):
dict_f[f][klets_seq[f][j]['G']] = klets_seq[f][j]['W']
if klets_seq[f][j]['G'] not in sum_f[f]:
#sum_f[f] += klets_seq[f][j]['W']
sum_f[f][klets_seq[f][j]['G']] = 0
#sum_w[f][klets_seq[f][j]['G']] = 0
sum_f[f][klets_seq[f][j]['G']] += 1
#sum_w[f][klets_seq[f][j]['G']] += klets_seq[f][j]['W']
if klets_seq[f][j]['G'] not in union_f:
union_f[klets_seq[f][j]['G']] = 0
union_f[klets_seq[f][j]['G']] += 1
#union_w[klets_seq[f][j]['G']] += klets_seq[f][j]['W']
sum_union_w = sum([gset2weight[gset] for gset in union_f.keys()])
for gset in sorted(union_f.keys()):
union_p.append(union_f[gset]/(len(klets_seq.keys())*float(range_end-range_start+1)))
union_p_w.append(gset2weight[gset] / sum_union_w)
for f in klets_seq.keys():
#p1_dict, p2_dict = dict(), dict()
p1_dict = sum_f[f]
p1_distrib = list()
p1_distrib_w = list()
sum_p1 = range_end - range_start + 1
sum_p1_w = sum([gset2weight[gset] if gset in sum_f[f] else 0 for gset in union_f.keys() ])
for gset in sorted(union_f.keys()):
if gset in sum_f[f]:
p1_distrib.append(sum_f[f][gset]/float(sum_p1))
p1_distrib_w.append(gset2weight[gset]/sum_p1_w)
else:
p1_distrib.append(0)
p1_distrib_w.append(0)
tv_value = tv(p1_distrib, union_p)
tv_value_w = tv(p1_distrib_w, union_p_w)
tv_list[i].append(tv_value)
tv_w_list[i].append(tv_value_w)
#a = mean_confidence_interval(pq_list[i])
a2 = mean_confidence_interval(tv_list[i])
#a = mean_confidence_interval(tv_w_list[i])
#if i % interval_point == 0:
plot_x.append(i)
#plot_y.append(a[0])
#plot_yerr_low.append(a[0] - a[1])
#plot_yerr_high.append(a[2] - a[0])
y2.append(a2[0])
y2_err_h.append(a2[0] - a2[1])
y2_err_l.append(a2[2] - a2[0])
#plot_errorbar(plot_x, plot_y, y2, plot_yerr_low, y2_err_l, plot_yerr_high, y2_err_h, outprefix)
#plot_errorbar(plot_x, y2, y2_err_l, y2_err_h, outprefix)
return y2[-1]
def discrete_convergence(klets_seq, iter_num):
#keys_order.append(dict(K=key, W=sum([set2scores[M]["W"] for M in collection])))
tv_list = list()
#last_eqb = int(math.ceil(s_length * 0.005))
#interval_point = int(last_eqb*2) if int(last_eqb*2) > 1 else 1
sum_num = iter_num
sum_f = dict()
union_f = dict()
union_p = list()
for f in range(len(klets_seq)):
sum_f[f] = dict()
for j in klets_seq[f].keys():
sum_f[f][j] = klets_seq[f][j]['freq']
if j not in union_f:
union_f[j] = 0
union_f[j] += klets_seq[f][j]['freq']
for gset in sorted(union_f.keys()):
union_p.append(union_f[gset]/(len(klets_seq)*float(sum_num)))
for f in range(len(klets_seq)):
p1_dict = sum_f[f]
p1_distrib = list()
for gset in sorted(union_f.keys()):
if gset in sum_f[f]:
p1_distrib.append(sum_f[f][gset]/float(sum_num))
else:
p1_distrib.append(0)
tv_value = tv(p1_distrib, union_p)
tv_list.append(tv_value)
a2 = mean_confidence_interval(tv_list)
return a2[0]
def discrete_convergence_check(klets_seq, s_length, conv_start):
#keys_order.append(dict(K=key, W=sum([set2scores[M]["W"] for M in collection])))
tv_list = list()
#last_eqb = int(math.ceil(s_length * 0.005))
#interval_point = int(last_eqb*2) if int(last_eqb*2) > 1 else 1
sum_num = 0
sum_f = dict()
union_f = dict()
union_p = list()
for f in klets_seq.keys():
sum_f[f] = dict()
for j in range(len(klets_seq[f])):
if klets_seq[f][j] not in sum_f[f]:
sum_f[f][klets_seq[f][j]] = 0
sum_f[f][klets_seq[f][j]] += 1
if klets_seq[f][j] not in union_f:
union_f[klets_seq[f][j]] = 0
union_f[klets_seq[f][j]] += 1
sum_num = len(klets_seq[f])
for gset in sorted(union_f.keys()):
union_p.append(union_f[gset]/(len(klets_seq.keys())*float(sum_num)))
for f in klets_seq.keys():
p1_dict = sum_f[f]
p1_distrib = list()
# sum_p1 = range_end - range_start + 1
for gset in sorted(union_f.keys()):
if gset in sum_f[f]:
p1_distrib.append(sum_f[f][gset]/float(sum_num))
else:
p1_distrib.append(0)
tv_value = tv(p1_distrib, union_p)
tv_list.append(tv_value)
a2 = mean_confidence_interval(tv_list)
return a2[0]
#def plot_errorbar(x, y, y2, yerr_l, y2_err_l, yerr_h, y2_err_h, outprefix):
def plot_errorbar(x, y2, y2_err_l, y2_err_h, outprefix):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.figure()
#plt.errorbar(x, y , [yerr_l, yerr_h], marker='o')
plt.errorbar(x, y2 , [y2_err_l, y2_err_h], marker='x')
plt.savefig(outprefix + '.freq.run.png')
def mean_confidence_interval(data, confidence=0.75):
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), sem(a)
h = se * t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
|
en
| 0.240772
|
Total variance distance #for i in range(last_eqb, s_length): #range_start = i - last_eqb + 1 #sum_f[f] -= klets_seq[f][i - last_eqb]['W'] #sum_f[f] += klets_seq[f][i]['W'] #sum_f[f] += klets_seq[f][j]['W'] #sum_w[f][klets_seq[f][j]['G']] = 0 #sum_w[f][klets_seq[f][j]['G']] += klets_seq[f][j]['W'] #union_w[klets_seq[f][j]['G']] += klets_seq[f][j]['W'] #p1_dict, p2_dict = dict(), dict() #a = mean_confidence_interval(pq_list[i]) #a = mean_confidence_interval(tv_w_list[i]) #if i % interval_point == 0: #plot_y.append(a[0]) #plot_yerr_low.append(a[0] - a[1]) #plot_yerr_high.append(a[2] - a[0]) #plot_errorbar(plot_x, plot_y, y2, plot_yerr_low, y2_err_l, plot_yerr_high, y2_err_h, outprefix) #plot_errorbar(plot_x, y2, y2_err_l, y2_err_h, outprefix) #keys_order.append(dict(K=key, W=sum([set2scores[M]["W"] for M in collection]))) #last_eqb = int(math.ceil(s_length * 0.005)) #interval_point = int(last_eqb*2) if int(last_eqb*2) > 1 else 1 #keys_order.append(dict(K=key, W=sum([set2scores[M]["W"] for M in collection]))) #last_eqb = int(math.ceil(s_length * 0.005)) #interval_point = int(last_eqb*2) if int(last_eqb*2) > 1 else 1 # sum_p1 = range_end - range_start + 1 #def plot_errorbar(x, y, y2, yerr_l, y2_err_l, yerr_h, y2_err_h, outprefix): #plt.errorbar(x, y , [yerr_l, yerr_h], marker='o')
| 2.681783
| 3
|
api/urls.py
|
iafisher/writingstreak
| 2
|
6625410
|
<reponame>iafisher/writingstreak<filename>api/urls.py
from django.urls import path

from . import views

# URL namespace used for reverse() lookups, e.g. reverse('api:fetch').
app_name = 'api'

urlpatterns = [
    path('fetch', views.fetch, name='fetch'),
    path('update', views.update, name='update'),
]
|
from django.urls import path
from . import views
app_name = 'api'
urlpatterns = [
path('fetch', views.fetch, name='fetch'),
path('update', views.update, name='update'),
]
|
none
| 1
| 1.759781
| 2
|
|
pytest-tests/infra/pages.py
|
itaiag/login-example
| 1
|
6625411
|
'''
Created on Jul 9, 2015
@author: Itai
'''
from infra.actionbot import Locator
from infra.base_page import BasePageObject
import tests.marks as m
class LoginPage(BasePageObject):
    """Page object for the Login screen.

    Wraps the Selenium driver (through ``self.bot``) with named actions for
    the username/password fields, the Login button and the Register link.
    Navigation methods return the page object for the page they land on.
    """

    _USERNAME_TB_BY = Locator.id("username")
    _PASSWORD_TB_BY = Locator.id("password")
    _REGISTER_LNK_BY = Locator.link_text("Register")
    _LOGIN_BTN_BY = Locator.css_selector("div.form-actions > button")
    _ALERT_MSG_BY = Locator.css_selector(".alert[ng-if]")

    @m.step("In login page")
    def __init__(self, driver):
        """Wait for the Login header, confirming we actually reached the page."""
        super(LoginPage, self).__init__(driver)
        self.bot.wait_for_element_by(Locator.xpath("//h2[text()='Login']"))

    @m.step("Type '{1}' to user tb")
    def type_to_username_tb(self, username):
        self.bot.send_keys_to_element_by(self._USERNAME_TB_BY, username)

    @m.step("Type '{1}' to password tb")
    def type_to_password_tb(self, password):
        self.bot.send_keys_to_element_by(self._PASSWORD_TB_BY, password)

    @m.step("Click on login btn and go to dashboard page")
    def click_on_login_btn_and_go_to_dashboard_page(self):
        self.bot.click_on_element_by(self._LOGIN_BTN_BY)
        return DashboardPage(self.driver)

    @m.step("Do failure")
    def do_failure(self):
        """Click a non-existent element on purpose: an example of a failing action."""
        self.bot.click_on_element_by(Locator.id("NotExist"))

    @m.step("Get alert message text")
    def get_alert_msg_text(self):
        return self.bot.get_element_text_by(self._ALERT_MSG_BY)

    @m.step
    def click_on_login_btn_and_stay_in_login_page(self):
        self.bot.click_on_element_by(self._LOGIN_BTN_BY)
        # We want to make sure that we are still in the login page.
        return LoginPage(self.driver)

    @m.step
    def click_on_register_lnk_and_go_to_register_page(self):
        self.bot.click_on_element_by(self._REGISTER_LNK_BY)
        return RegisterPage(self.driver)

    @m.step
    def do_login_and_got_to_dashboard_page(self, username, password):
        """Fill both credential fields and submit; returns the DashboardPage."""
        self.type_to_username_tb(username)
        self.type_to_password_tb(password)
        return self.click_on_login_btn_and_go_to_dashboard_page()
class RegisterPage(BasePageObject):
    """Page object for the Register screen: first/last name, username and
    password fields plus the Register button."""

    _FIRST_NAME_TB_BY = Locator.id("firstName")
    _LAST_NAME_TB_BY = Locator.name("lastName")
    _USERNAME_TB_BY = Locator.id("username")
    _PASSWORD_TB_BY = Locator.id("password")
    _REGISTER_BTN_BY = Locator.css_selector("div.form-actions > button")

    @m.step("In register page")
    def __init__(self, driver):
        # Waiting for the header confirms navigation actually reached the page.
        super(RegisterPage, self).__init__(driver)
        self.bot.wait_for_element_by(Locator.xpath("//h2[text()='Register']"))

    @m.step("Type '{1}' to first name tb")
    def type_to_first_name_tb(self, firstName):
        self.bot.send_keys_to_element_by(self._FIRST_NAME_TB_BY, firstName)

    @m.step("Type '{1}' to last name tb")
    def type_to_last_name_tb(self, lastName):
        self.bot.send_keys_to_element_by(self._LAST_NAME_TB_BY, lastName)

    @m.step("Type '{1}' to username tb")
    def type_to_username_tb(self, username):
        self.bot.send_keys_to_element_by(self._USERNAME_TB_BY, username)

    @m.step("Type '{1}' to password tb")
    def type_to_password_tb(self, password):
        self.bot.send_keys_to_element_by(self._PASSWORD_TB_BY, password)

    @m.step
    def click_on_register_btn_and_go_to_login_page(self):
        self.bot.click_on_element_by(self._REGISTER_BTN_BY)
        return LoginPage(self.driver)

    @m.step
    def do_register_and_go_to_login_page(self, firstname, lastname, username, password):
        """Fill the whole registration form and submit; returns the LoginPage."""
        self.type_to_first_name_tb(firstname)
        self.type_to_last_name_tb(lastname)
        self.type_to_username_tb(username)
        self.type_to_password_tb(password)
        return self.click_on_register_btn_and_go_to_login_page()
class DashboardPage(BasePageObject):
    """Page object for the dashboard shown after a successful login."""

    _LOGOUT_LNK = Locator.css_selector(".btn-primary")

    @m.step("In dash board page")
    def __init__(self, driver):
        # The greeting header ("Hi ...") confirms we landed on the dashboard.
        super(DashboardPage, self).__init__(driver)
        self.bot.wait_for_element_by(Locator.xpath("//h1[contains(.,'Hi')]"))

    @m.step("Click on delete user '{1}' lnk")
    def click_on_delete_user_lnk(self, user_name):
        self.bot.click_on_element_by(
            Locator.xpath("//li[contains(.,'{0}')]/a".format(user_name)))

    @m.step
    def click_on_logout_btn_and_go_to_login_page(self):
        self.bot.click_on_element_by(self._LOGOUT_LNK)
        return LoginPage(self.driver)
|
'''
Created on Jul 9, 2015
@author: Itai
'''
from infra.actionbot import Locator
from infra.base_page import BasePageObject
import tests.marks as m
class LoginPage(BasePageObject):
'''
classdocs
'''
_USERNAME_TB_BY = Locator.id("username")
_PASSWORD_TB_BY = Locator.id("password")
_REGISTER_LNK_BY = Locator.link_text("Register")
_LOGIN_BTN_BY = Locator.css_selector("div.form-actions > button")
_ALERT_MSG_BY = Locator.css_selector(".alert[ng-if]")
@m.step("In login page")
def __init__(self, driver):
'''
Construbdctor
'''
super(LoginPage, self).__init__(driver)
self.bot.wait_for_element_by(Locator.xpath("//h2[text()='Login']"))
@m.step("Type '{1}' to user tb")
def type_to_username_tb(self, username):
self.bot.send_keys_to_element_by(self._USERNAME_TB_BY, username)
@m.step("Type '{1}' to password tb")
def type_to_password_tb(self, password):
self.bot.send_keys_to_element_by(self._PASSWORD_TB_BY, password)
@m.step("Click on login btn and go to dashboard page")
def click_on_login_btn_and_go_to_dashboard_page(self):
self.bot.click_on_element_by(self._LOGIN_BTN_BY)
return DashboardPage(self.driver)
@m.step("Do failure")
def do_failure(self):
'''
The purpose of this method is to be an example for a failed action
'''
self.bot.click_on_element_by(Locator.id("NotExist"))
@m.step("Get alert message text")
def get_alert_msg_text(self):
return self.bot.get_element_text_by(self._ALERT_MSG_BY)
@m.step
def click_on_login_btn_and_stay_in_login_page(self):
self.bot.click_on_element_by(self._LOGIN_BTN_BY)
# We want to make sure that we are still in the login page.
return LoginPage(self.driver)
@m.step
def click_on_register_lnk_and_go_to_register_page(self):
self.bot.click_on_element_by(self._REGISTER_LNK_BY)
return RegisterPage(self.driver)
@m.step
def do_login_and_got_to_dashboard_page(self, username, password):
self.type_to_username_tb(username)
self.type_to_password_tb(password)
return self.click_on_login_btn_and_go_to_dashboard_page()
class RegisterPage(BasePageObject):
_FIRST_NAME_TB_BY = Locator.id("firstName")
_LAST_NAME_TB_BY = Locator.name("lastName")
_USERNAME_TB_BY = Locator.id("username")
_PASSWORD_TB_BY = Locator.id("password")
_REGISTER_BTN_BY = Locator.css_selector("div.form-actions > button")
@m.step("In register page")
def __init__(self, driver):
super(RegisterPage, self).__init__(driver)
self.bot.wait_for_element_by(Locator.xpath("//h2[text()='Register']"))
@m.step("Type '{1}' to first name tb")
def type_to_first_name_tb(self, firstName):
self.bot.send_keys_to_element_by(self._FIRST_NAME_TB_BY, firstName)
@m.step("Type '{1}' to last name tb")
def type_to_last_name_tb(self, lastName):
self.bot.send_keys_to_element_by(self._LAST_NAME_TB_BY, lastName)
@m.step("Type '{1}' to username tb")
def type_to_username_tb(self, username):
self.bot.send_keys_to_element_by(self._USERNAME_TB_BY, username)
@m.step("Type '{1}' to password tb")
def type_to_password_tb(self, password):
self.bot.send_keys_to_element_by(self._PASSWORD_TB_BY, password)
@m.step
def click_on_register_btn_and_go_to_login_page(self):
self.bot.click_on_element_by(self._REGISTER_BTN_BY)
return LoginPage(self.driver)
@m.step
def do_register_and_go_to_login_page(self, firstname, lastname, username, password):
self.type_to_first_name_tb(firstname)
self.type_to_last_name_tb(lastname)
self.type_to_username_tb(username)
self.type_to_password_tb(password)
return self.click_on_register_btn_and_go_to_login_page()
class DashboardPage(BasePageObject):
_LOGOUT_LNK = Locator.css_selector(".btn-primary")
@m.step("In dash board page")
def __init__(self, driver):
super(DashboardPage, self).__init__(driver)
self.bot.wait_for_element_by(Locator.xpath("//h1[contains(.,'Hi')]"))
@m.step("Click on delete user '{1}' lnk")
def click_on_delete_user_lnk(self, user_name):
self.bot.click_on_element_by(
Locator.xpath("//li[contains(.,'{0}')]/a".format(user_name)))
@m.step
def click_on_logout_btn_and_go_to_login_page(self):
self.bot.click_on_element_by(self._LOGOUT_LNK)
return LoginPage(self.driver)
|
en
| 0.921126
|
Created on Jul 9, 2015 @author: Itai classdocs Construbdctor The purpose of this method is to be an example for a failed action # We want to make sure that we are still in the login page.
| 2.305597
| 2
|
atomize/device_modules/SR_865a.py
|
Anatoly1010/Atomize_ITC
| 1
|
6625412
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import gc
import sys
import pyvisa
from pyvisa.constants import StopBits, Parity
import atomize.device_modules.config.config_utils as cutil
import atomize.general_modules.general_functions as general
class SR_865a:
#### Basic interaction functions
def __init__(self):
#### Inizialization
# setting path to *.ini file
self.path_current_directory = os.path.dirname(__file__)
self.path_config_file = os.path.join(self.path_current_directory, 'config','SR_865a_config.ini')
# configuration data
self.config = cutil.read_conf_util(self.path_config_file)
self.specific_parameters = cutil.read_specific_parameters(self.path_config_file)
# auxilary dictionaries
self.sensitivity_dict = {'1 nV': 27, '2 nV': 26, '5 nV': 25, '10 nV': 24, '20 nV': 23, '50 nV': 22,
'100 nV': 21, '200 nV': 20, '500 nV': 19, '1 uV': 18, '2 uV': 17, '5 uV': 16,
'10 uV': 15, '20 uV': 14, '50 uV': 13, '100 uV': 12, '200 uV': 11, '500 uV': 10,
'1 mV': 9, '2 mV': 8, '5 mV': 7, '10 mV': 6, '20 mV': 5, '50 mV': 4,
'100 mV': 3, '200 mV': 2, '500 mV': 1, '1 V': 0};
self.helper_sens_list = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
self.timeconstant_dict = {'1 us': 0, '3 us': 1, '10 us': 2, '30 us': 3, '100 us': 4, '300 us': 5,
'1 ms': 6, '3 ms': 7, '10 ms': 8, '30 ms': 9, '100 ms': 10, '300 ms': 11,
'1 s': 12, '3 s': 13, '10 s': 14, '30 s': 15, '100 s': 16, '300 s': 17,
'1 ks': 18, '3 ks': 19, '10 ks': 20, '30 ks': 21};
self.helper_tc_list = [1, 3, 10, 30, 100, 300, 1000]
self.ref_mode_dict = {'Internal': 0, 'External': 1, 'Dual': 2, 'Chop': 3}
self.ref_slope_dict = {'Sine': 0, 'PosTTL': 1, 'NegTTL': 2}
self.sync_dict = {'Off': 0, 'On': 1}
self.lp_fil_dict = {'6 db': 0, '12 dB': 1, "18 dB": 2, "24 dB": 3}
# Ranges and limits
self.ref_freq_min = 0.001
self.ref_freq_max = 4000000
self.ref_ampl_min = 0.000000001
self.ref_ampl_max = 2
self.harm_max = 99
self.harm_min = 1
# Test run parameters
# These values are returned by the modules in the test run
if len(sys.argv) > 1:
self.test_flag = sys.argv[1]
else:
self.test_flag = 'None'
if self.test_flag != 'test':
if self.config['interface'] == 'gpib':
try:
import Gpib
self.status_flag = 1
self.device = Gpib.Gpib(self.config['board_address'], self.config['gpib_address'])
try:
# test should be here
self.device_write('*CLS')
answer = int(float(self.device_query('*TST?')))
if answer == 0:
self.status_flag = 1
else:
general.message('During internal device test errors are found')
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
elif self.config['interface'] == 'rs232':
try:
self.status_flag = 1
rm = pyvisa.ResourceManager()
self.device = rm.open_resource(self.config['serial_address'], read_termination=self.config['read_termination'],
write_termination=self.config['write_termination'], baud_rate=self.config['baudrate'],
data_bits=self.config['databits'], parity=self.config['parity'], stop_bits=self.config['stopbits'])
self.device.timeout = self.config['timeout']; # in ms
try:
# test should be here
self.device_write('*CLS')
answer = int(self.device_query('*TST?'))
if answer == 0:
self.status_flag = 1
else:
general.message('During internal device test errors are found')
self.status_flag = 0
sys.exit()
except pyvisa.VisaIOError:
self.status_flag = 0
general.message("No connection")
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except pyvisa.VisaIOError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
elif self.config['interface'] == 'ethernet':
try:
self.status_flag = 1
rm = pyvisa.ResourceManager()
self.device = rm.open_resource(self.config['ethernet_address'])
self.device.timeout = self.config['timeout'] # in ms
try:
# test should be here
self.device_write('*CLS')
answer = int(self.device_query('*TST?'))
if answer == 0:
self.status_flag = 1
else:
general.message('During internal device test errors are found')
self.status_flag = 0
sys.exit()
except pyvisa.VisaIOError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit();
except pyvisa.VisaIOError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
elif self.test_flag == 'test':
self.test_signal = 0.001
self.test_frequency = 10000
self.test_phase = 10
self.test_timeconstant = '10 ms'
self.test_amplitude = 0.3
self.test_sensitivity = '100 mV'
self.test_ref_mode = 'Internal'
self.test_ref_slope = 'Sine'
self.test_sync = 'On'
self.test_lp_filter = '6 dB'
self.test_harmonic = 1
def close_connection(self):
if self.test_flag != 'test':
self.status_flag = 0
gc.collect()
elif self.test_flag == 'test':
pass
    def device_write(self, command):
        """Send a raw command string to the instrument (no reply expected).

        If the connection flag is not set, reports the problem and aborts
        the whole script via sys.exit().
        """
        if self.status_flag == 1:
            command = str(command)
            self.device.write(command)
        else:
            general.message("No Connection")
            self.status_flag = 0
            sys.exit()
    def device_query(self, command):
        """Send *command* and return the instrument's reply as a string.

        GPIB needs an explicit write / wait / read cycle; the pyvisa-backed
        interfaces (RS-232, Ethernet) provide ``query`` directly.
        NOTE(review): if ``config['interface']`` is none of the three known
        values, ``answer`` is never bound and a NameError is raised — confirm
        whether the config is validated upstream.
        """
        if self.status_flag == 1:
            if self.config['interface'] == 'gpib':
                self.device.write(command)
                general.wait('50 ms')  # give the instrument time to respond
                answer = self.device.read().decode()
            elif self.config['interface'] == 'rs232':
                answer = self.device.query(command)
            elif self.config['interface'] == 'ethernet':
                answer = self.device.query(command)
            return answer
        else:
            general.message("No Connection")
            self.status_flag = 0
            sys.exit()
#### device specific functions
def lock_in_name(self):
if self.test_flag != 'test':
answer = self.device_query('*IDN?')
return answer
elif self.test_flag == 'test':
answer = self.config['name']
return answer
def lock_in_ref_frequency(self, *frequency):
if self.test_flag != 'test':
if len(frequency) == 1:
freq = float(frequency[0])
if freq >= self.ref_freq_min and freq <= self.ref_freq_max:
self.device_write('FREQ '+ str(freq))
else:
general.message("Incorrect frequency")
sys.exit()
elif len(frequency) == 0:
answer = float(self.device_query('FREQ?'))
return answer
else:
general.message("Invalid Argument")
sys.exit()
elif self.test_flag == 'test':
if len(frequency) == 1:
freq = float(frequency[0])
assert(freq >= self.ref_freq_min and freq <= self.ref_freq_max), "Incorrect frequency is reached"
elif len(frequency) == 0:
answer = self.test_frequency
return answer
def lock_in_phase(self, *degree):
if self.test_flag != 'test':
if len(degree) == 1:
degs = float(degree[0])
if degs >= -360000 and degs <= 360000:
self.device_write('PHAS '+str(degs))
else:
general.message("Incorrect phase")
sys.exit()
elif len(degree) == 0:
answer = float(self.device_query('PHAS?'))
return answer
else:
general.message("Invalid Argument")
sys.exit()
elif self.test_flag == 'test':
if len(degree) == 1:
degs = float(degree[0])
assert(degs >= -360000 and degs <= 360000), "Incorrect phase is reached"
elif len(degree) == 0:
answer = self.test_phase
return answer
def lock_in_time_constant(self, *timeconstant):
    """Set or query the output filter time constant.

    With one argument such as '30 ms' (value and unit us/ms/s/ks separated
    by a space) the value is snapped to the nearest available
    1/3/10/30/100/300 step and programmed via OFLT.  With no argument the
    current setting is queried and returned as a 'value unit' string.
    In test mode the argument is only validated.
    """
    if self.test_flag != 'test':
        if len(timeconstant) == 1:
            temp = timeconstant[0].split(' ')
            if float(temp[0]) > 30 and temp[1] == 'ks':
                # 30 ks is the longest available time constant (code 21).
                # BUG FIX: the message previously said "sensitivity"
                # (copy-paste from lock_in_sensitivity).
                general.message("Desired time constant cannot be set, the nearest available value is used")
                self.device_write("OFLT "+ str(21))
            else:
                # Snap to the nearest 1/3/10/30/100/300/1000 mantissa.
                number_tc = min(self.helper_tc_list, key=lambda x: abs(x - int(temp[0])))
                # A mantissa of 1000 rolls over into the next unit.
                if int(number_tc) == 1000 and temp[1] == 'us':
                    number_tc = 1
                    temp[1] = 'ms'
                elif int(number_tc) == 1000 and temp[1] == 'ms':
                    number_tc = 1
                    temp[1] = 's'
                elif int(number_tc) == 1000 and temp[1] == 's':
                    number_tc = 1
                    temp[1] = 'ks'
                if int(number_tc) != int(temp[0]):
                    general.message("Desired time constant cannot be set, the nearest available value is used")
                tc = str(number_tc) + ' ' + temp[1]
                if tc in self.timeconstant_dict:
                    flag = self.timeconstant_dict[tc]
                    self.device_write("OFLT "+ str(flag))
                else:
                    general.message("Invalid time constant value (too high/too low)")
                    sys.exit()
        elif len(timeconstant) == 0:
            raw_answer = int(self.device_query("OFLT?"))
            answer = cutil.search_keys_dictionary(self.timeconstant_dict, raw_answer)
            return answer
        else:
            general.message("Invalid Argument")
            sys.exit()
    elif self.test_flag == 'test':
        if len(timeconstant) == 1:
            temp = timeconstant[0].split(' ')
            if float(temp[0]) > 30 and temp[1] == 'ks':
                tc = '30 ks'
            else:
                number_tc = min(self.helper_tc_list, key=lambda x: abs(x - int(temp[0])))
                if int(number_tc) == 1000 and temp[1] == 'us':
                    number_tc = 1
                    temp[1] = 'ms'
                elif int(number_tc) == 1000 and temp[1] == 'ms':
                    number_tc = 1
                    temp[1] = 's'
                elif int(number_tc) == 1000 and temp[1] == 's':
                    number_tc = 1
                    temp[1] = 'ks'
                tc = str(number_tc) + ' ' + temp[1]
            assert tc in self.timeconstant_dict, "Incorrect time constant is used"
        elif len(timeconstant) == 0:
            answer = self.test_timeconstant
            return answer
def lock_in_ref_amplitude(self, *amplitude):
if self.test_flag != 'test':
if len(amplitude) == 1:
ampl = float(amplitude[0]);
if ampl <= self.ref_ampl_max and ampl >= self.ref_ampl_min:
self.device_write('SLVL '+ str(ampl))
else:
self.device_write('SLVL '+ str(self.ref_ampl_min))
general.message("Invalid Argument")
sys.exit()
elif len(amplitude) == 0:
answer = float(self.device_query("SLVL?"))
return answer
else:
general.message("Invalid Argument")
sys.exit()
elif self.test_flag == 'test':
if len(amplitude) == 1:
ampl = float(amplitude[0]);
assert(ampl <= self.ref_ampl_max and ampl >= self.ref_ampl_min), "Incorrect amplitude is reached"
elif len(amplitude) == 0:
answer = self.test_amplitude
return answer
def lock_in_get_data(self, *channel):
if self.test_flag != 'test':
if len(channel) == 0:
answer = float(self.device_query('OUTP? 0'))
return answer
elif len(channel) == 1 and int(channel[0]) == 1:
answer = float(self.device_query('OUTP? 0'))
return answer
elif len(channel) == 1 and int(channel[0]) == 2:
answer = float(self.device_query('OUTP? 1'))
return answer
elif len(channel) == 1 and int(channel[0]) == 3:
answer = float(self.device_query('OUTP? 2'))
return answer
elif len(channel) == 1 and int(channel[0]) == 4:
answer = float(self.device_query('OUTP? 3'))
return answer
elif len(channel) == 2 and int(channel[0]) == 1 and int(channel[1]) == 2:
answer_string = self.device_query('SNAP? 0,1')
answer_list = answer_string.split(',')
list_of_floats = [float(item) for item in answer_list]
x = list_of_floats[0]
y = list_of_floats[1]
return x, y
elif len(channel) == 3 and int(channel[0]) == 1 and int(channel[1]) == 2 and int(channel[2]) == 3:
answer_string = self.device_query('SNAP? 0,1,2')
answer_list = answer_string.split(',')
list_of_floats = [float(item) for item in answer_list]
x = list_of_floats[0]
y = list_of_floats[1]
r = list_of_floats[2]
return x, y, r
elif self.test_flag == 'test':
if len(channel) == 0:
answer = self.test_signal
return answer
elif len(channel) == 1:
assert(int(channel[0]) == 1 or int(channel[0]) == 2 or \
int(channel[0]) == 3 or int(channel[0]) == 4), 'Invalid channel is given'
answer = self.test_signal
return answer
elif len(channel) == 2 and int(channel[0]) == 1 and int(channel[1]) == 2:
x = y = self.test_signal
return x, y
elif len(channel) == 3 and int(channel[0]) == 1 and int(channel[1]) == 2 and int(channel[2]) == 3:
x = y = r = self.test_signal
return x, y, r
def lock_in_sensitivity(self, *sensitivity):
if self.test_flag != 'test':
if len(sensitivity) == 1:
temp = sensitivity[0].split(' ')
if float(temp[0]) > 1 and temp[1] == 'V':
general.message("Desired sensitivity cannot be set, the nearest available value is used")
self.device_write("SCAL "+ str(0))
else:
number_sens = min(self.helper_sens_list, key=lambda x: abs(x - int(temp[0])))
if int(number_sens) == 1000 and temp[1] == 'nV':
number_sens = 1
temp[1] = 'uV'
elif int(number_sens) == 1000 and temp[1] == 'uV':
number_sens = 1
temp[1] = 'mV'
elif int(number_sens) == 1000 and temp[1] == 'mV':
number_sens = 1
temp[1] = 'V'
sens = str(number_sens) + ' ' + temp[1]
if int(number_sens) != int(temp[0]):
general.message("Desired sensitivity cannot be set, the nearest available value is used")
if sens in self.sensitivity_dict:
flag = self.sensitivity_dict[sens]
self.device_write("SCAL "+ str(flag))
else:
general.message("Invalid sensitivity value (too high/too low)")
sys.exit()
elif len(sensitivity) == 0:
raw_answer = int(self.device_query("SCAL?"))
answer = cutil.search_keys_dictionary(self.sensitivity_dict, raw_answer)
return answer
else:
general.message("Invalid Argument")
sys.exit()
elif self.test_flag == 'test':
if len(sensitivity) == 1:
temp = sensitivity[0].split(' ')
if float(temp[0]) > 1 and temp[1] == 'V':
sens = '1 V'
else:
number_sens = min(self.helper_sens_list, key=lambda x: abs(x - int(temp[0])))
if int(number_sens) == 1000 and temp[1] == 'nV':
number_sens = 1
temp[1] = 'uV'
elif int(number_sens) == 1000 and temp[1] == 'uV':
number_sens = 1
temp[1] = 'mV'
elif int(number_sens) == 1000 and temp[1] == 'mV':
number_sens = 1
temp[1] = 'V'
tc = str(number_sens) + ' ' + temp[1]
if tc in self.sensitivity_dict:
pass
else:
assert(1 == 2), "Incorrect sensitivity is used"
elif len(sensitivity) == 0:
answer = self.test_sensitivity
return answer
def lock_in_ref_mode(self, *mode):
if self.test_flag != 'test':
if len(mode) == 1:
md = str(mode[0])
if md in self.ref_mode_dict:
flag = self.ref_mode_dict[md]
self.device_write("RSRC "+ str(flag))
else:
general.message("Invalid mode")
sys.exit()
elif len(mode) == 0:
raw_answer = int(self.device_query("RSRC?"))
answer = cutil.search_keys_dictionary(self.ref_mode_dict, raw_answer)
return answer
else:
general.message("Invalid argumnet")
sys.exit()
elif self.test_flag == 'test':
if len(mode) == 1:
md = str(mode[0])
if md in self.ref_mode_dict:
pass
else:
assert(1 == 2), "Incorrect ref mode is used"
elif len(mode) == 0:
answer = self.test_ref_mode
return answer
def lock_in_ref_slope(self, *mode):
if self.test_flag != 'test':
if len(mode) == 1:
md = str(mode[0])
if md in self.ref_slope_dict:
flag = self.ref_slope_dict[md]
self.device_write("RTRG "+ str(flag))
else:
general.message("Invalid mode")
sys.exit()
elif len(mode) == 0:
raw_answer = int(self.device_query("RTRG?"))
answer = cutil.search_keys_dictionary(self.ref_slope_dict, raw_answer)
return answer
else:
general.message("Invalid argumnet")
sys.exit()
elif self.test_flag == 'test':
if len(mode) == 1:
md = str(mode[0])
if md in self.ref_slope_dict:
pass
else:
assert(1 == 2), "Incorrect ref slope is used"
elif len(mode) == 0:
answer = self.test_ref_slope
return answer
def lock_in_sync_filter(self, *mode):
if self.test_flag != 'test':
if len(mode) == 1:
md = str(mode[0])
if md in self.sync_dict:
flag = self.sync_dict[md]
self.device_write("SYNC "+ str(flag))
else:
general.message("Invalid argument")
sys.exit()
elif len(mode) == 0:
raw_answer = int(self.device_query("SYNC?"))
answer = cutil.search_keys_dictionary(self.sync_dict, raw_answer)
return answer
else:
general.message("Invalid argumnet")
sys.exit()
elif self.test_flag == 'test':
if len(mode) == 1:
md = str(mode[0])
if md in self.sync_dict:
pass
else:
assert(1 == 2), "Incorrect sync filter parameter"
elif len(mode) == 0:
answer = self.test_sync
return answer
def lock_in_lp_filter(self, *mode):
    """Set or query the low-pass filter slope ('6 dB', '12 dB', '18 dB', '24 dB').

    BUG FIX: lp_fil_dict stores its first key as '6 db' while the test
    default (and the other keys) use 'dB', so the documented value '6 dB'
    was rejected.  Keys are now matched case-insensitively so both
    spellings work.
    """
    def _canonical(raw):
        # Map the caller's spelling onto the exact dictionary key, ignoring case.
        for key in self.lp_fil_dict:
            if key.lower() == raw.lower():
                return key
        return raw
    if self.test_flag != 'test':
        if len(mode) == 1:
            md = _canonical(str(mode[0]))
            if md in self.lp_fil_dict:
                flag = self.lp_fil_dict[md]
                self.device_write("OFSL "+ str(flag))
            else:
                general.message("Invalid mode")
                sys.exit()
        elif len(mode) == 0:
            raw_answer = int(self.device_query("OFSL?"))
            answer = cutil.search_keys_dictionary(self.lp_fil_dict, raw_answer)
            return answer
        else:
            # FIX: message previously read "Invalid argumnet" (typo).
            general.message("Invalid argument")
            sys.exit()
    elif self.test_flag == 'test':
        if len(mode) == 1:
            md = _canonical(str(mode[0]))
            assert md in self.lp_fil_dict, "Incorrect low pass filter is used"
        elif len(mode) == 0:
            answer = self.test_lp_filter
            return answer
def lock_in_harmonic(self, *harmonic):
if self.test_flag != 'test':
if len(harmonic) == 1:
harm = int(harmonic[0]);
if harm <= self.harm_max and harm >= self.harm_min:
self.device_write('HARM '+ str(harm))
else:
self.device_write('HARM '+ str(self.harm_min))
general.message("Invalid Argument")
sys.exit()
elif len(harmonic) == 0:
answer = int(self.device_query("HARM?"))
return answer
else:
general.message("Invalid Argument")
sys.exit()
elif self.test_flag == 'test':
if len(harmonic) == 1:
harm = float(harmonic[0])
assert(harm <= self.harm_max and harm >= self.harm_min), "Incorrect harmonic is reached"
elif len(harmonic) == 0:
answer = self.test_harmonic
return answer
def lock_in_command(self, command):
if self.test_flag != 'test':
self.device_write(command)
elif self.test_flag == 'test':
pass
def lock_in_query(self, command):
if self.test_flag != 'test':
answer = self.device_query(command)
return answer
elif self.test_flag == 'test':
answer = None
return answer
def main():
pass
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import gc
import sys
import pyvisa
from pyvisa.constants import StopBits, Parity
import atomize.device_modules.config.config_utils as cutil
import atomize.general_modules.general_functions as general
class SR_865a:
#### Basic interaction functions
def __init__(self):
#### Inizialization
# setting path to *.ini file
self.path_current_directory = os.path.dirname(__file__)
self.path_config_file = os.path.join(self.path_current_directory, 'config','SR_865a_config.ini')
# configuration data
self.config = cutil.read_conf_util(self.path_config_file)
self.specific_parameters = cutil.read_specific_parameters(self.path_config_file)
# auxilary dictionaries
self.sensitivity_dict = {'1 nV': 27, '2 nV': 26, '5 nV': 25, '10 nV': 24, '20 nV': 23, '50 nV': 22,
'100 nV': 21, '200 nV': 20, '500 nV': 19, '1 uV': 18, '2 uV': 17, '5 uV': 16,
'10 uV': 15, '20 uV': 14, '50 uV': 13, '100 uV': 12, '200 uV': 11, '500 uV': 10,
'1 mV': 9, '2 mV': 8, '5 mV': 7, '10 mV': 6, '20 mV': 5, '50 mV': 4,
'100 mV': 3, '200 mV': 2, '500 mV': 1, '1 V': 0};
self.helper_sens_list = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
self.timeconstant_dict = {'1 us': 0, '3 us': 1, '10 us': 2, '30 us': 3, '100 us': 4, '300 us': 5,
'1 ms': 6, '3 ms': 7, '10 ms': 8, '30 ms': 9, '100 ms': 10, '300 ms': 11,
'1 s': 12, '3 s': 13, '10 s': 14, '30 s': 15, '100 s': 16, '300 s': 17,
'1 ks': 18, '3 ks': 19, '10 ks': 20, '30 ks': 21};
self.helper_tc_list = [1, 3, 10, 30, 100, 300, 1000]
self.ref_mode_dict = {'Internal': 0, 'External': 1, 'Dual': 2, 'Chop': 3}
self.ref_slope_dict = {'Sine': 0, 'PosTTL': 1, 'NegTTL': 2}
self.sync_dict = {'Off': 0, 'On': 1}
self.lp_fil_dict = {'6 db': 0, '12 dB': 1, "18 dB": 2, "24 dB": 3}
# Ranges and limits
self.ref_freq_min = 0.001
self.ref_freq_max = 4000000
self.ref_ampl_min = 0.000000001
self.ref_ampl_max = 2
self.harm_max = 99
self.harm_min = 1
# Test run parameters
# These values are returned by the modules in the test run
if len(sys.argv) > 1:
self.test_flag = sys.argv[1]
else:
self.test_flag = 'None'
if self.test_flag != 'test':
if self.config['interface'] == 'gpib':
try:
import Gpib
self.status_flag = 1
self.device = Gpib.Gpib(self.config['board_address'], self.config['gpib_address'])
try:
# test should be here
self.device_write('*CLS')
answer = int(float(self.device_query('*TST?')))
if answer == 0:
self.status_flag = 1
else:
general.message('During internal device test errors are found')
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
elif self.config['interface'] == 'rs232':
try:
self.status_flag = 1
rm = pyvisa.ResourceManager()
self.device = rm.open_resource(self.config['serial_address'], read_termination=self.config['read_termination'],
write_termination=self.config['write_termination'], baud_rate=self.config['baudrate'],
data_bits=self.config['databits'], parity=self.config['parity'], stop_bits=self.config['stopbits'])
self.device.timeout = self.config['timeout']; # in ms
try:
# test should be here
self.device_write('*CLS')
answer = int(self.device_query('*TST?'))
if answer == 0:
self.status_flag = 1
else:
general.message('During internal device test errors are found')
self.status_flag = 0
sys.exit()
except pyvisa.VisaIOError:
self.status_flag = 0
general.message("No connection")
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except pyvisa.VisaIOError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
elif self.config['interface'] == 'ethernet':
try:
self.status_flag = 1
rm = pyvisa.ResourceManager()
self.device = rm.open_resource(self.config['ethernet_address'])
self.device.timeout = self.config['timeout'] # in ms
try:
# test should be here
self.device_write('*CLS')
answer = int(self.device_query('*TST?'))
if answer == 0:
self.status_flag = 1
else:
general.message('During internal device test errors are found')
self.status_flag = 0
sys.exit()
except pyvisa.VisaIOError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit();
except pyvisa.VisaIOError:
general.message("No connection")
self.status_flag = 0
sys.exit()
except BrokenPipeError:
general.message("No connection")
self.status_flag = 0
sys.exit()
elif self.test_flag == 'test':
self.test_signal = 0.001
self.test_frequency = 10000
self.test_phase = 10
self.test_timeconstant = '10 ms'
self.test_amplitude = 0.3
self.test_sensitivity = '100 mV'
self.test_ref_mode = 'Internal'
self.test_ref_slope = 'Sine'
self.test_sync = 'On'
self.test_lp_filter = '6 dB'
self.test_harmonic = 1
def close_connection(self):
    """Mark the device link as closed and reclaim resources (no-op in test mode)."""
    if self.test_flag == 'test':
        return
    # Real run: flag the connection as down and let the collector release
    # the underlying pyvisa / Gpib handle.
    self.status_flag = 0
    gc.collect()
def device_write(self, command):
    """Send *command* (coerced to str) to the instrument; abort if the link is down."""
    if self.status_flag != 1:
        # Connection was never established or has already been lost.
        general.message("No Connection")
        self.status_flag = 0
        sys.exit()
    self.device.write(str(command))
def device_query(self, command):
    """Send *command* and return the instrument's reply as a string.

    GPIB requires an explicit write / pause / read sequence; the
    pyvisa-backed rs232 and ethernet interfaces provide an atomic query().
    Exits the process if no connection is established.
    """
    if self.status_flag != 1:
        general.message("No Connection")
        self.status_flag = 0
        sys.exit()
    interface = self.config['interface']
    if interface == 'gpib':
        self.device.write(command)
        # Give the instrument time to prepare its answer before reading.
        general.wait('50 ms')
        answer = self.device.read().decode()
    elif interface == 'rs232':
        answer = self.device.query(command)
    elif interface == 'ethernet':
        answer = self.device.query(command)
    return answer
#### device specific functions
def lock_in_name(self):
    """Return the instrument identification ('*IDN?'), or the configured name in test mode."""
    if self.test_flag == 'test':
        return self.config['name']
    return self.device_query('*IDN?')
def lock_in_ref_frequency(self, *frequency):
    """Set (one argument, Hz) or query (no argument) the reference frequency."""
    if self.test_flag == 'test':
        if len(frequency) == 0:
            return self.test_frequency
        if len(frequency) == 1:
            freq = float(frequency[0])
            assert(freq >= self.ref_freq_min and freq <= self.ref_freq_max), "Incorrect frequency is reached"
        return None
    if len(frequency) == 1:
        freq = float(frequency[0])
        if self.ref_freq_min <= freq <= self.ref_freq_max:
            self.device_write('FREQ '+ str(freq))
        else:
            general.message("Incorrect frequency")
            sys.exit()
    elif len(frequency) == 0:
        return float(self.device_query('FREQ?'))
    else:
        general.message("Invalid Argument")
        sys.exit()
def lock_in_phase(self, *degree):
    """Set (degrees, within +/-360000) or query the reference phase shift."""
    if self.test_flag == 'test':
        if len(degree) == 0:
            return self.test_phase
        if len(degree) == 1:
            degs = float(degree[0])
            assert(degs >= -360000 and degs <= 360000), "Incorrect phase is reached"
        return None
    if len(degree) == 1:
        degs = float(degree[0])
        if -360000 <= degs <= 360000:
            self.device_write('PHAS '+str(degs))
        else:
            general.message("Incorrect phase")
            sys.exit()
    elif len(degree) == 0:
        return float(self.device_query('PHAS?'))
    else:
        general.message("Invalid Argument")
        sys.exit()
def lock_in_time_constant(self, *timeconstant):
    """Set or query the output filter time constant.

    With one argument such as '30 ms' (value and unit us/ms/s/ks separated
    by a space) the value is snapped to the nearest available
    1/3/10/30/100/300 step and programmed via OFLT.  With no argument the
    current setting is queried and returned as a 'value unit' string.
    In test mode the argument is only validated.
    """
    if self.test_flag != 'test':
        if len(timeconstant) == 1:
            temp = timeconstant[0].split(' ')
            if float(temp[0]) > 30 and temp[1] == 'ks':
                # 30 ks is the longest available time constant (code 21).
                # BUG FIX: the message previously said "sensitivity"
                # (copy-paste from lock_in_sensitivity).
                general.message("Desired time constant cannot be set, the nearest available value is used")
                self.device_write("OFLT "+ str(21))
            else:
                # Snap to the nearest 1/3/10/30/100/300/1000 mantissa.
                number_tc = min(self.helper_tc_list, key=lambda x: abs(x - int(temp[0])))
                # A mantissa of 1000 rolls over into the next unit.
                if int(number_tc) == 1000 and temp[1] == 'us':
                    number_tc = 1
                    temp[1] = 'ms'
                elif int(number_tc) == 1000 and temp[1] == 'ms':
                    number_tc = 1
                    temp[1] = 's'
                elif int(number_tc) == 1000 and temp[1] == 's':
                    number_tc = 1
                    temp[1] = 'ks'
                if int(number_tc) != int(temp[0]):
                    general.message("Desired time constant cannot be set, the nearest available value is used")
                tc = str(number_tc) + ' ' + temp[1]
                if tc in self.timeconstant_dict:
                    flag = self.timeconstant_dict[tc]
                    self.device_write("OFLT "+ str(flag))
                else:
                    general.message("Invalid time constant value (too high/too low)")
                    sys.exit()
        elif len(timeconstant) == 0:
            raw_answer = int(self.device_query("OFLT?"))
            answer = cutil.search_keys_dictionary(self.timeconstant_dict, raw_answer)
            return answer
        else:
            general.message("Invalid Argument")
            sys.exit()
    elif self.test_flag == 'test':
        if len(timeconstant) == 1:
            temp = timeconstant[0].split(' ')
            if float(temp[0]) > 30 and temp[1] == 'ks':
                tc = '30 ks'
            else:
                number_tc = min(self.helper_tc_list, key=lambda x: abs(x - int(temp[0])))
                if int(number_tc) == 1000 and temp[1] == 'us':
                    number_tc = 1
                    temp[1] = 'ms'
                elif int(number_tc) == 1000 and temp[1] == 'ms':
                    number_tc = 1
                    temp[1] = 's'
                elif int(number_tc) == 1000 and temp[1] == 's':
                    number_tc = 1
                    temp[1] = 'ks'
                tc = str(number_tc) + ' ' + temp[1]
            assert tc in self.timeconstant_dict, "Incorrect time constant is used"
        elif len(timeconstant) == 0:
            answer = self.test_timeconstant
            return answer
def lock_in_ref_amplitude(self, *amplitude):
    """Set (one argument, volts) or query (no argument) the sine output amplitude."""
    if self.test_flag == 'test':
        if len(amplitude) == 0:
            return self.test_amplitude
        if len(amplitude) == 1:
            ampl = float(amplitude[0])
            assert(ampl <= self.ref_ampl_max and ampl >= self.ref_ampl_min), "Incorrect amplitude is reached"
        return None
    if len(amplitude) == 1:
        ampl = float(amplitude[0])
        if self.ref_ampl_min <= ampl <= self.ref_ampl_max:
            self.device_write('SLVL '+ str(ampl))
        else:
            # Out of range: drop the output to the minimum level before aborting.
            self.device_write('SLVL '+ str(self.ref_ampl_min))
            general.message("Invalid Argument")
            sys.exit()
    elif len(amplitude) == 0:
        return float(self.device_query("SLVL?"))
    else:
        general.message("Invalid Argument")
        sys.exit()
def lock_in_get_data(self, *channel):
    """Read instrument outputs.

    No argument or a single channel 1..4: return that channel's value
    ('OUTP? 0'..'OUTP? 3'; presumably X, Y, R, theta -- confirm against the
    SR865A manual).  (1, 2): return (x, y) via one SNAP snapshot;
    (1, 2, 3): return (x, y, r).  Test mode returns the canned test signal.
    """
    if self.test_flag == 'test':
        if len(channel) == 0:
            return self.test_signal
        if len(channel) == 1:
            assert int(channel[0]) in (1, 2, 3, 4), 'Invalid channel is given'
            return self.test_signal
        if len(channel) == 2 and int(channel[0]) == 1 and int(channel[1]) == 2:
            return self.test_signal, self.test_signal
        if len(channel) == 3 and int(channel[0]) == 1 and int(channel[1]) == 2 and int(channel[2]) == 3:
            return self.test_signal, self.test_signal, self.test_signal
        return None
    if len(channel) == 0:
        return float(self.device_query('OUTP? 0'))
    if len(channel) == 1 and int(channel[0]) in (1, 2, 3, 4):
        # Channels 1..4 map onto the zero-based OUTP? indices 0..3.
        return float(self.device_query('OUTP? ' + str(int(channel[0]) - 1)))
    if len(channel) == 2 and int(channel[0]) == 1 and int(channel[1]) == 2:
        values = [float(item) for item in self.device_query('SNAP? 0,1').split(',')]
        return values[0], values[1]
    if len(channel) == 3 and int(channel[0]) == 1 and int(channel[1]) == 2 and int(channel[2]) == 3:
        values = [float(item) for item in self.device_query('SNAP? 0,1,2').split(',')]
        return values[0], values[1], values[2]
    return None
def lock_in_sensitivity(self, *sensitivity):
    """Set or query the input sensitivity.

    With one argument such as '100 mV' (value and unit nV/uV/mV/V separated
    by a space) the value is snapped to the nearest available
    1/2/5/10/... step and programmed via SCAL; with no argument the
    current setting is queried and returned as a 'value unit' string.
    In test mode the argument is only validated.
    """
    if self.test_flag != 'test':
        if len(sensitivity) == 1:
            temp = sensitivity[0].split(' ')
            if float(temp[0]) > 1 and temp[1] == 'V':
                # 1 V is the coarsest sensitivity (code 0).
                general.message("Desired sensitivity cannot be set, the nearest available value is used")
                self.device_write("SCAL "+ str(0))
            else:
                # Snap to the nearest 1/2/5/10/20/50/100/200/500/1000 mantissa.
                number_sens = min(self.helper_sens_list, key=lambda x: abs(x - int(temp[0])))
                # A mantissa of 1000 rolls over into the next unit.
                if int(number_sens) == 1000 and temp[1] == 'nV':
                    number_sens = 1
                    temp[1] = 'uV'
                elif int(number_sens) == 1000 and temp[1] == 'uV':
                    number_sens = 1
                    temp[1] = 'mV'
                elif int(number_sens) == 1000 and temp[1] == 'mV':
                    number_sens = 1
                    temp[1] = 'V'
                sens = str(number_sens) + ' ' + temp[1]
                if int(number_sens) != int(temp[0]):
                    general.message("Desired sensitivity cannot be set, the nearest available value is used")
                if sens in self.sensitivity_dict:
                    flag = self.sensitivity_dict[sens]
                    self.device_write("SCAL "+ str(flag))
                else:
                    general.message("Invalid sensitivity value (too high/too low)")
                    sys.exit()
        elif len(sensitivity) == 0:
            raw_answer = int(self.device_query("SCAL?"))
            answer = cutil.search_keys_dictionary(self.sensitivity_dict, raw_answer)
            return answer
        else:
            general.message("Invalid Argument")
            sys.exit()
    elif self.test_flag == 'test':
        if len(sensitivity) == 1:
            temp = sensitivity[0].split(' ')
            if float(temp[0]) > 1 and temp[1] == 'V':
                sens = '1 V'
            else:
                number_sens = min(self.helper_sens_list, key=lambda x: abs(x - int(temp[0])))
                if int(number_sens) == 1000 and temp[1] == 'nV':
                    number_sens = 1
                    temp[1] = 'uV'
                elif int(number_sens) == 1000 and temp[1] == 'uV':
                    number_sens = 1
                    temp[1] = 'mV'
                elif int(number_sens) == 1000 and temp[1] == 'mV':
                    number_sens = 1
                    temp[1] = 'V'
                # NOTE(review): local name 'tc' is a leftover from the
                # time-constant twin of this method; it holds a sensitivity.
                tc = str(number_sens) + ' ' + temp[1]
                if tc in self.sensitivity_dict:
                    pass
                else:
                    assert(1 == 2), "Incorrect sensitivity is used"
        elif len(sensitivity) == 0:
            answer = self.test_sensitivity
            return answer
def lock_in_ref_mode(self, *mode):
    """Set or query the reference source ('Internal', 'External', 'Dual', 'Chop')."""
    if self.test_flag == 'test':
        if len(mode) == 0:
            return self.test_ref_mode
        if len(mode) == 1:
            assert str(mode[0]) in self.ref_mode_dict, "Incorrect ref mode is used"
        return None
    if len(mode) == 1:
        key = str(mode[0])
        if key not in self.ref_mode_dict:
            general.message("Invalid mode")
            sys.exit()
        self.device_write("RSRC "+ str(self.ref_mode_dict[key]))
    elif len(mode) == 0:
        raw_answer = int(self.device_query("RSRC?"))
        return cutil.search_keys_dictionary(self.ref_mode_dict, raw_answer)
    else:
        general.message("Invalid argumnet")
        sys.exit()
def lock_in_ref_slope(self, *mode):
    """Set or query the external reference trigger slope ('Sine', 'PosTTL', 'NegTTL')."""
    if self.test_flag == 'test':
        if len(mode) == 0:
            return self.test_ref_slope
        if len(mode) == 1:
            assert str(mode[0]) in self.ref_slope_dict, "Incorrect ref slope is used"
        return None
    if len(mode) == 1:
        key = str(mode[0])
        if key not in self.ref_slope_dict:
            general.message("Invalid mode")
            sys.exit()
        self.device_write("RTRG "+ str(self.ref_slope_dict[key]))
    elif len(mode) == 0:
        raw_answer = int(self.device_query("RTRG?"))
        return cutil.search_keys_dictionary(self.ref_slope_dict, raw_answer)
    else:
        general.message("Invalid argumnet")
        sys.exit()
def lock_in_sync_filter(self, *mode):
    """Enable/disable ('On'/'Off') or query the synchronous filter."""
    if self.test_flag == 'test':
        if len(mode) == 0:
            return self.test_sync
        if len(mode) == 1:
            assert str(mode[0]) in self.sync_dict, "Incorrect sync filter parameter"
        return None
    if len(mode) == 1:
        key = str(mode[0])
        if key not in self.sync_dict:
            general.message("Invalid argument")
            sys.exit()
        self.device_write("SYNC "+ str(self.sync_dict[key]))
    elif len(mode) == 0:
        raw_answer = int(self.device_query("SYNC?"))
        return cutil.search_keys_dictionary(self.sync_dict, raw_answer)
    else:
        general.message("Invalid argumnet")
        sys.exit()
def lock_in_lp_filter(self, *mode):
    """Set or query the low-pass filter slope ('6 dB', '12 dB', '18 dB', '24 dB').

    BUG FIX: lp_fil_dict stores its first key as '6 db' while the test
    default (and the other keys) use 'dB', so the documented value '6 dB'
    was rejected.  Keys are now matched case-insensitively so both
    spellings work.
    """
    def _canonical(raw):
        # Map the caller's spelling onto the exact dictionary key, ignoring case.
        for key in self.lp_fil_dict:
            if key.lower() == raw.lower():
                return key
        return raw
    if self.test_flag != 'test':
        if len(mode) == 1:
            md = _canonical(str(mode[0]))
            if md in self.lp_fil_dict:
                flag = self.lp_fil_dict[md]
                self.device_write("OFSL "+ str(flag))
            else:
                general.message("Invalid mode")
                sys.exit()
        elif len(mode) == 0:
            raw_answer = int(self.device_query("OFSL?"))
            answer = cutil.search_keys_dictionary(self.lp_fil_dict, raw_answer)
            return answer
        else:
            # FIX: message previously read "Invalid argumnet" (typo).
            general.message("Invalid argument")
            sys.exit()
    elif self.test_flag == 'test':
        if len(mode) == 1:
            md = _canonical(str(mode[0]))
            assert md in self.lp_fil_dict, "Incorrect low pass filter is used"
        elif len(mode) == 0:
            answer = self.test_lp_filter
            return answer
def lock_in_harmonic(self, *harmonic):
    """Set (integer, harm_min..harm_max) or query the detection harmonic.

    FIX: the test branch previously coerced with float(), which accepted
    inputs (e.g. the string '1.5') that the hardware branch's int() would
    reject.  Both branches now coerce identically.
    """
    if self.test_flag != 'test':
        if len(harmonic) == 1:
            harm = int(harmonic[0])
            if harm <= self.harm_max and harm >= self.harm_min:
                self.device_write('HARM '+ str(harm))
            else:
                # Out of range: fall back to the lowest harmonic before aborting.
                self.device_write('HARM '+ str(self.harm_min))
                general.message("Invalid Argument")
                sys.exit()
        elif len(harmonic) == 0:
            return int(self.device_query("HARM?"))
        else:
            general.message("Invalid Argument")
            sys.exit()
    elif self.test_flag == 'test':
        if len(harmonic) == 1:
            harm = int(harmonic[0])
            assert(harm <= self.harm_max and harm >= self.harm_min), "Incorrect harmonic is reached"
        elif len(harmonic) == 0:
            return self.test_harmonic
def lock_in_command(self, command):
    """Forward an arbitrary SCPI command to the device (ignored in test mode)."""
    if self.test_flag == 'test':
        return
    self.device_write(command)
def lock_in_query(self, command):
    """Send an arbitrary query and return the raw reply (None in test mode)."""
    if self.test_flag == 'test':
        return None
    return self.device_query(command)
def main():
    """Placeholder entry point; this module is meant to be imported as a driver."""
    pass


if __name__ == "__main__":
    main()
|
en
| 0.675379
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- #### Basic interaction functions #### Inizialization # setting path to *.ini file # configuration data # auxilary dictionaries # Ranges and limits # Test run parameters # These values are returned by the modules in the test run # test should be here # in ms # test should be here # in ms # test should be here #### device specific functions
| 1.942308
| 2
|
pages/login.py
|
armaaar/items-catalog
| 0
|
6625413
|
<reponame>armaaar/items-catalog<filename>pages/login.py
from _imports import *
from db import *
from flask import send_from_directory
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import requests
import json
import os
# Load the Google OAuth client id once at import time.  FIX: the previous
# open(...).read() never closed the file handle; a context manager
# guarantees it is released immediately.
with open('client_secret.json', 'r') as _secrets_file:
    CLIENT_ID = json.loads(_secrets_file.read())['web']['client_id']

APPLICATION_NAME = "Catalog App"
def login():
    """Sign a user in via Google's OAuth2 one-time-code flow.

    Exchanges the auth code POSTed by the client for credentials,
    validates the resulting access token, creates the User row on first
    sign-in, and stores the identity in the Flask session.  Returns "ok"
    on success or a JSON error response with status 401/500.

    NOTE(review): oauth2client is deprecated in favor of google-auth --
    consider migrating this flow.
    """
    # STEP 1 - Parse the auth code
    auth_code = request.data
    # STEP 2 - Exchange for a token
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secret.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(auth_code)
    except FlowExchangeError:
        response = make_response(json.dumps(
            'Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' %
           access_token)
    h = httplib2.Http()
    # h.request returns (response_headers, body); index 1 is the JSON body.
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(json.dumps(
            "Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(json.dumps(
            "Token's client ID does not match app's."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # STEP 3 - Find User or make a new one
    # Get user info
    h = httplib2.Http()
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    name = data['name']
    email = data['email']
    # see if user exists, if it doesn't make a new one
    user = db_session.query(User).filter_by(email=email).first()
    if not user:
        user = User(name=name, email=email)
        db_session.add(user)
        db_session.commit()
    # STEP 4 - Make session
    g.user = user
    session['user_id'] = user.id
    session['gplus_id'] = gplus_id
    session['access_token'] = credentials.access_token
    # STEP 5 - Send back token to the client
    return "ok"
|
from _imports import *
from db import *
from flask import send_from_directory
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import requests
import json
import os
# Load the Google OAuth client id once at import time.  FIX: the previous
# open(...).read() never closed the file handle; a context manager
# guarantees it is released immediately.
with open('client_secret.json', 'r') as _secrets_file:
    CLIENT_ID = json.loads(_secrets_file.read())['web']['client_id']

APPLICATION_NAME = "Catalog App"
def login():
# STEP 1 - Parse the auth code
auth_code = request.data
# STEP 2 - Exchange for a token
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secret.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(auth_code)
except FlowExchangeError:
response = make_response(json.dumps(
'Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' %
access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps(
"Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps(
"Token's client ID does not match app's."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# STEP 3 - Find User or make a new one
# Get user info
h = httplib2.Http()
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
name = data['name']
email = data['email']
# see if user exists, if it doesn't make a new one
user = db_session.query(User).filter_by(email=email).first()
if not user:
user = User(name=name, email=email)
db_session.add(user)
db_session.commit()
# STEP 4 - Make session
g.user = user
session['user_id'] = user.id
session['gplus_id'] = gplus_id
session['access_token'] = credentials.access_token
# STEP 5 - Send back token to the client
return "ok"
|
en
| 0.811632
|
# STEP 1 - Parse the auth code # STEP 2 - Exchange for a token # Upgrade the authorization code into a credentials object # Check that the access token is valid. # If there was an error in the access token info, abort. # Verify that the access token is used for the intended user. # Verify that the access token is valid for this app. # STEP 3 - Find User or make a new one # Get user info # see if user exists, if it doesn't make a new one # STEP 4 - Make session # STEP 5 - Send back token to the client
| 2.750098
| 3
|
check.py
|
toni-heittola/dcase_glossary
| 1
|
6625414
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import glob
import yaml
import argparse
def _percent(count, total):
    """Return ``count`` as a whole-number percentage of ``total``.

    Guards against an empty glossary (``total == 0``), which previously
    raised ZeroDivisionError.
    """
    return round((count / total) * 100.0) if total else 0


def _print_terms_missing(title, data, sorted_terms, key):
    """Print, under a section header, every term whose entry lacks ``key``."""
    print()
    print(f'=== Terms without {title} ===')
    for term in sorted_terms:
        if key not in data[term]:
            print(' ', data[term]['term'])


def main(argv):
    """Report coverage statistics for ``glossary.yaml``.

    Prints how many terms have a definition, per-language translations and
    wiki links; command-line flags additionally list the terms that are
    missing each field.
    """
    parser = argparse.ArgumentParser(description='Script to investigate glossary file.')
    parser.add_argument("-D", "--definitions", help="show terms with empty definitions", action="store_true")
    parser.add_argument("-FI", "--fi", help="show terms without Finnish translation", action="store_true")
    parser.add_argument("-ES", "--es", help="show terms without Spanish translation", action="store_true")
    parser.add_argument("-FR", "--fr", help="show terms without French translation", action="store_true")
    parser.add_argument("-DE", "--de", help="show terms without German translation", action="store_true")
    parser.add_argument("-PL", "--pl", help="show terms without Polish translation", action="store_true")
    args = parser.parse_args()

    print('Checking glossary file')
    print('========================================================')
    print()

    glossary_file = 'glossary.yaml'
    # FullLoader reads the full YAML language without constructing arbitrary
    # Python objects; the glossary is plain mappings and strings.
    with open(glossary_file, 'r') as file:
        glossary = yaml.load(file, Loader=yaml.FullLoader)

    # Index entries by lower-cased term so output is case-insensitively sorted.
    data = {item['term'].lower(): item for item in glossary['glossary']}
    sorted_terms = sorted(data)
    total = len(glossary['glossary'])

    items_without_definition = sum(
        1 for term in sorted_terms if 'definition' not in data[term]
    )
    # Count how many entries carry each optional field.
    per_key = {
        key: sum(1 for term in sorted_terms if key in data[term])
        for key in ('fi', 'es', 'fr', 'de', 'pl', 'wikipedia', 'wiktionary')
    }

    print('=== Items ===')
    print('Items :', total)
    print('Items without definition (%) :', _percent(items_without_definition, total), '% ', '(', items_without_definition, ')')
    print()
    print('=== Languages ===')
    print('Items with Finnish translation:', _percent(per_key['fi'], total), '% ', '(', per_key['fi'], ')')
    print('Items with Spanish translation:', _percent(per_key['es'], total), '% ', '(', per_key['es'], ')')
    print('Items with French translation :', _percent(per_key['fr'], total), '% ', '(', per_key['fr'], ')')
    print('Items with German translation :', _percent(per_key['de'], total), '% ', '(', per_key['de'], ')')
    print('Items with Polish translation :', _percent(per_key['pl'], total), '% ', '(', per_key['pl'], ')')
    print()
    print('=== Links ===')
    print('Items with wikipedia link :', _percent(per_key['wikipedia'], total), '% ', '(', per_key['wikipedia'], ')')
    print('Items with wiktionary link :', _percent(per_key['wiktionary'], total), '% ', '(', per_key['wiktionary'], ')')

    # Optional listings of terms missing each field, one per flag.
    if args.definitions:
        _print_terms_missing('definition', data, sorted_terms, 'definition')
    if args.fi:
        _print_terms_missing('Finnish translation', data, sorted_terms, 'fi')
    if args.es:
        _print_terms_missing('Spanish translation', data, sorted_terms, 'es')
    if args.fr:
        _print_terms_missing('French translation', data, sorted_terms, 'fr')
    if args.de:
        _print_terms_missing('German translation', data, sorted_terms, 'de')
    if args.pl:
        _print_terms_missing('Polish translation', data, sorted_terms, 'pl')


if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import glob
import yaml
import argparse
def main(argv):
parser = argparse.ArgumentParser(description='Script to investigate glossary file.')
parser.add_argument("-D", "--definitions", help="show terms with empty definitions", action="store_true")
parser.add_argument("-FI", "--fi", help="show terms without Finnish translation", action="store_true")
parser.add_argument("-ES", "--es", help="show terms without Spanish translation", action="store_true")
parser.add_argument("-FR", "--fr", help="show terms without French translation", action="store_true")
parser.add_argument("-DE", "--de", help="show terms without German translation", action="store_true")
parser.add_argument("-PL", "--pl", help="show terms without Polish translation", action="store_true")
args = parser.parse_args()
print('Checking glossary file')
print('========================================================')
print()
glossary_file = 'glossary.yaml'
# Read template
with open(glossary_file, 'r') as file:
glossary = yaml.load(file, Loader=yaml.FullLoader)
data = {}
for item in glossary['glossary']:
data[item['term'].lower()] = item
sorted_terms = sorted(data)
items_without_definition = 0
items_fi = 0
items_pl = 0
items_es = 0
items_fr = 0
items_de = 0
items_wikipedia = 0
items_wiktionary = 0
for term in sorted_terms:
if 'definition' not in data[term]:
items_without_definition += 1
if 'fi' in data[term]:
items_fi += 1
if 'pl' in data[term]:
items_pl += 1
if 'es' in data[term]:
items_es += 1
if 'fr' in data[term]:
items_fr += 1
if 'de' in data[term]:
items_de += 1
if 'wikipedia' in data[term]:
items_wikipedia += 1
if 'wiktionary' in data[term]:
items_wiktionary += 1
print('=== Items ===')
print('Items :', len(glossary['glossary']))
print('Items without definition (%) :', round((items_without_definition / len(glossary['glossary'])) * 100.0), '% ', '(', items_without_definition, ')')
print()
print('=== Languages ===')
print('Items with Finnish translation:', round((items_fi / len(glossary['glossary'])) * 100.0), '% ', '(', items_fi, ')')
print('Items with Spanish translation:', round((items_es / len(glossary['glossary'])) * 100.0), '% ', '(', items_es, ')')
print('Items with French translation :', round((items_fr / len(glossary['glossary'])) * 100.0), '% ', '(', items_fr, ')')
print('Items with German translation :', round((items_de / len(glossary['glossary'])) * 100.0), '% ', '(', items_de, ')')
print('Items with Polish translation :', round((items_pl / len(glossary['glossary'])) * 100.0), '% ', '(', items_pl, ')')
print()
print('=== Links ===')
print('Items with wikipedia link :', round((items_wikipedia/len(glossary['glossary']))*100.0), '% ', '(', items_wikipedia, ')')
print('Items with wiktionary link :', round((items_wiktionary / len(glossary['glossary'])) * 100.0), '% ', '(', items_wiktionary, ')')
if args.definitions:
print()
print('=== Terms without definition ===')
for term in sorted_terms:
if 'definition' not in data[term]:
print(' ', data[term]['term'])
if args.fi:
print()
print('=== Terms without Finnish translation ===')
for term in sorted_terms:
if 'fi' not in data[term]:
print(' ', data[term]['term'])
if args.es:
print()
print('=== Terms without Spanish translation ===')
for term in sorted_terms:
if 'es' not in data[term]:
print(' ', data[term]['term'])
if args.fr:
print()
print('=== Terms without French translation ===')
for term in sorted_terms:
if 'fr' not in data[term]:
print(' ', data[term]['term'])
if args.de:
print()
print('=== Terms without German translation ===')
for term in sorted_terms:
if 'de' not in data[term]:
print(' ', data[term]['term'])
if args.pl:
print()
print('=== Terms without Polish translation ===')
for term in sorted_terms:
if 'pl' not in data[term]:
print(' ', data[term]['term'])
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
en
| 0.312125
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # Read template
| 3.345346
| 3
|
oneflow/python/test/ops/test_tril.py
|
Ldpe2G/oneflow
| 2
|
6625415
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
from test_util import (
GenArgDict,
test_global_storage,
type_name_to_flow_type,
type_name_to_np_type,
)
import oneflow.typing as oft
def _test_tril_fw_bw(test_case, device, shape, type_name, diagonal, fill_value):
    """Forward/backward check of ``flow.math.tril`` against a numpy reference.

    Builds a train job applying ``tril`` with the given ``diagonal`` offset and
    ``fill_value`` for the masked upper triangle, then compares both the output
    and the gradient w.r.t. the input with numpy-computed references.
    """
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    if type_name == "float16":
        # float16 data is carried as float32 and cast around the op itself.
        flow_type = flow.float
        np_type = np.float32
    else:
        flow_type = type_name_to_flow_type[type_name]
        np_type = type_name_to_np_type[type_name]

    @flow.global_function(type="train", function_config=func_config)
    def test_tril_fw_bw_job(x: oft.Numpy.Placeholder(shape, dtype=flow_type),):
        with flow.scope.placement(device, "0:0"):
            # A trainable variable gives the optimizer something to update so
            # a backward pass through ``x`` is generated.
            x_var = flow.get_variable(
                name="xv",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            x += flow.cast(x_var, dtype=flow_type)
            # Bug fix: ``fill_value`` must be forwarded to the op -- the numpy
            # reference below fills the masked region with it, but the op was
            # previously called with its default fill of 0.
            if type_name == "float16":
                out = flow.cast(
                    flow.math.tril(flow.cast(x, flow.float16), diagonal, fill_value),
                    flow.float,
                )
            else:
                out = flow.math.tril(x, diagonal, fill_value)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(out)

        flow.watch(x, test_global_storage.Setter("x"))
        flow.watch_diff(x, test_global_storage.Setter("x_diff"))
        flow.watch(out, test_global_storage.Setter("out"))
        flow.watch_diff(out, test_global_storage.Setter("out_diff"))
        return out

    check_point = flow.train.CheckPoint()
    check_point.init()
    x = np.random.randint(low=0, high=100, size=shape)
    test_tril_fw_bw_job(x.astype(np_type)).get()

    # numpy reference: keep the lower triangle, fill the rest with fill_value.
    np_out = np.where(
        np.tril(np.ones(shape), diagonal),
        test_global_storage.Get("x"),
        np.full(shape, fill_value).astype(np_type),
    )
    # Gradient only flows through the kept (lower-triangle) elements.
    np_x_diff = np.tril(test_global_storage.Get("out_diff"), diagonal)

    if type_name == "float16":
        tolerance = 1e-3  # looser tolerance for reduced precision
    else:
        tolerance = 1e-5
    test_case.assertTrue(
        np.allclose(
            np_out, test_global_storage.Get("out"), rtol=tolerance, atol=tolerance
        )
    )
    test_case.assertTrue(
        np.allclose(
            np_x_diff, test_global_storage.Get("x_diff"), rtol=tolerance, atol=tolerance
        )
    )
@flow.unittest.skip_unless_1n1d()
class TestTril(flow.unittest.TestCase):
    """Parameter sweep driving ``_test_tril_fw_bw`` over devices/dtypes/shapes."""

    def test_tril_fw_bw(test_case):
        arg_dict = OrderedDict()
        arg_dict["device"] = ["cpu", "gpu"]
        arg_dict["type_name"] = [
            "float32",
            "float16",
            "double",
            "int32",
            "int64",
        ]
        arg_dict["shape"] = [(6, 6), (3, 6, 8)]
        arg_dict["diagonal"] = [-8, -1, 0, 1, 8]
        arg_dict["fill_value"] = [1.0, 0]
        for arg in GenArgDict(arg_dict):
            # float16 is not supported on CPU.
            if arg["device"] == "cpu" and arg["type_name"] == "float16":
                continue
            # A float fill_value only makes sense for floating-point dtypes.
            # Bug fix: this previously tested ``arg_dict["type_name"]`` (the
            # whole candidate list) instead of the current ``arg["type_name"]``,
            # so the condition was always true and every float fill_value
            # combination was silently skipped.
            if isinstance(arg["fill_value"], float) and arg["type_name"] not in [
                "float32",
                "float16",
                "double",
            ]:
                continue
            _test_tril_fw_bw(test_case, **arg)


if __name__ == "__main__":
    unittest.main()
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
from test_util import (
GenArgDict,
test_global_storage,
type_name_to_flow_type,
type_name_to_np_type,
)
import oneflow.typing as oft
def _test_tril_fw_bw(test_case, device, shape, type_name, diagonal, fill_value):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
if type_name == "float16":
flow_type = flow.float
np_type = np.float32
else:
flow_type = type_name_to_flow_type[type_name]
np_type = type_name_to_np_type[type_name]
@flow.global_function(type="train", function_config=func_config)
def test_tril_fw_bw_job(x: oft.Numpy.Placeholder(shape, dtype=flow_type),):
with flow.scope.placement(device, "0:0"):
x_var = flow.get_variable(
name="xv",
shape=(1,),
dtype=flow.float,
initializer=flow.zeros_initializer(),
)
x += flow.cast(x_var, dtype=flow_type)
if type_name == "float16":
out = flow.cast(
flow.math.tril(flow.cast(x, flow.float16), diagonal), flow.float
)
else:
out = flow.math.tril(x, diagonal)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(out)
flow.watch(x, test_global_storage.Setter("x"))
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch(out, test_global_storage.Setter("out"))
flow.watch_diff(out, test_global_storage.Setter("out_diff"))
return out
check_point = flow.train.CheckPoint()
check_point.init()
x = np.random.randint(low=0, high=100, size=shape)
test_tril_fw_bw_job(x.astype(np_type)).get()
np_out = np.where(
np.tril(np.ones(shape), diagonal),
test_global_storage.Get("x"),
np.full(shape, fill_value).astype(np_type),
)
np_x_diff = np.tril(test_global_storage.Get("out_diff"), diagonal)
if type_name == "float16":
tolerance = 1e-3
else:
tolerance = 1e-5
test_case.assertTrue(
np.allclose(
np_out, test_global_storage.Get("out"), rtol=tolerance, atol=tolerance
)
)
test_case.assertTrue(
np.allclose(
np_x_diff, test_global_storage.Get("x_diff"), rtol=tolerance, atol=tolerance
)
)
@flow.unittest.skip_unless_1n1d()
class TestTril(flow.unittest.TestCase):
def test_tril_fw_bw(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["cpu", "gpu"]
arg_dict["type_name"] = [
"float32",
"float16",
"double",
"int32",
"int64",
]
arg_dict["shape"] = [(6, 6), (3, 6, 8)]
arg_dict["diagonal"] = [-8, -1, 0, 1, 8]
arg_dict["fill_value"] = [1.0, 0]
for arg in GenArgDict(arg_dict):
if arg["device"] == "cpu" and arg["type_name"] == "float16":
continue
if isinstance(arg["fill_value"], float) and arg_dict["type_name"] not in [
"float32",
"float16",
"double",
]:
continue
_test_tril_fw_bw(test_case, **arg)
if __name__ == "__main__":
unittest.main()
|
en
| 0.864155
|
Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 1.889432
| 2
|
source/NVDAObjects/IAccessible/mozilla.py
|
raza-al-pakistani/raza-al-pakistani--v20022.3.1
| 0
|
6625416
|
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2006-2021 NV Access Limited, <NAME>
import IAccessibleHandler
from comInterfaces import IAccessible2Lib as IA2
import oleacc
import winUser
import controlTypes
from . import IAccessible, WindowRoot
from logHandler import log
from NVDAObjects.behaviors import RowWithFakeNavigation
from . import ia2Web
class Mozilla(ia2Web.Ia2Web):
	"""Base overlay class for accessibles exposed by Mozilla (Gecko) applications."""

	def _get_states(self):
		# Start from the generic IA2 web states.
		states = super(Mozilla, self).states
		# NOTE(review): Gecko appears to use the system MARQUEED state to flag
		# checkable items -- confirm against the Gecko a11y implementation.
		if self.IAccessibleStates & oleacc.STATE_SYSTEM_MARQUEED:
			states.add(controlTypes.State.CHECKABLE)
		return states
	def _get_descriptionFrom(self) -> controlTypes.DescriptionFrom:
		"""Firefox does not yet support 'description-from' attribute (which informs
		NVDA of the source of accDescription after the name/description computation
		is complete. However, a primary use-case can be supported via the IA2attribute
		'description' which is exposed by Firefox and tells us the value of the "aria-description"
		attribute. If the value of accDescription matches, we can infer that the source
		of accDescription is 'aria-description'.
		Note:
		At the time of development some 'generic HTML elements' (E.G. 'span') may not be exposed by Firefox,
		even if the element has an aria-description attribute.
		Other more significant ARIA attributes such as role may cause the element to be exposed.
		"""
		log.debug("Getting mozilla descriptionFrom")
		ariaDesc = self.IA2Attributes.get("description", "")
		log.debug(f"description IA2Attribute is: {ariaDesc}")
		if (
			ariaDesc == ""  # aria-description is missing or empty
			# Ensure that aria-description is actually the value used.
			# I.E. accDescription is sourced from the aria-description attribute as a result of the
			# name/description computation.
			# If the values don't match, some other source must have been used.
			or self.description != ariaDesc
		):
			return controlTypes.DescriptionFrom.UNKNOWN
		else:
			return controlTypes.DescriptionFrom.ARIA_DESCRIPTION
	def _get_presentationType(self):
		# Demote content tables to layout presentation when Gecko's
		# 'layout-guess' heuristic says so, or when the containing table is
		# itself presented as layout.
		presType=super(Mozilla,self).presentationType
		if presType==self.presType_content:
			if self.role==controlTypes.Role.TABLE and self.IA2Attributes.get('layout-guess')=='true':
				presType=self.presType_layout
			elif self.table and self.table.presentationType==self.presType_layout:
				presType=self.presType_layout
		return presType
class Document(ia2Web.Document):
	"""A document accessible in a Mozilla application (top level or iframe)."""

	def _get_parent(self):
		# Normal case: ask IAccessible for the parent.
		res = IAccessibleHandler.accParent(
			self.IAccessibleObject, self.IAccessibleChildID
		)
		if not res:
			# accParent is broken in Firefox for same-process iframe documents.
			# Use NODE_CHILD_OF instead.
			res = IAccessibleHandler.accNavigate(
				self.IAccessibleObject, self.IAccessibleChildID,
				IAccessibleHandler.NAVRELATION_NODE_CHILD_OF
			)
			if not res:
				return None
		return IAccessible(IAccessibleObject=res[0], IAccessibleChildID=res[1])
	def _get_treeInterceptorClass(self):
		# Read-only documents are browsed via the Gecko IA2 virtual buffer;
		# editable documents keep the default interceptor.
		if controlTypes.State.EDITABLE not in self.states:
			import virtualBuffers.gecko_ia2
			return virtualBuffers.gecko_ia2.Gecko_ia2
		return super(Document,self).treeInterceptorClass
class EmbeddedObject(Mozilla):
	"""An embedded object (e.g. a plugin) hosted inside a Mozilla document."""

	def _get_shouldAllowIAccessibleFocusEvent(self):
		# Window that currently has keyboard focus on this object's GUI thread.
		focusWindow = winUser.getGUIThreadInfo(self.windowThreadID).hwndFocus
		if self.windowHandle != focusWindow:
			# This window doesn't have the focus, which means the embedded object's window probably already has the focus.
			# We don't want to override the focus event fired by the embedded object.
			return False
		return super(EmbeddedObject, self).shouldAllowIAccessibleFocusEvent
class GeckoPluginWindowRoot(WindowRoot):
	"""Root object for a plugin window hosted by Gecko.

	Resolves the parent through the Gecko document that embeds the plugin
	rather than the raw window hierarchy.
	"""

	parentUsesSuperOnWindowRootIAccessible = False

	def _get_parent(self):
		parent=super(GeckoPluginWindowRoot,self).parent
		if parent.IAccessibleRole==oleacc.ROLE_SYSTEM_CLIENT:
			# Skip the window wrapping the plugin window,
			# which doesn't expose a Gecko accessible in Gecko >= 11.
			parent=parent.parent.parent
		# Ask the candidate parent for the document it embeds; if valid and
		# on screen, that document is the real parent.
		res = IAccessibleHandler.accNavigate(parent.IAccessibleObject, 0, IAccessibleHandler.NAVRELATION_EMBEDS)
		if res:
			obj = IAccessible(IAccessibleObject=res[0], IAccessibleChildID=res[1])
			if obj:
				if controlTypes.State.OFFSCREEN not in obj.states:
					return obj
				else:
					log.debugWarning("NAVRELATION_EMBEDS returned an offscreen document, name %r" % obj.name)
			else:
				log.debugWarning("NAVRELATION_EMBEDS returned an invalid object")
		else:
			log.debugWarning("NAVRELATION_EMBEDS failed")
		# Fall back to the window-hierarchy parent.
		return parent
class TextLeaf(Mozilla):
	"""A leaf node containing only static text; transparent to the mouse."""
	role = controlTypes.Role.STATICTEXT
	beTransparentToMouse = True
def findExtraOverlayClasses(obj, clsList):
	"""Determine the most appropriate class if this is a Mozilla object.
	This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.

	:param obj: the NVDAObject being classified.
	:param clsList: list of overlay classes to append to, most specific first.
	"""
	if not isinstance(obj.IAccessibleObject, IA2.IAccessible2):
		return
	iaRole = obj.IAccessibleRole
	cls = None
	if iaRole == oleacc.ROLE_SYSTEM_TEXT:
		# Check if this is a text leaf.
		iaStates = obj.IAccessibleStates
		# Text leaves are never focusable.
		# Not unavailable excludes disabled editable text fields (which also aren't focusable).
		if not (iaStates & oleacc.STATE_SYSTEM_FOCUSABLE or iaStates & oleacc.STATE_SYSTEM_UNAVAILABLE):
			# This excludes a non-focusable @role="textbox".
			if not (obj.IA2States & IA2.IA2_STATE_EDITABLE):
				cls = TextLeaf
	if not cls:
		cls = _IAccessibleRolesToOverlayClasses.get(iaRole)
	if cls:
		clsList.append(cls)
	# Rows of tables/lists/trees get fake left/right cell navigation.
	if iaRole == oleacc.ROLE_SYSTEM_ROW:
		clsList.append(RowWithFakeNavigation)
	elif iaRole == oleacc.ROLE_SYSTEM_LISTITEM and hasattr(obj.parent, "IAccessibleTableObject"):
		clsList.append(RowWithFakeNavigation)
	elif iaRole == oleacc.ROLE_SYSTEM_OUTLINEITEM:
		# Check if the tree view is a table.
		parent = obj.parent
		# Tree view items may be nested, so skip any tree view item ancestors.
		while parent and isinstance(parent, Mozilla) and parent.IAccessibleRole == oleacc.ROLE_SYSTEM_OUTLINEITEM:
			newParent = parent.parent
			# NOTE(review): writing the fetched ancestor back onto
			# ``parent.parent`` looks like it caches the property to avoid
			# re-fetching during later traversal -- confirm intent.
			parent.parent = newParent
			parent = newParent
		if hasattr(parent, "IAccessibleTableObject") or hasattr(parent, "IAccessibleTable2Object"):
			clsList.append(RowWithFakeNavigation)
	# Finally let the generic IA2 web code add its own overlay classes.
	ia2Web.findExtraOverlayClasses(obj, clsList,
		baseClass=Mozilla, documentClass=Document)
#: Maps IAccessible roles to NVDAObject overlay classes.
_IAccessibleRolesToOverlayClasses = {
	IA2.IA2_ROLE_EMBEDDED_OBJECT: EmbeddedObject,
	"embed": EmbeddedObject,
	"object": EmbeddedObject,
}
|
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2006-2021 NV Access Limited, <NAME>
import IAccessibleHandler
from comInterfaces import IAccessible2Lib as IA2
import oleacc
import winUser
import controlTypes
from . import IAccessible, WindowRoot
from logHandler import log
from NVDAObjects.behaviors import RowWithFakeNavigation
from . import ia2Web
class Mozilla(ia2Web.Ia2Web):
def _get_states(self):
states = super(Mozilla, self).states
if self.IAccessibleStates & oleacc.STATE_SYSTEM_MARQUEED:
states.add(controlTypes.State.CHECKABLE)
return states
def _get_descriptionFrom(self) -> controlTypes.DescriptionFrom:
"""Firefox does not yet support 'description-from' attribute (which informs
NVDA of the source of accDescription after the name/description computation
is complete. However, a primary use-case can be supported via the IA2attribute
'description' which is exposed by Firefox and tells us the value of the "aria-description"
attribute. If the value of accDescription matches, we can infer that the source
of accDescription is 'aria-description'.
Note:
At the time of development some 'generic HTML elements' (E.G. 'span') may not be exposed by Firefox,
even if the element has an aria-description attribute.
Other more significant ARIA attributes such as role may cause the element to be exposed.
"""
log.debug("Getting mozilla descriptionFrom")
ariaDesc = self.IA2Attributes.get("description", "")
log.debug(f"description IA2Attribute is: {ariaDesc}")
if (
ariaDesc == "" # aria-description is missing or empty
# Ensure that aria-description is actually the value used.
# I.E. accDescription is sourced from the aria-description attribute as a result of the
# name/description computation.
# If the values don't match, some other source must have been used.
or self.description != ariaDesc
):
return controlTypes.DescriptionFrom.UNKNOWN
else:
return controlTypes.DescriptionFrom.ARIA_DESCRIPTION
def _get_presentationType(self):
presType=super(Mozilla,self).presentationType
if presType==self.presType_content:
if self.role==controlTypes.Role.TABLE and self.IA2Attributes.get('layout-guess')=='true':
presType=self.presType_layout
elif self.table and self.table.presentationType==self.presType_layout:
presType=self.presType_layout
return presType
class Document(ia2Web.Document):
def _get_parent(self):
res = IAccessibleHandler.accParent(
self.IAccessibleObject, self.IAccessibleChildID
)
if not res:
# accParent is broken in Firefox for same-process iframe documents.
# Use NODE_CHILD_OF instead.
res = IAccessibleHandler.accNavigate(
self.IAccessibleObject, self.IAccessibleChildID,
IAccessibleHandler.NAVRELATION_NODE_CHILD_OF
)
if not res:
return None
return IAccessible(IAccessibleObject=res[0], IAccessibleChildID=res[1])
def _get_treeInterceptorClass(self):
if controlTypes.State.EDITABLE not in self.states:
import virtualBuffers.gecko_ia2
return virtualBuffers.gecko_ia2.Gecko_ia2
return super(Document,self).treeInterceptorClass
class EmbeddedObject(Mozilla):
def _get_shouldAllowIAccessibleFocusEvent(self):
focusWindow = winUser.getGUIThreadInfo(self.windowThreadID).hwndFocus
if self.windowHandle != focusWindow:
# This window doesn't have the focus, which means the embedded object's window probably already has the focus.
# We don't want to override the focus event fired by the embedded object.
return False
return super(EmbeddedObject, self).shouldAllowIAccessibleFocusEvent
class GeckoPluginWindowRoot(WindowRoot):
parentUsesSuperOnWindowRootIAccessible = False
def _get_parent(self):
parent=super(GeckoPluginWindowRoot,self).parent
if parent.IAccessibleRole==oleacc.ROLE_SYSTEM_CLIENT:
# Skip the window wrapping the plugin window,
# which doesn't expose a Gecko accessible in Gecko >= 11.
parent=parent.parent.parent
res = IAccessibleHandler.accNavigate(parent.IAccessibleObject, 0, IAccessibleHandler.NAVRELATION_EMBEDS)
if res:
obj = IAccessible(IAccessibleObject=res[0], IAccessibleChildID=res[1])
if obj:
if controlTypes.State.OFFSCREEN not in obj.states:
return obj
else:
log.debugWarning("NAVRELATION_EMBEDS returned an offscreen document, name %r" % obj.name)
else:
log.debugWarning("NAVRELATION_EMBEDS returned an invalid object")
else:
log.debugWarning("NAVRELATION_EMBEDS failed")
return parent
class TextLeaf(Mozilla):
role = controlTypes.Role.STATICTEXT
beTransparentToMouse = True
def findExtraOverlayClasses(obj, clsList):
"""Determine the most appropriate class if this is a Mozilla object.
This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method.
"""
if not isinstance(obj.IAccessibleObject, IA2.IAccessible2):
return
iaRole = obj.IAccessibleRole
cls = None
if iaRole == oleacc.ROLE_SYSTEM_TEXT:
# Check if this is a text leaf.
iaStates = obj.IAccessibleStates
# Text leaves are never focusable.
# Not unavailable excludes disabled editable text fields (which also aren't focusable).
if not (iaStates & oleacc.STATE_SYSTEM_FOCUSABLE or iaStates & oleacc.STATE_SYSTEM_UNAVAILABLE):
# This excludes a non-focusable @role="textbox".
if not (obj.IA2States & IA2.IA2_STATE_EDITABLE):
cls = TextLeaf
if not cls:
cls = _IAccessibleRolesToOverlayClasses.get(iaRole)
if cls:
clsList.append(cls)
if iaRole == oleacc.ROLE_SYSTEM_ROW:
clsList.append(RowWithFakeNavigation)
elif iaRole == oleacc.ROLE_SYSTEM_LISTITEM and hasattr(obj.parent, "IAccessibleTableObject"):
clsList.append(RowWithFakeNavigation)
elif iaRole == oleacc.ROLE_SYSTEM_OUTLINEITEM:
# Check if the tree view is a table.
parent = obj.parent
# Tree view items may be nested, so skip any tree view item ancestors.
while parent and isinstance(parent, Mozilla) and parent.IAccessibleRole == oleacc.ROLE_SYSTEM_OUTLINEITEM:
newParent = parent.parent
parent.parent = newParent
parent = newParent
if hasattr(parent, "IAccessibleTableObject") or hasattr(parent, "IAccessibleTable2Object"):
clsList.append(RowWithFakeNavigation)
ia2Web.findExtraOverlayClasses(obj, clsList,
baseClass=Mozilla, documentClass=Document)
#: Maps IAccessible roles to NVDAObject overlay classes.
_IAccessibleRolesToOverlayClasses = {
IA2.IA2_ROLE_EMBEDDED_OBJECT: EmbeddedObject,
"embed": EmbeddedObject,
"object": EmbeddedObject,
}
|
en
| 0.853187
|
# -*- coding: UTF-8 -*- # A part of NonVisual Desktop Access (NVDA) # This file is covered by the GNU General Public License. # See the file COPYING for more details. # Copyright (C) 2006-2021 NV Access Limited, <NAME> Firefox does not yet support 'description-from' attribute (which informs NVDA of the source of accDescription after the name/description computation is complete. However, a primary use-case can be supported via the IA2attribute 'description' which is exposed by Firefox and tells us the value of the "aria-description" attribute. If the value of accDescription matches, we can infer that the source of accDescription is 'aria-description'. Note: At the time of development some 'generic HTML elements' (E.G. 'span') may not be exposed by Firefox, even if the element has an aria-description attribute. Other more significant ARIA attributes such as role may cause the element to be exposed. # aria-description is missing or empty # Ensure that aria-description is actually the value used. # I.E. accDescription is sourced from the aria-description attribute as a result of the # name/description computation. # If the values don't match, some other source must have been used. # accParent is broken in Firefox for same-process iframe documents. # Use NODE_CHILD_OF instead. # This window doesn't have the focus, which means the embedded object's window probably already has the focus. # We don't want to override the focus event fired by the embedded object. # Skip the window wrapping the plugin window, # which doesn't expose a Gecko accessible in Gecko >= 11. Determine the most appropriate class if this is a Mozilla object. This works similarly to L{NVDAObjects.NVDAObject.findOverlayClasses} except that it never calls any other findOverlayClasses method. # Check if this is a text leaf. # Text leaves are never focusable. # Not unavailable excludes disabled editable text fields (which also aren't focusable). # This excludes a non-focusable @role="textbox". 
# Check if the tree view is a table. # Tree view items may be nested, so skip any tree view item ancestors. #: Maps IAccessible roles to NVDAObject overlay classes.
| 1.90142
| 2
|
lib/galaxy/util/monitors.py
|
innovate-invent/galaxy
| 1
|
6625417
|
from __future__ import absolute_import
import logging
import threading
from galaxy.web.stack import register_postfork_function
from .sleeper import Sleeper
log = logging.getLogger(__name__)
DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT = 5
class Monitors(object):
    """Mixin providing a daemon monitor thread with cooperative shutdown.

    Subclasses call :meth:`_init_monitor_thread` with either a ``target``
    callable or the name of one of their own methods (``target_name``); the
    thread is started after fork via ``register_postfork_function`` when
    ``start`` is true. The monitor loop should check ``self.monitor_running``
    and sleep via :meth:`_monitor_sleep` so :meth:`shutdown_monitor` can wake
    and join it promptly.
    """

    def _init_monitor_thread(self, name, target_name=None, target=None, start=False, config=None):
        """Create (but do not start) the monitor thread.

        :param name: thread name, for logging/debugging.
        :param target_name: name of a method on ``self`` to run (mutually
            exclusive with ``target``); defaults to ``"monitor"``.
        :param target: callable to run in the thread.
        :param start: whether to start the thread after fork.
        :param config: object optionally providing
            ``monitor_thread_join_timeout`` (seconds; <= 0 disables joining
            on shutdown).
        """
        self.monitor_join_sleep = getattr(config, "monitor_thread_join_timeout", DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT)
        self.monitor_join = self.monitor_join_sleep > 0
        # A single sleeper serves the monitor loop; ``monitor_sleeper`` is kept
        # as an alias for backward compatibility (it was previously a second,
        # never-used Sleeper instance).
        self.sleeper = Sleeper()
        self.monitor_sleeper = self.sleeper
        self.monitor_running = True
        if target is not None:
            assert target_name is None
            monitor_func = target
        else:
            target_name = target_name or "monitor"
            monitor_func = getattr(self, target_name)
        self.monitor_thread = threading.Thread(name=name, target=monitor_func)
        # ``setDaemon`` is deprecated; set the attribute directly.
        self.monitor_thread.daemon = True
        self._start = start
        register_postfork_function(self.start_monitoring)

    def start_monitoring(self):
        """Start the monitor thread if it was created with ``start=True``."""
        if self._start:
            self.monitor_thread.start()

    def stop_monitoring(self):
        """Signal the monitor loop to exit (does not wake a sleeping loop)."""
        self.monitor_running = False

    def _monitor_sleep(self, sleep_amount):
        """Interruptible sleep for use inside the monitor loop."""
        self.sleeper.sleep(sleep_amount)

    def shutdown_monitor(self):
        """Stop the loop, wake any pending sleep, and optionally join the thread."""
        self.stop_monitoring()
        self.sleeper.wake()
        if self.monitor_join:
            log.debug("Joining monitor thread")
            self.monitor_thread.join(self.monitor_join_sleep)
|
from __future__ import absolute_import
import logging
import threading
from galaxy.web.stack import register_postfork_function
from .sleeper import Sleeper
log = logging.getLogger(__name__)
DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT = 5
class Monitors(object):
    """Helper base class managing a daemonized background monitor thread."""

    def _init_monitor_thread(self, name, target_name=None, target=None, start=False, config=None):
        # Join timeout comes from config when present; a value <= 0 disables
        # joining at shutdown.
        self.monitor_join_sleep = getattr(config, "monitor_thread_join_timeout", DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT)
        self.monitor_join = self.monitor_join_sleep > 0
        self.monitor_sleeper = Sleeper()
        self.monitor_running = True
        if target is not None:
            # An explicit callable and a target_name are mutually exclusive.
            assert target_name is None
            monitor_func = target
        else:
            # Default to a method named "monitor" on self.
            target_name = target_name or "monitor"
            monitor_func = getattr(self, target_name)
        self.sleeper = Sleeper()
        self.monitor_thread = threading.Thread(name=name, target=monitor_func)
        self.monitor_thread.setDaemon(True)
        self._start = start
        # Defer the actual start until after the server process forks.
        register_postfork_function(self.start_monitoring)

    def start_monitoring(self):
        # Start only when requested at init time.
        if self._start:
            self.monitor_thread.start()

    def stop_monitoring(self):
        # Monitor loops are expected to poll this flag and exit when False.
        self.monitor_running = False

    def _monitor_sleep(self, sleep_amount):
        # Interruptible sleep; shutdown_monitor wakes it early.
        self.sleeper.sleep(sleep_amount)

    def shutdown_monitor(self):
        self.stop_monitoring()
        self.sleeper.wake()
        if self.monitor_join:
            log.debug("Joining monitor thread")
            self.monitor_thread.join(self.monitor_join_sleep)
|
none
| 1
| 2.351417
| 2
|
|
Login!/Profile/css/flask-image-upload-thing-master/tests/test_delete.py
|
baguilar1998/artstartWebsite
| 20
|
6625418
|
from StringIO import StringIO
from flexmock import flexmock
from flask_uploads import delete, save
from . import TestCase
from .test_save import FakeImage
class TestDelete(TestCase):
    """Tests for ``flask_uploads.delete``: storage removal, resized-variant
    removal, and database session bookkeeping."""

    def setup_method(self, method):
        # Only the resized-image test needs a resizer double; every other
        # test runs without one.
        resizer = None
        if method == self.test_deletes_resized_images_from_storage:
            resizer = flexmock(sizes={'nail': (238, 23)})
            resizer.should_receive('resize_image').and_return(
                {'nail': FakeImage('nail', 'jpg')}
            )
        TestCase.setup_method(self, method, resizer)
        # Create one upload to delete, then reset the bookkeeping lists so
        # each test observes only its own session activity.
        save(StringIO(u'cinna'), u'games')
        self.upload = self.committed_objects[0]
        self.added_objects[:] = []
        self.committed_objects[:] = []

    def test_deletes_from_storage(self):
        assert self.storage.exists(u'games')
        delete(self.upload)
        assert not self.storage.exists(u'games')

    def test_deletes_resized_images_from_storage(self):
        assert self.storage.exists(u'games_nail.jpg')
        delete(self.upload)
        assert not self.storage.exists(u'games_nail.jpg')

    def test_deletes_from_db(self):
        delete(self.upload)
        assert self.upload in self.deleted_objects

    def test_commits_deletion(self):
        delete(self.upload)
        assert self.upload in self.committed_objects
|
from StringIO import StringIO
from flexmock import flexmock
from flask_uploads import delete, save
from . import TestCase
from .test_save import FakeImage
class TestDelete(TestCase):
    """Tests for flask_uploads.delete covering storage removal, resized
    variant removal, and database session bookkeeping."""

    def setup_method(self, method):
        # Only the resized-image test requires a resizer double.
        if method == self.test_deletes_resized_images_from_storage:
            resizer = flexmock(sizes={'nail': (238, 23)})
            (resizer
             .should_receive('resize_image')
             .and_return({'nail': FakeImage('nail', 'jpg')}))
        else:
            resizer = None
        TestCase.setup_method(self, method, resizer)
        # Save one upload, then clear the bookkeeping lists so each test
        # sees only the session activity caused by delete().
        save(StringIO(u'cinna'), u'games')
        self.upload = self.committed_objects[0]
        self.added_objects[:] = []
        self.committed_objects[:] = []

    def test_deletes_from_storage(self):
        assert self.storage.exists(u'games')
        delete(self.upload)
        assert not self.storage.exists(u'games')

    def test_deletes_resized_images_from_storage(self):
        assert self.storage.exists(u'games_nail.jpg')
        delete(self.upload)
        assert not self.storage.exists(u'games_nail.jpg')

    def test_deletes_from_db(self):
        delete(self.upload)
        assert self.upload in self.deleted_objects

    def test_commits_deletion(self):
        delete(self.upload)
        assert self.upload in self.committed_objects
|
none
| 1
| 2.368393
| 2
|
|
src/main/resources/pytz/zoneinfo/America/Cordoba.py
|
TheEin/swagger-maven-plugin
| 65
|
6625419
|
'''tzinfo timezone information for America/Cordoba.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Cordoba(DstTzInfo):
    '''America/Cordoba timezone definition. See datetime.tzinfo for details'''

    zone = 'America/Cordoba'

    # UTC instants at which the local offset changes.  This list is parallel
    # to _transition_info: _transition_info[k] applies from
    # _utc_transition_times[k] onward.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1920,5,1,4,16,48),
        d(1930,12,1,4,0,0),
        d(1931,4,1,3,0,0),
        d(1931,10,15,4,0,0),
        d(1932,3,1,3,0,0),
        d(1932,11,1,4,0,0),
        d(1933,3,1,3,0,0),
        d(1933,11,1,4,0,0),
        d(1934,3,1,3,0,0),
        d(1934,11,1,4,0,0),
        d(1935,3,1,3,0,0),
        d(1935,11,1,4,0,0),
        d(1936,3,1,3,0,0),
        d(1936,11,1,4,0,0),
        d(1937,3,1,3,0,0),
        d(1937,11,1,4,0,0),
        d(1938,3,1,3,0,0),
        d(1938,11,1,4,0,0),
        d(1939,3,1,3,0,0),
        d(1939,11,1,4,0,0),
        d(1940,3,1,3,0,0),
        d(1940,7,1,4,0,0),
        d(1941,6,15,3,0,0),
        d(1941,10,15,4,0,0),
        d(1943,8,1,3,0,0),
        d(1943,10,15,4,0,0),
        d(1946,3,1,3,0,0),
        d(1946,10,1,4,0,0),
        d(1963,10,1,3,0,0),
        d(1963,12,15,4,0,0),
        d(1964,3,1,3,0,0),
        d(1964,10,15,4,0,0),
        d(1965,3,1,3,0,0),
        d(1965,10,15,4,0,0),
        d(1966,3,1,3,0,0),
        d(1966,10,15,4,0,0),
        d(1967,4,2,3,0,0),
        d(1967,10,1,4,0,0),
        d(1968,4,7,3,0,0),
        d(1968,10,6,4,0,0),
        d(1969,4,6,3,0,0),
        d(1969,10,5,4,0,0),
        d(1974,1,23,3,0,0),
        d(1974,5,1,2,0,0),
        d(1988,12,1,3,0,0),
        d(1989,3,5,2,0,0),
        d(1989,10,15,3,0,0),
        d(1990,3,4,2,0,0),
        d(1990,10,21,3,0,0),
        d(1991,3,3,2,0,0),
        d(1991,10,20,4,0,0),
        d(1992,3,1,2,0,0),
        d(1992,10,18,3,0,0),
        d(1993,3,7,2,0,0),
        d(1999,10,3,3,0,0),
        d(2000,3,3,3,0,0),
        ]

    # One (utcoffset seconds, DST adjustment seconds, tzname) tuple per
    # interval between the transition times above.
    _transition_info = [
        i(-15420,0,'CMT'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-14400,0,'WART'),
        i(-7200,7200,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-10800,0,'ARST'),
        i(-10800,0,'ART'),
        ]

# pytz convention: replace the class with its singleton instance.
Cordoba = Cordoba()
|
'''tzinfo timezone information for America/Cordoba.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Cordoba(DstTzInfo):
    '''America/Cordoba timezone definition. See datetime.tzinfo for details'''

    zone = 'America/Cordoba'

    # UTC instants at which the local offset changes.  This list is parallel
    # to _transition_info: _transition_info[k] applies from
    # _utc_transition_times[k] onward.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1920,5,1,4,16,48),
        d(1930,12,1,4,0,0),
        d(1931,4,1,3,0,0),
        d(1931,10,15,4,0,0),
        d(1932,3,1,3,0,0),
        d(1932,11,1,4,0,0),
        d(1933,3,1,3,0,0),
        d(1933,11,1,4,0,0),
        d(1934,3,1,3,0,0),
        d(1934,11,1,4,0,0),
        d(1935,3,1,3,0,0),
        d(1935,11,1,4,0,0),
        d(1936,3,1,3,0,0),
        d(1936,11,1,4,0,0),
        d(1937,3,1,3,0,0),
        d(1937,11,1,4,0,0),
        d(1938,3,1,3,0,0),
        d(1938,11,1,4,0,0),
        d(1939,3,1,3,0,0),
        d(1939,11,1,4,0,0),
        d(1940,3,1,3,0,0),
        d(1940,7,1,4,0,0),
        d(1941,6,15,3,0,0),
        d(1941,10,15,4,0,0),
        d(1943,8,1,3,0,0),
        d(1943,10,15,4,0,0),
        d(1946,3,1,3,0,0),
        d(1946,10,1,4,0,0),
        d(1963,10,1,3,0,0),
        d(1963,12,15,4,0,0),
        d(1964,3,1,3,0,0),
        d(1964,10,15,4,0,0),
        d(1965,3,1,3,0,0),
        d(1965,10,15,4,0,0),
        d(1966,3,1,3,0,0),
        d(1966,10,15,4,0,0),
        d(1967,4,2,3,0,0),
        d(1967,10,1,4,0,0),
        d(1968,4,7,3,0,0),
        d(1968,10,6,4,0,0),
        d(1969,4,6,3,0,0),
        d(1969,10,5,4,0,0),
        d(1974,1,23,3,0,0),
        d(1974,5,1,2,0,0),
        d(1988,12,1,3,0,0),
        d(1989,3,5,2,0,0),
        d(1989,10,15,3,0,0),
        d(1990,3,4,2,0,0),
        d(1990,10,21,3,0,0),
        d(1991,3,3,2,0,0),
        d(1991,10,20,4,0,0),
        d(1992,3,1,2,0,0),
        d(1992,10,18,3,0,0),
        d(1993,3,7,2,0,0),
        d(1999,10,3,3,0,0),
        d(2000,3,3,3,0,0),
        ]

    # One (utcoffset seconds, DST adjustment seconds, tzname) tuple per
    # interval between the transition times above.
    _transition_info = [
        i(-15420,0,'CMT'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,3600,'ARST'),
        i(-14400,0,'ART'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-14400,0,'WART'),
        i(-7200,7200,'ARST'),
        i(-10800,0,'ART'),
        i(-7200,3600,'ARST'),
        i(-10800,0,'ART'),
        i(-10800,0,'ARST'),
        i(-10800,0,'ART'),
        ]

# pytz convention: replace the class with its singleton instance.
Cordoba = Cordoba()
|
en
| 0.32441
|
tzinfo timezone information for America/Cordoba. America/Cordoba timezone definition. See datetime.tzinfo for details
| 2.935139
| 3
|
ibllib/atlas/__init__.py
|
int-brain-lab/ibllib
| 38
|
6625420
|
<reponame>int-brain-lab/ibllib
from .atlas import * # noqa
from .regions import regions_from_allen_csv
|
from .atlas import * # noqa
from .regions import regions_from_allen_csv
|
none
| 1
| 0.998381
| 1
|
|
githubcommit/handlers.py
|
surrbsk10/githubcommit
| 0
|
6625421
|
from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import IPythonHandler
import os, json, git, urllib, requests
from git import Repo, GitCommandError
from subprocess import check_output
import subprocess
class GitCommitHandler(IPythonHandler):
    """Notebook server handler that commits the current notebook to a git
    repository, pushes it, and opens a GitHub pull request upstream.

    Fixes over the previous revision:
    - ``dir_repo`` could be unbound in the first error handler when
      ``os.chdir`` failed before it was assigned (NameError); it is now
      pre-initialized and OSError from chdir is handled too.
    - the bare ``except:`` around the pull-request call is narrowed to
      ``except Exception`` so KeyboardInterrupt/SystemExit propagate.
    - leftover debug prints removed.
    """

    def error_and_return(self, dirname, reason):
        """Send a 500 response with ``reason`` and restore the cwd."""
        self.send_error(500, reason=reason)
        os.chdir(dirname)

    def put(self):
        """Commit, push, and PR the notebook named in the request body."""
        # Git parameters from environment variables; expand variables since
        # Docker will pass VAR=$VAL as $VAL without expansion.
        git_dir = "{}/{}".format(os.path.expandvars(os.environ.get('GIT_PARENT_DIR')), os.path.expandvars(os.environ.get('GIT_REPO_NAME')))
        git_url = os.path.expandvars(os.environ.get('GIT_REMOTE_URL'))
        git_user = os.path.expandvars(os.environ.get('GIT_USER'))
        git_repo_upstream = os.path.expandvars(os.environ.get('GIT_REMOTE_UPSTREAM'))
        # Branch and remote deliberately share one name.
        git_branch = git_remote = os.path.expandvars(os.environ.get('GIT_BRANCH_NAME'))
        git_access_token = os.path.expandvars(os.environ.get('GITHUB_ACCESS_TOKEN'))
        # Parent directory used when reporting commit failures.
        git_dir_parent = os.path.dirname(git_dir)
        # Obtain filename and commit message from the request body.
        data = json.loads(self.request.body.decode('utf-8'))
        filename = urllib.parse.unquote(data['filename'])
        msg = data['msg']
        commit_only_source = data['commit_only_source']
        # Remember the current directory so it can be restored on any exit.
        cwd = os.getcwd()
        # Locate the repository.  dir_repo is pre-initialized so the error
        # path can report something sensible even if chdir itself fails.
        dir_repo = git_dir
        try:
            os.chdir(git_dir)
            dir_repo = check_output(['git', 'rev-parse', '--show-toplevel']).strip()
            repo = Repo(dir_repo.decode('utf8'))
        except (OSError, GitCommandError):
            self.error_and_return(cwd, "Could not checkout repo: {}".format(dir_repo))
            return
        # Create the working branch, or switch to it if it already exists.
        try:
            print(repo.git.checkout('HEAD', b=git_branch))
        except GitCommandError:
            print("Switching to {}".format(repo.heads[git_branch].checkout()))
        # Commit the notebook; optionally convert it to a .py script first
        # and commit that instead.
        try:
            if commit_only_source:
                subprocess.run(['jupyter', 'nbconvert', '--to', 'script', str(filename)])
                filename = filename.replace('ipynb', 'py')
            print(repo.git.add(str(os.environ.get('GIT_PARENT_DIR') + "/" + os.environ.get('GIT_REPO_NAME'))))
            print(repo.git.commit(a=False, m="{}\n\nUpdated {}".format(msg, filename)))
        except GitCommandError as e:
            print(e)
            self.error_and_return(cwd, "Could not commit changes to notebook: {}".format(git_dir_parent + filename))
            return
        # Create the remote, or reuse it if it already exists.
        try:
            remote = repo.create_remote(git_remote, git_url)
        except GitCommandError:
            print("Remote {} already exists...".format(git_remote))
            remote = repo.remote(git_remote)
        # Push and verify the result flags are one of the benign outcomes.
        try:
            pushed = remote.push(git_branch)
            assert len(pushed) > 0
            assert pushed[0].flags in [git.remote.PushInfo.UP_TO_DATE, git.remote.PushInfo.FAST_FORWARD, git.remote.PushInfo.NEW_HEAD, git.remote.PushInfo.NEW_TAG]
        except GitCommandError as e:
            print(e)
            self.error_and_return(cwd, "Could not push to remote {}".format(git_remote))
            return
        except AssertionError:
            self.error_and_return(cwd, "Could not push to remote {}: {}".format(git_remote, pushed[0].summary))
            return
        # Best-effort: open a pull request against the upstream repository.
        # Failure here is non-fatal; the commit/push result is still reported.
        try:
            github_url = "https://api.github.com/repos/{}/pulls".format(git_repo_upstream)
            github_pr = {
                "title": "{} Notebooks".format(git_user),
                "body": "IPython notebooks submitted by {}".format(git_user),
                "head": "{}:{}".format(git_user, git_remote),
                "base": "master"
            }
            github_headers = {"Authorization": "token {}".format(git_access_token)}
            r = requests.post(github_url, data=json.dumps(github_pr), headers=github_headers)
            if r.status_code != 201:
                print("Error submitting Pull Request to {}".format(git_repo_upstream))
        except Exception:
            print("Error submitting Pull Request to {}".format(git_repo_upstream))
        # Restore the working directory and report success to the client.
        os.chdir(cwd)
        self.write({'status': 200, 'statusText': 'Success! Changes to {} captured on branch {} at {}'.format(filename, git_branch, git_url)})
def setup_handlers(nbapp):
    """Register the /git/commit endpoint on the notebook web application."""
    pattern = ujoin(nbapp.settings['base_url'], '/git/commit')
    nbapp.add_handlers('.*', [(pattern, GitCommitHandler)])
|
from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import IPythonHandler
import os, json, git, urllib, requests
from git import Repo, GitCommandError
from subprocess import check_output
import subprocess
class GitCommitHandler(IPythonHandler):
    """Notebook server handler: commits the current notebook to a git
    repository, pushes it, and opens a GitHub pull request upstream."""

    def error_and_return(self, dirname, reason):
        # send error
        self.send_error(500, reason=reason)
        # return to directory
        os.chdir(dirname)

    def put(self):
        # git parameters from environment variables
        # expand variables since Docker's will pass VAR=$VAL as $VAL without expansion
        git_dir = "{}/{}".format(os.path.expandvars(os.environ.get('GIT_PARENT_DIR')), os.path.expandvars(os.environ.get('GIT_REPO_NAME')))
        git_url = os.path.expandvars(os.environ.get('GIT_REMOTE_URL'))
        git_user = os.path.expandvars(os.environ.get('GIT_USER'))
        git_repo_upstream = os.path.expandvars(os.environ.get('GIT_REMOTE_UPSTREAM'))
        # NOTE(review): branch and remote intentionally share a single name.
        git_branch = git_remote = os.path.expandvars(os.environ.get('GIT_BRANCH_NAME'))
        git_access_token = os.path.expandvars(os.environ.get('GITHUB_ACCESS_TOKEN'))
        # get the parent directory for git operations
        git_dir_parent = os.path.dirname(git_dir)
        # obtain filename and msg for commit
        data = json.loads(self.request.body.decode('utf-8'))
        filename = urllib.parse.unquote(data['filename'])
        msg = data['msg']
        commit_only_source = data['commit_only_source']
        # get current directory (to return later)
        cwd = os.getcwd()
        print('Surrrrrrrrr')  # HACK: leftover debug output
        # select branch within repo
        try:
            print(f"Git dir nbass:: {git_dir}")  # HACK: leftover debug output
            os.chdir(git_dir)
            dir_repo = check_output(['git','rev-parse','--show-toplevel']).strip()
            repo = Repo(dir_repo.decode('utf8'))
        except GitCommandError as e:
            # NOTE(review): dir_repo is unbound here if os.chdir raised first;
            # also OSError from chdir is not caught by this handler — confirm.
            self.error_and_return(cwd, "Could not checkout repo: {}".format(dir_repo))
            return
        # create new branch
        try:
            print(repo.git.checkout('HEAD', b=git_branch))
        except GitCommandError:
            # Branch already exists — switch to it instead.
            print("Switching to {}".format(repo.heads[git_branch].checkout()))
        # commit current notebook
        # client will sent pathname containing git directory; append to git directory's parent
        try:
            if commit_only_source :
                # Convert the notebook to a .py script and commit that instead.
                subprocess.run(['jupyter', 'nbconvert', '--to', 'script', str(filename)])
                filename = filename.replace('ipynb', 'py')
            print(repo.git.add(str(os.environ.get('GIT_PARENT_DIR') + "/" + os.environ.get('GIT_REPO_NAME'))))
            print(repo.git.commit( a=False, m="{}\n\nUpdated {}".format(msg, filename) ))
        except GitCommandError as e:
            print(e)
            self.error_and_return(cwd, "Could not commit changes to notebook: {}".format(git_dir_parent + filename))
            return
        # create or switch to remote
        try:
            remote = repo.create_remote(git_remote, git_url)
        except GitCommandError:
            print("Remote {} already exists...".format(git_remote))
            remote = repo.remote(git_remote)
        # push changes
        try:
            pushed = remote.push(git_branch)
            assert len(pushed)>0
            # Accept only the benign push outcomes; anything else is an error.
            assert pushed[0].flags in [git.remote.PushInfo.UP_TO_DATE, git.remote.PushInfo.FAST_FORWARD, git.remote.PushInfo.NEW_HEAD, git.remote.PushInfo.NEW_TAG]
        except GitCommandError as e:
            print(e)
            self.error_and_return(cwd, "Could not push to remote {}".format(git_remote))
            return
        except AssertionError as e:
            self.error_and_return(cwd, "Could not push to remote {}: {}".format(git_remote, pushed[0].summary))
            return
        # open pull request
        try:
            github_url = "https://api.github.com/repos/{}/pulls".format(git_repo_upstream)
            github_pr = {
                "title":"{} Notebooks".format(git_user),
                "body":"IPython notebooks submitted by {}".format(git_user),
                "head":"{}:{}".format(git_user, git_remote),
                "base":"master"
            }
            github_headers = {"Authorization": "token {}".format(git_access_token)}
            r = requests.post(github_url, data=json.dumps(github_pr), headers=github_headers)
            if r.status_code != 201:
                print("Error submitting Pull Request to {}".format(git_repo_upstream))
        except:
            # NOTE(review): bare except — PR creation is treated as
            # best-effort, but this also swallows KeyboardInterrupt.
            print("Error submitting Pull Request to {}".format(git_repo_upstream))
        # return to directory
        os.chdir(cwd)
        # close connection
        self.write({'status': 200, 'statusText': 'Success! Changes to {} captured on branch {} at {}'.format(filename, git_branch, git_url)})
def setup_handlers(nbapp):
    """Register the /git/commit route on the notebook web application."""
    route_pattern = ujoin(nbapp.settings['base_url'], '/git/commit')
    nbapp.add_handlers('.*', [(route_pattern, GitCommitHandler)])
|
en
| 0.771479
|
# send error # return to directory # git parameters from environment variables # expand variables since Docker's will pass VAR=$VAL as $VAL without expansion # get the parent directory for git operations # obtain filename and msg for commit # get current directory (to return later) # select branch within repo # create new branch # commit current notebook # client will sent pathname containing git directory; append to git directory's parent # create or switch to remote # push changes # open pull request # return to directory # close connection
| 2.156232
| 2
|
src/grokcore/component/tests/adapter/implementsnonemulti.py
|
bielbienne/grokcore.component
| 0
|
6625422
|
<reponame>bielbienne/grokcore.component
"""
Subclasses of grok.Adapter and grok.MultiAdapter must implement exactly one
interface:
>>> grok.testing.grok(__name__)
Traceback (most recent call last):
...
GrokError: <class 'grokcore.component.tests.adapter.implementsnonemulti.Home'> must
implement at least one interface (use grok.implements to specify).
"""
import grokcore.component as grok
class Cave(grok.Context):
    # Context object for the adapter-registration failure test.
    pass


class Home(grok.MultiAdapter):
    # Intentionally implements no interface: grokking this module must fail
    # with the GrokError shown in the module docstring above.
    pass
|
"""
Subclasses of grok.Adapter and grok.MultiAdapter must implement exactly one
interface:
>>> grok.testing.grok(__name__)
Traceback (most recent call last):
...
GrokError: <class 'grokcore.component.tests.adapter.implementsnonemulti.Home'> must
implement at least one interface (use grok.implements to specify).
"""
import grokcore.component as grok
class Cave(grok.Context):
    # Context object for the adapter-registration failure test.
    pass


class Home(grok.MultiAdapter):
    # Intentionally implements no interface: grokking this module must fail
    # with the GrokError shown in the module docstring above.
    pass
|
en
| 0.417995
|
Subclasses of grok.Adapter and grok.MultiAdapter must implement exactly one interface: >>> grok.testing.grok(__name__) Traceback (most recent call last): ... GrokError: <class 'grokcore.component.tests.adapter.implementsnonemulti.Home'> must implement at least one interface (use grok.implements to specify).
| 2.122558
| 2
|
infovoid/infovoid/apps/services/urls.py
|
wes-o/infovoid
| 0
|
6625423
|
from django.urls import path
from . import views
# URL namespace used when reversing, e.g. {% url 'services:index' %}.
app_name = 'services'
urlpatterns = [
    path('', views.index, name='index'),
    path('archive/', views.archive, name='archive'),
    # Date-based drill-down views: year -> month -> day.
    path('<int:year>/', views.year, name='year'),
    path('<int:year>/<int:month>/', views.month, name='month'),
    path('<int:year>/<int:month>/<int:day>/', views.day, name='day'),
]
|
from django.urls import path
from . import views
# URL namespace used when reversing, e.g. {% url 'services:index' %}.
app_name = 'services'
urlpatterns = [
    path('', views.index, name='index'),
    path('archive/', views.archive, name='archive'),
    # Date-based drill-down views: year -> month -> day.
    path('<int:year>/', views.year, name='year'),
    path('<int:year>/<int:month>/', views.month, name='month'),
    path('<int:year>/<int:month>/<int:day>/', views.day, name='day'),
]
|
none
| 1
| 1.745524
| 2
|
|
app/migrations/0005_auto_20210701_0342.py
|
Divyanshukalola/Alchemeo
| 0
|
6625424
|
<gh_stars>0
# Generated by Django 2.2.10 on 2021-07-01 03:42
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds Invoice.Invoice_comments and refreshes
    # the auto-generated datetime defaults on two fields.
    # NOTE(review): the datetime(...) defaults are constants frozen at
    # makemigrations time, not evaluated per row — a common Django
    # auto-generation artifact; confirm this is intended.

    dependencies = [
        ('app', '0004_auto_20210630_0658'),
    ]

    operations = [
        migrations.AddField(
            model_name='invoice',
            name='Invoice_comments',
            field=models.TextField(blank=True, verbose_name='Invoice Comments'),
        ),
        migrations.AlterField(
            model_name='accessrequest',
            name='Request_date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 1, 3, 42, 27, 592689), null=True, verbose_name='Date of Request'),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='Invoice_Date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 1, 3, 42, 27, 591520), null=True, verbose_name='Date of invoice'),
        ),
    ]
|
# Generated by Django 2.2.10 on 2021-07-01 03:42
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds Invoice.Invoice_comments and refreshes
    # the auto-generated datetime defaults on two fields.
    # NOTE(review): the datetime(...) defaults are constants frozen at
    # makemigrations time, not evaluated per row — a common Django
    # auto-generation artifact; confirm this is intended.

    dependencies = [
        ('app', '0004_auto_20210630_0658'),
    ]

    operations = [
        migrations.AddField(
            model_name='invoice',
            name='Invoice_comments',
            field=models.TextField(blank=True, verbose_name='Invoice Comments'),
        ),
        migrations.AlterField(
            model_name='accessrequest',
            name='Request_date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 1, 3, 42, 27, 592689), null=True, verbose_name='Date of Request'),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='Invoice_Date',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 7, 1, 3, 42, 27, 591520), null=True, verbose_name='Date of invoice'),
        ),
    ]
|
en
| 0.782329
|
# Generated by Django 2.2.10 on 2021-07-01 03:42
| 1.723261
| 2
|
CONTENT/Resources/guides/__UNSORTED/296_best_meeting_point/meetingpoint.py
|
impastasyndrome/DS-ALGO-OFFICIAL
| 13
|
6625425
|
class Solution(object):
    """LeetCode 296 - Best Meeting Point."""

    def minTotalDistance(self, grid):
        """Return the minimal total Manhattan distance from every 1-cell in
        ``grid`` to a single meeting cell.

        The optimal meeting point's row is the median of the occupied rows
        and its column is the median of the occupied columns, because the
        two axes contribute independently to Manhattan distance.

        :type grid: List[List[int]]
        :rtype: int
        """
        if not grid:
            return 0
        rows = []
        cols = []
        for r, row in enumerate(grid):
            for c, cell in enumerate(row):
                if cell == 1:
                    rows.append(r)
                    cols.append(c)
        # Robustness: with no occupied cells the median index would be
        # out of range; total distance is trivially zero.
        if not rows:
            return 0
        rows.sort()
        cols.sort()
        # BUG FIX: the original used len(...) / 2, which is a float under
        # Python 3 and raises TypeError when used as a list index; floor
        # division keeps it an int.
        mid_row = rows[len(rows) // 2]
        mid_col = cols[len(cols) // 2]
        return (sum(abs(r - mid_row) for r in rows)
                + sum(abs(c - mid_col) for c in cols))
|
class Solution(object):
    """LeetCode 296 - Best Meeting Point."""

    def minTotalDistance(self, grid):
        """Return the minimal total Manhattan distance from every 1-cell in
        ``grid`` to a single meeting cell.

        The optimal meeting point's row is the median of the occupied rows
        and its column is the median of the occupied columns, because the
        two axes contribute independently to Manhattan distance.

        :type grid: List[List[int]]
        :rtype: int
        """
        if not grid:
            return 0
        rows = []
        cols = []
        for r, row in enumerate(grid):
            for c, cell in enumerate(row):
                if cell == 1:
                    rows.append(r)
                    cols.append(c)
        # Robustness: with no occupied cells the median index would be
        # out of range; total distance is trivially zero.
        if not rows:
            return 0
        rows.sort()
        cols.sort()
        # BUG FIX: the original used len(...) / 2, which is a float under
        # Python 3 and raises TypeError when used as a list index; floor
        # division keeps it an int.
        mid_row = rows[len(rows) // 2]
        mid_col = cols[len(cols) // 2]
        return (sum(abs(r - mid_row) for r in rows)
                + sum(abs(c - mid_col) for c in cols))
|
en
| 0.28359
|
:type grid: List[List[int]] :rtype: int
| 3.271884
| 3
|
venv/Lib/site-packages/pandas/io/html.py
|
ajayiagbebaku/NFL-Model
| 6,989
|
6625426
|
"""
:mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
from __future__ import annotations
from collections import abc
import numbers
import os
import re
from typing import (
Pattern,
Sequence,
)
from pandas._typing import FilePathOrBuffer
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
)
from pandas.util._decorators import deprecate_nonkeyword_arguments
from pandas.core.dtypes.common import is_list_like
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.io.common import (
is_url,
stringify_path,
urlopen,
validate_header_arg,
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def _importers():
    """Lazily detect the optional HTML parser backends (bs4, lxml, html5lib).

    Runs the detection at most once per process; results are cached in the
    module-level ``_HAS_*`` flags and guarded by ``_IMPORTS``.
    """
    global _IMPORTS
    if _IMPORTS:
        return

    global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
    _HAS_BS4 = import_optional_dependency("bs4", errors="ignore") is not None
    _HAS_LXML = import_optional_dependency("lxml.etree", errors="ignore") is not None
    _HAS_HTML5LIB = import_optional_dependency("html5lib", errors="ignore") is not None

    _IMPORTS = True
#############
# READ HTML #
#############
_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}")
def _remove_whitespace(s: str, regex=_RE_WHITESPACE) -> str:
"""
Replace extra whitespace inside of a string with a single space.
Parameters
----------
s : str or unicode
The string from which to remove extra whitespace.
regex : re.Pattern
The regular expression to use to remove extra whitespace.
Returns
-------
subd : str or unicode
`s` with all extra whitespace replaced with a single space.
"""
return regex.sub(" ", s.strip())
def _get_skiprows(skiprows):
"""
Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
"""
if isinstance(skiprows, slice):
start, step = skiprows.start or 0, skiprows.step or 1
return list(range(start, skiprows.stop, step))
elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):
return skiprows
elif skiprows is None:
return 0
raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows")
def _read(obj):
    """
    Try to read from a url, file or string.

    Parameters
    ----------
    obj : str, unicode, or file-like

    Returns
    -------
    raw_text : str
    """
    if is_url(obj):
        with urlopen(obj) as response:
            return response.read()
    if hasattr(obj, "read"):
        return obj.read()
    if isinstance(obj, (str, bytes)):
        # A plain string/bytes value may also name a local file; if it
        # does, prefer the file's contents over the literal text.
        try:
            if os.path.isfile(obj):
                with open(obj, "rb") as handle:
                    return handle.read()
        except (TypeError, ValueError):
            pass
        return obj
    raise TypeError(f"Cannot read object of type '{type(obj).__name__}'")
class _HtmlFrameParser:
"""
Base class for parsers that parse HTML into DataFrames.
Parameters
----------
io : str or file-like
This can be either a string of raw HTML, a valid URL using the HTTP,
FTP, or FILE protocols or a file-like object.
match : str or regex
The text to match in the document.
attrs : dict
List of HTML <table> element attributes to match.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
Attributes
----------
io : str or file-like
raw HTML, URL, or file-like object
match : regex
The text to match in the raw HTML
attrs : dict-like
A dictionary of valid table attributes to use to search for table
elements.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
Notes
-----
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_attr_getter`
* :func:`_text_getter`
* :func:`_parse_td`
* :func:`_parse_thead_tr`
* :func:`_parse_tbody_tr`
* :func:`_parse_tfoot_tr`
* :func:`_parse_tables`
* :func:`_equals_tag`
See each method's respective documentation for details on their
functionality.
"""
    def __init__(self, io, match, attrs, encoding, displayed_only):
        # Raw HTML / URL / file-like input to parse.
        self.io = io
        # Text (str or compiled regex) a table must contain to be selected.
        self.match = match
        # <table> attributes used to disambiguate multiple tables.
        self.attrs = attrs
        # Character encoding handed to the underlying parser.
        self.encoding = encoding
        # When True, skip elements styled with "display:none".
        self.displayed_only = displayed_only
def parse_tables(self):
"""
Parse and return all tables from the DOM.
Returns
-------
list of parsed (header, body, footer) tuples from tables.
"""
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
return (self._parse_thead_tbody_tfoot(table) for table in tables)
def _attr_getter(self, obj, attr):
"""
Return the attribute value of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
attr : str or unicode
The attribute, such as "colspan"
Returns
-------
str or unicode
The attribute value.
"""
# Both lxml and BeautifulSoup have the same implementation:
return obj.get(attr)
def _text_getter(self, obj):
"""
Return the text of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
Returns
-------
text : str or unicode
The text from an individual DOM node.
"""
raise AbstractMethodError(self)
def _parse_td(self, obj):
"""
Return the td elements from a row element.
Parameters
----------
obj : node-like
A DOM <tr> node.
Returns
-------
list of node-like
These are the elements of each row, i.e., the columns.
"""
raise AbstractMethodError(self)
def _parse_thead_tr(self, table):
"""
Return the list of thead row elements from the parsed table element.
Parameters
----------
table : a table element that contains zero or more thead elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tbody_tr(self, table):
"""
Return the list of tbody row elements from the parsed table element.
HTML5 table bodies consist of either 0 or more <tbody> elements (which
only contain <tr> elements) or 0 or more <tr> elements. This method
checks for both structures.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tfoot_tr(self, table):
"""
Return the list of tfoot row elements from the parsed table element.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tables(self, doc, match, attrs):
"""
Return all tables from the parsed DOM.
Parameters
----------
doc : the DOM from which to parse the table element.
match : str or regular expression
The text to search for in the DOM tree.
attrs : dict
A dictionary of table attributes that can be used to disambiguate
multiple tables on a page.
Raises
------
ValueError : `match` does not match any text in the document.
Returns
-------
list of node-like
HTML <table> elements to be parsed into raw data.
"""
raise AbstractMethodError(self)
def _equals_tag(self, obj, tag):
"""
Return whether an individual DOM node matches a tag
Parameters
----------
obj : node-like
A DOM node.
tag : str
Tag name to be checked for equality.
Returns
-------
boolean
Whether `obj`'s tag name is `tag`
"""
raise AbstractMethodError(self)
def _build_doc(self):
"""
Return a tree-like object that can be used to iterate over the DOM.
Returns
-------
node-like
The DOM from which to parse the table element.
"""
raise AbstractMethodError(self)
    def _parse_thead_tbody_tfoot(self, table_html):
        """
        Given a table, return parsed header, body, and foot.

        Parameters
        ----------
        table_html : node-like

        Returns
        -------
        tuple of (header, body, footer), each a list of list-of-text rows.

        Notes
        -----
        Header and body are lists-of-lists. Top level list is a list of
        rows. Each row is a list of str text.

        Logic: Use <thead>, <tbody>, <tfoot> elements to identify
               header, body, and footer, otherwise:
               - Put all rows into body
               - Move rows from top of body to header only if
                 all elements inside row are <th>
               - Move rows from bottom of body to footer only if
                 all elements inside row are <th>
        """
        header_rows = self._parse_thead_tr(table_html)
        body_rows = self._parse_tbody_tr(table_html)
        footer_rows = self._parse_tfoot_tr(table_html)

        def row_is_all_th(row):
            # A row consisting solely of <th> cells is header-like.
            return all(self._equals_tag(t, "th") for t in self._parse_td(row))

        if not header_rows:
            # The table has no <thead>. Move the top all-<th> rows from
            # body_rows to header_rows. (This is a common case because many
            # tables in the wild have no <thead> or <tfoot>
            while body_rows and row_is_all_th(body_rows[0]):
                header_rows.append(body_rows.pop(0))

        # Expand colspan/rowspan so every logical cell appears explicitly.
        header = self._expand_colspan_rowspan(header_rows)
        body = self._expand_colspan_rowspan(body_rows)
        footer = self._expand_colspan_rowspan(footer_rows)

        return header, body, footer
def _expand_colspan_rowspan(self, rows):
    """
    Given a list of <tr>s, return a list of text rows.

    Parameters
    ----------
    rows : list of node-like
        List of <tr>s

    Returns
    -------
    list of list
        Each returned row is a list of str text.

    Notes
    -----
    Any cell with ``rowspan`` or ``colspan`` will have its contents copied
    to subsequent cells.
    """
    all_texts = []  # list of rows, each a list of str
    # Cells spilling into later rows via rowspan > 1, as
    # (column index, text, remaining row count) triples.
    remainder: list[tuple[int, str, int]] = []  # list of (index, text, nrows)

    for tr in rows:
        texts = []  # the output for this row
        next_remainder = []

        index = 0
        tds = self._parse_td(tr)
        for td in tds:
            # Append texts from previous rows with rowspan>1 that come
            # before this <td>
            while remainder and remainder[0][0] <= index:
                prev_i, prev_text, prev_rowspan = remainder.pop(0)
                texts.append(prev_text)
                if prev_rowspan > 1:
                    next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
                index += 1

            # Append the text from this <td>, colspan times
            text = _remove_whitespace(self._text_getter(td))
            # Missing or empty span attributes default to 1.
            rowspan = int(self._attr_getter(td, "rowspan") or 1)
            colspan = int(self._attr_getter(td, "colspan") or 1)

            for _ in range(colspan):
                texts.append(text)
                if rowspan > 1:
                    next_remainder.append((index, text, rowspan - 1))
                index += 1

        # Append texts from previous rows at the final position
        for prev_i, prev_text, prev_rowspan in remainder:
            texts.append(prev_text)
            if prev_rowspan > 1:
                next_remainder.append((prev_i, prev_text, prev_rowspan - 1))

        all_texts.append(texts)
        remainder = next_remainder

    # Append rows that only appear because the previous row had non-1
    # rowspan
    while remainder:
        next_remainder = []
        texts = []
        for prev_i, prev_text, prev_rowspan in remainder:
            texts.append(prev_text)
            if prev_rowspan > 1:
                next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
        all_texts.append(texts)
        remainder = next_remainder

    return all_texts
def _handle_hidden_tables(self, tbl_list, attr_name):
    """
    Return list of tables, potentially removing hidden elements

    Parameters
    ----------
    tbl_list : list of node-like
        Type of list elements will vary depending upon parser used
    attr_name : str
        Name of the accessor for retrieving HTML attributes

    Returns
    -------
    list of node-like
        Return type matches `tbl_list`
    """
    # When hidden elements were requested too, nothing needs filtering.
    if not self.displayed_only:
        return tbl_list

    visible = []
    for tbl in tbl_list:
        # Strip spaces so "display: none" and "display:none" compare equal.
        style = getattr(tbl, attr_name).get("style", "").replace(" ", "")
        if "display:none" not in style:
            visible.append(tbl)
    return visible
class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
    """
    HTML to DataFrame parser that uses BeautifulSoup under the hood.

    See Also
    --------
    pandas.io.html._HtmlFrameParser
    pandas.io.html._LxmlFrameParser

    Notes
    -----
    Documentation strings for this class are in the base class
    :class:`pandas.io.html._HtmlFrameParser`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from bs4 import SoupStrainer

        # Restrict bs4's attention to <table> elements only.
        self._strainer = SoupStrainer("table")

    def _parse_tables(self, doc, match, attrs):
        element_name = self._strainer.name
        tables = doc.find_all(element_name, attrs=attrs)

        if not tables:
            raise ValueError("No tables found")

        result = []
        unique_tables = set()
        tables = self._handle_hidden_tables(tables, "attrs")

        for table in tables:
            if self.displayed_only:
                # Remove hidden descendants in place before text matching.
                for elem in table.find_all(style=re.compile(r"display:\s*none")):
                    elem.decompose()

            if table not in unique_tables and table.find(text=match) is not None:
                result.append(table)
            unique_tables.add(table)

        if not result:
            raise ValueError(f"No tables found matching pattern {repr(match.pattern)}")
        return result

    def _text_getter(self, obj):
        return obj.text

    def _equals_tag(self, obj, tag):
        return obj.name == tag

    def _parse_td(self, row):
        return row.find_all(("td", "th"), recursive=False)

    def _parse_thead_tr(self, table):
        return table.select("thead tr")

    def _parse_tbody_tr(self, table):
        from_tbody = table.select("tbody tr")
        from_root = table.find_all("tr", recursive=False)
        # HTML spec: at most one of these lists has content
        return from_tbody + from_root

    def _parse_tfoot_tr(self, table):
        return table.select("tfoot tr")

    def _setup_build_doc(self):
        raw_text = _read(self.io)
        if not raw_text:
            raise ValueError(f"No text parsed from document: {self.io}")
        return raw_text

    def _build_doc(self):
        from bs4 import BeautifulSoup

        bdoc = self._setup_build_doc()
        # Decode explicitly when the caller supplied an encoding; otherwise
        # let BeautifulSoup sniff it via from_encoding.
        if isinstance(bdoc, bytes) and self.encoding is not None:
            udoc = bdoc.decode(self.encoding)
            from_encoding = None
        else:
            udoc = bdoc
            from_encoding = self.encoding
        return BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding)
def _build_xpath_expr(attrs) -> str:
"""
Build an xpath expression to simulate bs4's ability to pass in kwargs to
search for attributes when using the lxml parser.
Parameters
----------
attrs : dict
A dict of HTML attributes. These are NOT checked for validity.
Returns
-------
expr : unicode
An XPath expression that checks for the given HTML attributes.
"""
# give class attribute as class_ because class is a python keyword
if "class_" in attrs:
attrs["class"] = attrs.pop("class_")
s = " and ".join(f"@{k}={repr(v)}" for k, v in attrs.items())
return f"[{s}]"
# XPath namespace mapping that enables EXSLT regular-expression functions
# (the lxml parser's ``re:test(...)`` predicates).
_re_namespace = {"re": "http://exslt.org/regular-expressions"}
# URL schemes the lxml parser can fetch directly.
_valid_schemes = "http", "file", "ftp"
class _LxmlFrameParser(_HtmlFrameParser):
    """
    HTML to DataFrame parser that uses lxml under the hood.

    Warning
    -------
    This parser can only handle HTTP, FTP, and FILE urls.

    See Also
    --------
    _HtmlFrameParser
    _BeautifulSoupLxmlFrameParser

    Notes
    -----
    Documentation strings for this class are in the base class
    :class:`_HtmlFrameParser`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _text_getter(self, obj):
        return obj.text_content()

    def _parse_td(self, row):
        # Look for direct children only: the "row" element here may be a
        # <thead> or <tfoot> (see _parse_thead_tr).
        return row.xpath("./td|./th")

    def _parse_tables(self, doc, match, kwargs):
        pattern = match.pattern

        # 1. check all descendants for the given pattern and only search tables
        # 2. go up the tree until we find a table
        xpath_expr = f"//table//*[re:test(text(), {repr(pattern)})]/ancestor::table"

        # if any table attributes were given build an xpath expression to
        # search for them
        if kwargs:
            xpath_expr += _build_xpath_expr(kwargs)

        tables = doc.xpath(xpath_expr, namespaces=_re_namespace)

        tables = self._handle_hidden_tables(tables, "attrib")
        if self.displayed_only:
            for table in tables:
                # lxml utilizes XPATH 1.0 which does not have regex
                # support. As a result, we find all elements with a style
                # attribute and iterate them to check for display:none
                for elem in table.xpath(".//*[@style]"):
                    if "display:none" in elem.attrib.get("style", "").replace(" ", ""):
                        elem.getparent().remove(elem)

        if not tables:
            raise ValueError(f"No tables found matching regex {repr(pattern)}")
        return tables

    def _equals_tag(self, obj, tag):
        return obj.tag == tag

    def _build_doc(self):
        """
        Raises
        ------
        ValueError
            * If a URL that lxml cannot parse is passed.

        Exception
            * Any other ``Exception`` thrown. For example, trying to parse a
              URL that is syntactically correct on a machine with no internet
              connection will fail.

        See Also
        --------
        pandas.io.html._HtmlFrameParser._build_doc
        """
        from lxml.etree import XMLSyntaxError
        from lxml.html import (
            HTMLParser,
            fromstring,
            parse,
        )

        parser = HTMLParser(recover=True, encoding=self.encoding)

        try:
            if is_url(self.io):
                with urlopen(self.io) as f:
                    r = parse(f, parser=parser)
            else:
                # try to parse the input in the simplest way
                r = parse(self.io, parser=parser)
            try:
                r = r.getroot()
            except AttributeError:
                pass
        except (UnicodeDecodeError, OSError) as e:
            # if the input is a blob of html goop
            if not is_url(self.io):
                r = fromstring(self.io, parser=parser)
                try:
                    r = r.getroot()
                except AttributeError:
                    pass
            else:
                raise e
        else:
            if not hasattr(r, "text_content"):
                raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
        return r

    def _parse_thead_tr(self, table):
        rows = []

        for thead in table.xpath(".//thead"):
            rows.extend(thead.xpath("./tr"))

            # HACK: lxml does not clean up the clearly-erroneous
            # <thead><th>foo</th><th>bar</th></thead>. (Missing <tr>). Add
            # the <thead> and _pretend_ it's a <tr>; _parse_td() will find its
            # children as though it's a <tr>.
            #
            # Better solution would be to use html5lib.
            elements_at_root = thead.xpath("./td|./th")
            if elements_at_root:
                rows.append(thead)

        return rows

    def _parse_tbody_tr(self, table):
        from_tbody = table.xpath(".//tbody//tr")
        from_root = table.xpath("./tr")
        # HTML spec: at most one of these lists has content
        return from_tbody + from_root

    def _parse_tfoot_tr(self, table):
        return table.xpath(".//tfoot//tr")
def _expand_elements(body):
    """
    Pad ragged rows of *body* in place so every row has the same length.

    Rows shorter than the widest row are extended with empty strings.
    """
    if not body:
        return
    widths = [len(row) for row in body]
    widest = max(widths)
    for i, width in enumerate(widths):
        if width != widest:
            body[i] += [""] * (widest - width)
def _data_to_frame(**kwargs):
    """
    Convert one parsed ``(header, body, footer)`` table into a DataFrame.

    ``kwargs`` must contain ``data`` (the parsed table triple), ``header``
    and ``skiprows``; everything else is forwarded to ``TextParser``.
    """
    head, body, foot = kwargs.pop("data")
    header = kwargs.pop("header")
    kwargs["skiprows"] = _get_skiprows(kwargs["skiprows"])
    if head:
        body = head + body

        # Infer header when there is a <thead> or top <th>-only rows
        if header is None:
            if len(head) == 1:
                header = 0
            else:
                # ignore all-empty-text rows
                header = [i for i, row in enumerate(head) if any(text for text in row)]

    if foot:
        body += foot

    # fill out elements of body that are "ragged"
    _expand_elements(body)
    with TextParser(body, header=header, **kwargs) as tp:
        return tp.read()
# Registry mapping a user-supplied ``flavor`` to its parser class;
# ``None`` selects the default backend (lxml).
_valid_parsers = {
    "lxml": _LxmlFrameParser,
    None: _LxmlFrameParser,
    "html5lib": _BeautifulSoupHtml5LibFrameParser,
    "bs4": _BeautifulSoupHtml5LibFrameParser,
}
def _parser_dispatch(flavor):
    """
    Choose the parser based on the input flavor.

    Parameters
    ----------
    flavor : str
        The type of parser to use. This must be a valid backend.

    Returns
    -------
    cls : _HtmlFrameParser subclass
        The parser class based on the requested input flavor.

    Raises
    ------
    ValueError
        * If `flavor` is not a valid backend.
    ImportError
        * If you do not have the requested `flavor`
    """
    valid_parsers = list(_valid_parsers.keys())
    if flavor not in valid_parsers:
        raise ValueError(
            f"{repr(flavor)} is not a valid flavor, valid flavors are {valid_parsers}"
        )

    if flavor in ("bs4", "html5lib"):
        # Both bs4 and html5lib are required for the BeautifulSoup backend.
        if not _HAS_HTML5LIB:
            raise ImportError("html5lib not found, please install it")
        if not _HAS_BS4:
            raise ImportError("BeautifulSoup4 (bs4) not found, please install it")
        # Although we call this above, we want to raise here right before use.
        bs4 = import_optional_dependency("bs4")  # noqa:F841
    else:
        if not _HAS_LXML:
            raise ImportError("lxml not found, please install it")
    return _valid_parsers[flavor]
def _print_as_set(s) -> str:
arg = ", ".join(pprint_thing(el) for el in s)
return f"{{{arg}}}"
def _validate_flavor(flavor):
    """
    Normalize *flavor* to a non-empty tuple of known parser backend names.

    Raises TypeError for non-string iterables and ValueError when no
    requested flavor is a valid backend.
    """
    if flavor is None:
        flavor = "lxml", "bs4"
    elif isinstance(flavor, str):
        flavor = (flavor,)
    elif isinstance(flavor, abc.Iterable):
        if not all(isinstance(flav, str) for flav in flavor):
            raise TypeError(
                f"Object of type {repr(type(flavor).__name__)} "
                f"is not an iterable of strings"
            )
    else:
        # NOTE(review): the isinstance(flavor, str) test below is always
        # False here (strings were handled above), so str(flavor) is the
        # only live path.
        msg = repr(flavor) if isinstance(flavor, str) else str(flavor)
        msg += " is not a valid flavor"
        raise ValueError(msg)

    flavor = tuple(flavor)
    valid_flavors = set(_valid_parsers)
    flavor_set = set(flavor)

    # At least one requested flavor must be a known backend.
    if not flavor_set & valid_flavors:
        raise ValueError(
            f"{_print_as_set(flavor_set)} is not a valid set of flavors, valid "
            f"flavors are {_print_as_set(valid_flavors)}"
        )
    return flavor
def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
    """
    Try each requested flavor in turn; return the list of parsed DataFrames.

    The last ValueError is re-raised when every flavor fails.
    """
    flavor = _validate_flavor(flavor)
    compiled_match = re.compile(match)  # you can pass a compiled regex here

    retained = None
    for flav in flavor:
        parser = _parser_dispatch(flav)
        p = parser(io, compiled_match, attrs, encoding, displayed_only)

        try:
            tables = p.parse_tables()
        except ValueError as caught:
            # if `io` is an io-like object, check if it's seekable
            # and try to rewind it before trying the next parser
            if hasattr(io, "seekable") and io.seekable():
                io.seek(0)
            elif hasattr(io, "seekable") and not io.seekable():
                # if we couldn't rewind it, let the user know
                raise ValueError(
                    f"The flavor {flav} failed to parse your input. "
                    "Since you passed a non-rewindable file "
                    "object, we can't rewind it to try "
                    "another parser. Try read_html() with a different flavor."
                ) from caught

            retained = caught
        else:
            break
    else:
        # Loop finished without a break: every flavor failed.
        assert retained is not None  # for mypy
        raise retained

    ret = []
    for table in tables:
        try:
            ret.append(_data_to_frame(data=table, **kwargs))
        except EmptyDataError:  # empty table
            continue
    return ret
@deprecate_nonkeyword_arguments(version="2.0")
def read_html(
    io: FilePathOrBuffer,
    match: str | Pattern = ".+",
    flavor: str | None = None,
    header: int | Sequence[int] | None = None,
    index_col: int | Sequence[int] | None = None,
    skiprows: int | Sequence[int] | slice | None = None,
    attrs: dict[str, str] | None = None,
    parse_dates: bool = False,
    thousands: str | None = ",",
    encoding: str | None = None,
    decimal: str = ".",
    converters: dict | None = None,
    na_values=None,
    keep_default_na: bool = True,
    displayed_only: bool = True,
) -> list[DataFrame]:
    r"""
    Read HTML tables into a ``list`` of ``DataFrame`` objects.

    Parameters
    ----------
    io : str, path object or file-like object
        A URL, a file-like object, or a raw string containing HTML. Note that
        lxml only accepts the http, ftp and file url protocols. If you have a
        URL that starts with ``'https'`` you might try removing the ``'s'``.

    match : str or compiled regular expression, optional
        The set of tables containing text matching this regex or string will be
        returned. Unless the HTML is extremely simple you will probably need to
        pass a non-empty string here. Defaults to '.+' (match any non-empty
        string). The default value will return all tables contained on a page.
        This value is converted to a regular expression so that there is
        consistent behavior between Beautiful Soup and lxml.

    flavor : str, optional
        The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
        each other, they are both there for backwards compatibility. The
        default of ``None`` tries to use ``lxml`` to parse and if that fails it
        falls back on ``bs4`` + ``html5lib``.

    header : int or list-like, optional
        The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
        make the columns headers.

    index_col : int or list-like, optional
        The column (or list of columns) to use to create the index.

    skiprows : int, list-like or slice, optional
        Number of rows to skip after parsing the column integer. 0-based. If a
        sequence of integers or a slice is given, will skip the rows indexed by
        that sequence. Note that a single element sequence means 'skip the nth
        row' whereas an integer means 'skip n rows'.

    attrs : dict, optional
        This is a dictionary of attributes that you can pass to use to identify
        the table in the HTML. These are not checked for validity before being
        passed to lxml or Beautiful Soup. However, these attributes must be
        valid HTML table attributes to work correctly. For example, ::

            attrs = {'id': 'table'}

        is a valid attribute dictionary because the 'id' HTML tag attribute is
        a valid HTML attribute for *any* HTML tag as per `this document
        <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. ::

            attrs = {'asdf': 'table'}

        is *not* a valid attribute dictionary because 'asdf' is not a valid
        HTML attribute even if it is a valid XML attribute.  Valid HTML 4.01
        table attributes can be found `here
        <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
        working draft of the HTML 5 spec can be found `here
        <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the
        latest information on table attributes for the modern web.

    parse_dates : bool, optional
        See :func:`~read_csv` for more details.

    thousands : str, optional
        Separator to use to parse thousands. Defaults to ``','``.

    encoding : str, optional
        The encoding used to decode the web page. Defaults to ``None``.``None``
        preserves the previous encoding behavior, which depends on the
        underlying parser library (e.g., the parser library will try to use
        the encoding provided by the document).

    decimal : str, default '.'
        Character to recognize as decimal point (e.g. use ',' for European
        data).

    converters : dict, default None
        Dict of functions for converting values in certain columns. Keys can
        either be integers or column labels, values are functions that take one
        input argument, the cell (not column) content, and return the
        transformed content.

    na_values : iterable, default None
        Custom NA values.

    keep_default_na : bool, default True
        If na_values are specified and keep_default_na is False the default NaN
        values are overridden, otherwise they're appended to.

    displayed_only : bool, default True
        Whether elements with "display: none" should be parsed.

    Returns
    -------
    dfs
        A list of DataFrames.

    See Also
    --------
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Notes
    -----
    Before using this function you should read the :ref:`gotchas about the
    HTML parsing libraries <io.html.gotchas>`.

    Expect to do some cleanup after you call this function. For example, you
    might need to manually assign column names if the column names are
    converted to NaN when you pass the `header=0` argument. We try to assume as
    little as possible about the structure of the table and push the
    idiosyncrasies of the HTML contained in the table to the user.

    This function searches for ``<table>`` elements and only for ``<tr>``
    and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
    element in the table. ``<td>`` stands for "table data". This function
    attempts to properly handle ``colspan`` and ``rowspan`` attributes.
    If the function has a ``<thead>`` argument, it is used to construct
    the header, otherwise the function attempts to find the header within
    the body (by putting rows with only ``<th>`` elements into the header).

    Similar to :func:`~read_csv` the `header` argument is applied
    **after** `skiprows` is applied.

    This function will *always* return a list of :class:`DataFrame` *or*
    it will fail, e.g., it will *not* return an empty list.

    Examples
    --------
    See the :ref:`read_html documentation in the IO section of the docs
    <io.read_html>` for some examples of reading in HTML tables.
    """
    _importers()

    # Type check here. We don't want to parse only to fail because of an
    # invalid value of an integer skiprows.
    if isinstance(skiprows, numbers.Integral) and skiprows < 0:
        raise ValueError(
            "cannot skip rows starting from the end of the "
            "data (you passed a negative value)"
        )
    validate_header_arg(header)

    io = stringify_path(io)

    return _parse(
        flavor=flavor,
        io=io,
        match=match,
        header=header,
        index_col=index_col,
        skiprows=skiprows,
        parse_dates=parse_dates,
        thousands=thousands,
        attrs=attrs,
        encoding=encoding,
        decimal=decimal,
        converters=converters,
        na_values=na_values,
        keep_default_na=keep_default_na,
        displayed_only=displayed_only,
    )
# (garbled "|" artifact removed: separator between two duplicated copies of this module)
"""
:mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
from __future__ import annotations
from collections import abc
import numbers
import os
import re
from typing import (
Pattern,
Sequence,
)
from pandas._typing import FilePathOrBuffer
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
)
from pandas.util._decorators import deprecate_nonkeyword_arguments
from pandas.core.dtypes.common import is_list_like
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.io.common import (
is_url,
stringify_path,
urlopen,
validate_header_arg,
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
# Lazy-import state: flipped to True by _importers() on first use.
_IMPORTS = False
# Availability flags for the optional parser backends, set by _importers().
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def _importers():
    """Probe optional parser backends once and record their availability."""
    # import things we need
    # but make this done on a first use basis

    global _IMPORTS
    if _IMPORTS:
        return

    global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
    bs4 = import_optional_dependency("bs4", errors="ignore")
    _HAS_BS4 = bs4 is not None

    lxml = import_optional_dependency("lxml.etree", errors="ignore")
    _HAS_LXML = lxml is not None

    html5lib = import_optional_dependency("html5lib", errors="ignore")
    _HAS_HTML5LIB = html5lib is not None

    _IMPORTS = True
#############
# READ HTML #
#############
_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}")
def _remove_whitespace(s: str, regex=_RE_WHITESPACE) -> str:
"""
Replace extra whitespace inside of a string with a single space.
Parameters
----------
s : str or unicode
The string from which to remove extra whitespace.
regex : re.Pattern
The regular expression to use to remove extra whitespace.
Returns
-------
subd : str or unicode
`s` with all extra whitespace replaced with a single space.
"""
return regex.sub(" ", s.strip())
def _get_skiprows(skiprows):
"""
Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
"""
if isinstance(skiprows, slice):
start, step = skiprows.start or 0, skiprows.step or 1
return list(range(start, skiprows.stop, step))
elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):
return skiprows
elif skiprows is None:
return 0
raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows")
def _read(obj):
"""
Try to read from a url, file or string.
Parameters
----------
obj : str, unicode, or file-like
Returns
-------
raw_text : str
"""
if is_url(obj):
with urlopen(obj) as url:
text = url.read()
elif hasattr(obj, "read"):
text = obj.read()
elif isinstance(obj, (str, bytes)):
text = obj
try:
if os.path.isfile(text):
with open(text, "rb") as f:
return f.read()
except (TypeError, ValueError):
pass
else:
raise TypeError(f"Cannot read object of type '{type(obj).__name__}'")
return text
class _HtmlFrameParser:
    """
    Base class for parsers that parse HTML into DataFrames.

    Parameters
    ----------
    io : str or file-like
        This can be either a string of raw HTML, a valid URL using the HTTP,
        FTP, or FILE protocols or a file-like object.

    match : str or regex
        The text to match in the document.

    attrs : dict
        List of HTML <table> element attributes to match.

    encoding : str
        Encoding to be used by parser

    displayed_only : bool
        Whether or not items with "display:none" should be ignored

    Attributes
    ----------
    io : str or file-like
        raw HTML, URL, or file-like object

    match : regex
        The text to match in the raw HTML

    attrs : dict-like
        A dictionary of valid table attributes to use to search for table
        elements.

    encoding : str
        Encoding to be used by parser

    displayed_only : bool
        Whether or not items with "display:none" should be ignored

    Notes
    -----
    To subclass this class effectively you must override the following methods:
        * :func:`_build_doc`
        * :func:`_attr_getter`
        * :func:`_text_getter`
        * :func:`_parse_td`
        * :func:`_parse_thead_tr`
        * :func:`_parse_tbody_tr`
        * :func:`_parse_tfoot_tr`
        * :func:`_parse_tables`
        * :func:`_equals_tag`

    See each method's respective documentation for details on their
    functionality.
    """

    def __init__(self, io, match, attrs, encoding, displayed_only):
        self.io = io
        self.match = match
        self.attrs = attrs
        self.encoding = encoding
        self.displayed_only = displayed_only

    def parse_tables(self):
        """
        Parse and return all tables from the DOM.

        Returns
        -------
        list of parsed (header, body, footer) tuples from tables.
        """
        tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
        # Lazy: each table is parsed only when the caller iterates.
        return (self._parse_thead_tbody_tfoot(table) for table in tables)

    def _attr_getter(self, obj, attr):
        """
        Return the attribute value of an individual DOM node.

        Parameters
        ----------
        obj : node-like
            A DOM node.

        attr : str or unicode
            The attribute, such as "colspan"

        Returns
        -------
        str or unicode
            The attribute value.
        """
        # Both lxml and BeautifulSoup have the same implementation:
        return obj.get(attr)

    def _text_getter(self, obj):
        """
        Return the text of an individual DOM node.

        Parameters
        ----------
        obj : node-like
            A DOM node.

        Returns
        -------
        text : str or unicode
            The text from an individual DOM node.
        """
        raise AbstractMethodError(self)

    def _parse_td(self, obj):
        """
        Return the td elements from a row element.

        Parameters
        ----------
        obj : node-like
            A DOM <tr> node.

        Returns
        -------
        list of node-like
            These are the elements of each row, i.e., the columns.
        """
        raise AbstractMethodError(self)

    def _parse_thead_tr(self, table):
        """
        Return the list of thead row elements from the parsed table element.

        Parameters
        ----------
        table : a table element that contains zero or more thead elements.

        Returns
        -------
        list of node-like
            These are the <tr> row elements of a table.
        """
        raise AbstractMethodError(self)

    def _parse_tbody_tr(self, table):
        """
        Return the list of tbody row elements from the parsed table element.

        HTML5 table bodies consist of either 0 or more <tbody> elements (which
        only contain <tr> elements) or 0 or more <tr> elements. This method
        checks for both structures.

        Parameters
        ----------
        table : a table element that contains row elements.

        Returns
        -------
        list of node-like
            These are the <tr> row elements of a table.
        """
        raise AbstractMethodError(self)

    def _parse_tfoot_tr(self, table):
        """
        Return the list of tfoot row elements from the parsed table element.

        Parameters
        ----------
        table : a table element that contains row elements.

        Returns
        -------
        list of node-like
            These are the <tr> row elements of a table.
        """
        raise AbstractMethodError(self)

    def _parse_tables(self, doc, match, attrs):
        """
        Return all tables from the parsed DOM.

        Parameters
        ----------
        doc : the DOM from which to parse the table element.

        match : str or regular expression
            The text to search for in the DOM tree.

        attrs : dict
            A dictionary of table attributes that can be used to disambiguate
            multiple tables on a page.

        Raises
        ------
        ValueError : `match` does not match any text in the document.

        Returns
        -------
        list of node-like
            HTML <table> elements to be parsed into raw data.
        """
        raise AbstractMethodError(self)

    def _equals_tag(self, obj, tag):
        """
        Return whether an individual DOM node matches a tag

        Parameters
        ----------
        obj : node-like
            A DOM node.

        tag : str
            Tag name to be checked for equality.

        Returns
        -------
        boolean
            Whether `obj`'s tag name is `tag`
        """
        raise AbstractMethodError(self)

    def _build_doc(self):
        """
        Return a tree-like object that can be used to iterate over the DOM.

        Returns
        -------
        node-like
            The DOM from which to parse the table element.
        """
        raise AbstractMethodError(self)

    def _parse_thead_tbody_tfoot(self, table_html):
        """
        Given a table, return parsed header, body, and foot.

        Parameters
        ----------
        table_html : node-like

        Returns
        -------
        tuple of (header, body, footer), each a list of list-of-text rows.

        Notes
        -----
        Header and body are lists-of-lists. Top level list is a list of
        rows. Each row is a list of str text.

        Logic: Use <thead>, <tbody>, <tfoot> elements to identify
               header, body, and footer, otherwise:
          - Put all rows into body
          - Move rows from top of body to header only if
            all elements inside row are <th>
          - Move rows from bottom of body to footer only if
            all elements inside row are <th>
        """
        header_rows = self._parse_thead_tr(table_html)
        body_rows = self._parse_tbody_tr(table_html)
        footer_rows = self._parse_tfoot_tr(table_html)

        def row_is_all_th(row):
            # A row is header/footer material only if every cell is a <th>.
            return all(self._equals_tag(t, "th") for t in self._parse_td(row))

        if not header_rows:
            # The table has no <thead>. Move the top all-<th> rows from
            # body_rows to header_rows. (This is a common case because many
            # tables in the wild have no <thead> or <tfoot>.)
            while body_rows and row_is_all_th(body_rows[0]):
                header_rows.append(body_rows.pop(0))

        header = self._expand_colspan_rowspan(header_rows)
        body = self._expand_colspan_rowspan(body_rows)
        footer = self._expand_colspan_rowspan(footer_rows)

        return header, body, footer

    def _expand_colspan_rowspan(self, rows):
        """
        Given a list of <tr>s, return a list of text rows.

        Parameters
        ----------
        rows : list of node-like
            List of <tr>s

        Returns
        -------
        list of list
            Each returned row is a list of str text.

        Notes
        -----
        Any cell with ``rowspan`` or ``colspan`` will have its contents copied
        to subsequent cells.
        """
        all_texts = []  # list of rows, each a list of str
        # Cells carried over from earlier rows via rowspan > 1.
        remainder: list[tuple[int, str, int]] = []  # list of (index, text, nrows)

        for tr in rows:
            texts = []  # the output for this row
            next_remainder = []

            index = 0
            tds = self._parse_td(tr)
            for td in tds:
                # Append texts from previous rows with rowspan>1 that come
                # before this <td>
                while remainder and remainder[0][0] <= index:
                    prev_i, prev_text, prev_rowspan = remainder.pop(0)
                    texts.append(prev_text)
                    if prev_rowspan > 1:
                        next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
                    index += 1

                # Append the text from this <td>, colspan times
                text = _remove_whitespace(self._text_getter(td))
                # Missing or empty span attributes default to 1.
                rowspan = int(self._attr_getter(td, "rowspan") or 1)
                colspan = int(self._attr_getter(td, "colspan") or 1)

                for _ in range(colspan):
                    texts.append(text)
                    if rowspan > 1:
                        next_remainder.append((index, text, rowspan - 1))
                    index += 1

            # Append texts from previous rows at the final position
            for prev_i, prev_text, prev_rowspan in remainder:
                texts.append(prev_text)
                if prev_rowspan > 1:
                    next_remainder.append((prev_i, prev_text, prev_rowspan - 1))

            all_texts.append(texts)
            remainder = next_remainder

        # Append rows that only appear because the previous row had non-1
        # rowspan
        while remainder:
            next_remainder = []
            texts = []
            for prev_i, prev_text, prev_rowspan in remainder:
                texts.append(prev_text)
                if prev_rowspan > 1:
                    next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
            all_texts.append(texts)
            remainder = next_remainder

        return all_texts

    def _handle_hidden_tables(self, tbl_list, attr_name):
        """
        Return list of tables, potentially removing hidden elements

        Parameters
        ----------
        tbl_list : list of node-like
            Type of list elements will vary depending upon parser used

        attr_name : str
            Name of the accessor for retrieving HTML attributes

        Returns
        -------
        list of node-like
            Return type matches `tbl_list`
        """
        if not self.displayed_only:
            return tbl_list

        # Strip spaces so "display: none" and "display:none" compare equal.
        return [
            x
            for x in tbl_list
            if "display:none"
            not in getattr(x, attr_name).get("style", "").replace(" ", "")
        ]
class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
    """
    HTML to DataFrame parser that uses BeautifulSoup under the hood.

    See Also
    --------
    pandas.io.html._HtmlFrameParser
    pandas.io.html._LxmlFrameParser

    Notes
    -----
    Documentation strings for this class are in the base class
    :class:`pandas.io.html._HtmlFrameParser`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from bs4 import SoupStrainer

        # Restrict bs4's attention to <table> elements only.
        self._strainer = SoupStrainer("table")

    def _parse_tables(self, doc, match, attrs):
        element_name = self._strainer.name
        tables = doc.find_all(element_name, attrs=attrs)

        if not tables:
            raise ValueError("No tables found")

        result = []
        unique_tables = set()
        tables = self._handle_hidden_tables(tables, "attrs")

        for table in tables:
            if self.displayed_only:
                # Remove hidden descendants in place before text matching.
                for elem in table.find_all(style=re.compile(r"display:\s*none")):
                    elem.decompose()

            if table not in unique_tables and table.find(text=match) is not None:
                result.append(table)
            unique_tables.add(table)

        if not result:
            raise ValueError(f"No tables found matching pattern {repr(match.pattern)}")
        return result

    def _text_getter(self, obj):
        return obj.text

    def _equals_tag(self, obj, tag):
        return obj.name == tag

    def _parse_td(self, row):
        return row.find_all(("td", "th"), recursive=False)

    def _parse_thead_tr(self, table):
        return table.select("thead tr")

    def _parse_tbody_tr(self, table):
        from_tbody = table.select("tbody tr")
        from_root = table.find_all("tr", recursive=False)
        # HTML spec: at most one of these lists has content
        return from_tbody + from_root

    def _parse_tfoot_tr(self, table):
        return table.select("tfoot tr")

    def _setup_build_doc(self):
        raw_text = _read(self.io)
        if not raw_text:
            raise ValueError(f"No text parsed from document: {self.io}")
        return raw_text

    def _build_doc(self):
        from bs4 import BeautifulSoup

        bdoc = self._setup_build_doc()
        # Decode explicitly when the caller supplied an encoding; otherwise
        # let BeautifulSoup sniff it via from_encoding.
        if isinstance(bdoc, bytes) and self.encoding is not None:
            udoc = bdoc.decode(self.encoding)
            from_encoding = None
        else:
            udoc = bdoc
            from_encoding = self.encoding
        return BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding)
def _build_xpath_expr(attrs) -> str:
"""
Build an xpath expression to simulate bs4's ability to pass in kwargs to
search for attributes when using the lxml parser.
Parameters
----------
attrs : dict
A dict of HTML attributes. These are NOT checked for validity.
Returns
-------
expr : unicode
An XPath expression that checks for the given HTML attributes.
"""
# give class attribute as class_ because class is a python keyword
if "class_" in attrs:
attrs["class"] = attrs.pop("class_")
s = " and ".join(f"@{k}={repr(v)}" for k, v in attrs.items())
return f"[{s}]"
# XPath namespace map enabling EXSLT regular-expression support (re:test).
_re_namespace = {"re": "http://exslt.org/regular-expressions"}
# URL schemes the lxml-based parser is able to fetch directly.
_valid_schemes = "http", "file", "ftp"
class _LxmlFrameParser(_HtmlFrameParser):
    """
    HTML to DataFrame parser that uses lxml under the hood.

    Warning
    -------
    This parser can only handle HTTP, FTP, and FILE urls.

    See Also
    --------
    _HtmlFrameParser
    _BeautifulSoupLxmlFrameParser

    Notes
    -----
    Documentation strings for this class are in the base class
    :class:`_HtmlFrameParser`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _text_getter(self, obj):
        # lxml elements expose their recursive text via text_content().
        return obj.text_content()

    def _parse_td(self, row):
        # Look for direct children only: the "row" element here may be a
        # <thead> or <tfoot> (see _parse_thead_tr).
        return row.xpath("./td|./th")

    def _parse_tables(self, doc, match, kwargs):
        pattern = match.pattern

        # 1. check all descendants for the given pattern and only search tables
        # 2. go up the tree until we find a table
        xpath_expr = f"//table//*[re:test(text(), {repr(pattern)})]/ancestor::table"

        # if any table attributes were given build an xpath expression to
        # search for them
        if kwargs:
            xpath_expr += _build_xpath_expr(kwargs)

        tables = doc.xpath(xpath_expr, namespaces=_re_namespace)

        tables = self._handle_hidden_tables(tables, "attrib")
        if self.displayed_only:
            for table in tables:
                # lxml utilizes XPATH 1.0 which does not have regex
                # support. As a result, we find all elements with a style
                # attribute and iterate them to check for display:none
                for elem in table.xpath(".//*[@style]"):
                    if "display:none" in elem.attrib.get("style", "").replace(" ", ""):
                        elem.getparent().remove(elem)

        if not tables:
            raise ValueError(f"No tables found matching regex {repr(pattern)}")
        return tables

    def _equals_tag(self, obj, tag):
        # lxml stores an element's tag name on ``.tag``.
        return obj.tag == tag

    def _build_doc(self):
        """
        Parse ``self.io`` into an lxml element tree, recovering from
        malformed markup where possible.

        Raises
        ------
        ValueError
            * If a URL that lxml cannot parse is passed.

        Exception
            * Any other ``Exception`` thrown. For example, trying to parse a
              URL that is syntactically correct on a machine with no internet
              connection will fail.

        See Also
        --------
        pandas.io.html._HtmlFrameParser._build_doc
        """
        from lxml.etree import XMLSyntaxError
        from lxml.html import (
            HTMLParser,
            fromstring,
            parse,
        )

        # recover=True lets lxml repair malformed markup rather than fail.
        parser = HTMLParser(recover=True, encoding=self.encoding)

        try:
            if is_url(self.io):
                with urlopen(self.io) as f:
                    r = parse(f, parser=parser)
            else:
                # try to parse the input in the simplest way
                r = parse(self.io, parser=parser)
            try:
                r = r.getroot()
            except AttributeError:
                pass
        except (UnicodeDecodeError, OSError) as e:
            # if the input is a blob of html goop
            if not is_url(self.io):
                r = fromstring(self.io, parser=parser)

                try:
                    r = r.getroot()
                except AttributeError:
                    pass
            else:
                raise e
        else:
            if not hasattr(r, "text_content"):
                raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
        return r

    def _parse_thead_tr(self, table):
        rows = []

        for thead in table.xpath(".//thead"):
            rows.extend(thead.xpath("./tr"))

            # HACK: lxml does not clean up the clearly-erroneous
            # <thead><th>foo</th><th>bar</th></thead>. (Missing <tr>). Add
            # the <thead> and _pretend_ it's a <tr>; _parse_td() will find its
            # children as though it's a <tr>.
            #
            # Better solution would be to use html5lib.
            elements_at_root = thead.xpath("./td|./th")
            if elements_at_root:
                rows.append(thead)

        return rows

    def _parse_tbody_tr(self, table):
        from_tbody = table.xpath(".//tbody//tr")
        from_root = table.xpath("./tr")
        # HTML spec: at most one of these lists has content
        return from_tbody + from_root

    def _parse_tfoot_tr(self, table):
        return table.xpath(".//tfoot//tr")
def _expand_elements(body):
    """Pad each ragged row of *body* (in place) with "" up to the max width."""
    row_lengths = create_series_with_explicit_dtype(
        [len(row) for row in body], dtype_if_empty=object
    )
    widest = row_lengths.max()
    # Only rows shorter than the widest one need padding.
    for idx, length in row_lengths[row_lengths != widest].items():
        body[idx] += [""] * (widest - length)
def _data_to_frame(**kwargs):
    """Convert one parsed (header, body, footer) table into a DataFrame."""
    head, body, foot = kwargs.pop("data")
    header = kwargs.pop("header")
    kwargs["skiprows"] = _get_skiprows(kwargs["skiprows"])

    if head:
        body = head + body
        # Infer header when there is a <thead> or top <th>-only rows
        if header is None:
            if len(head) == 1:
                header = 0
            else:
                # ignore all-empty-text rows
                header = [
                    i for i, row in enumerate(head) if any(text for text in row)
                ]

    if foot:
        body += foot

    # fill out elements of body that are "ragged"
    _expand_elements(body)
    with TextParser(body, header=header, **kwargs) as tp:
        return tp.read()
# Map of user-facing flavor names to parser implementations; ``None``
# selects the default (lxml) backend. "bs4" and "html5lib" are synonyms.
_valid_parsers = {
    "lxml": _LxmlFrameParser,
    None: _LxmlFrameParser,
    "html5lib": _BeautifulSoupHtml5LibFrameParser,
    "bs4": _BeautifulSoupHtml5LibFrameParser,
}
def _parser_dispatch(flavor):
    """
    Choose the parser based on the input flavor.

    Parameters
    ----------
    flavor : str
        The type of parser to use. This must be a valid backend.

    Returns
    -------
    cls : _HtmlFrameParser subclass
        The parser class based on the requested input flavor.

    Raises
    ------
    ValueError
        * If `flavor` is not a valid backend.
    ImportError
        * If you do not have the requested `flavor`
    """
    valid_parsers = list(_valid_parsers.keys())
    if flavor not in valid_parsers:
        raise ValueError(
            f"{repr(flavor)} is not a valid flavor, valid flavors are {valid_parsers}"
        )

    if flavor not in ("bs4", "html5lib"):
        # The default / "lxml" backends only require lxml itself.
        if not _HAS_LXML:
            raise ImportError("lxml not found, please install it")
        return _valid_parsers[flavor]

    if not _HAS_HTML5LIB:
        raise ImportError("html5lib not found, please install it")
    if not _HAS_BS4:
        raise ImportError("BeautifulSoup4 (bs4) not found, please install it")
    # Although we call this above, we want to raise here right before use.
    import_optional_dependency("bs4")
    return _valid_parsers[flavor]
def _print_as_set(s) -> str:
    """Render an iterable as a ``{a, b, c}``-style display string."""
    return "{" + ", ".join(pprint_thing(el) for el in s) + "}"
def _validate_flavor(flavor):
    """Normalize *flavor* to a tuple of parser names, validating the input.

    ``None`` becomes the default ("lxml", "bs4") preference order; a bare
    string becomes a 1-tuple. Raises TypeError/ValueError on bad input.
    """
    if flavor is None:
        flavor = "lxml", "bs4"
    elif isinstance(flavor, str):
        flavor = (flavor,)
    elif isinstance(flavor, abc.Iterable):
        if not all(isinstance(flav, str) for flav in flavor):
            raise TypeError(
                f"Object of type {repr(type(flavor).__name__)} "
                f"is not an iterable of strings"
            )
    else:
        msg = repr(flavor) if isinstance(flavor, str) else str(flavor)
        msg += " is not a valid flavor"
        raise ValueError(msg)

    flavor = tuple(flavor)
    requested = set(flavor)
    known = set(_valid_parsers)
    # At least one requested flavor must be a backend we know about.
    if not requested & known:
        raise ValueError(
            f"{_print_as_set(requested)} is not a valid set of flavors, valid "
            f"flavors are {_print_as_set(known)}"
        )
    return flavor
def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
    """Try each flavor's parser on *io* in turn and return the DataFrames.

    The first parser that succeeds wins; if every flavor fails, the last
    parser's ValueError is re-raised.
    """
    flavor = _validate_flavor(flavor)
    compiled_match = re.compile(match)  # you can pass a compiled regex here

    retained = None
    for flav in flavor:
        parser = _parser_dispatch(flav)
        p = parser(io, compiled_match, attrs, encoding, displayed_only)

        try:
            tables = p.parse_tables()
        except ValueError as caught:
            # if `io` is an io-like object, check if it's seekable
            # and try to rewind it before trying the next parser
            if hasattr(io, "seekable") and io.seekable():
                io.seek(0)
            elif hasattr(io, "seekable") and not io.seekable():
                # if we couldn't rewind it, let the user know
                raise ValueError(
                    f"The flavor {flav} failed to parse your input. "
                    "Since you passed a non-rewindable file "
                    "object, we can't rewind it to try "
                    "another parser. Try read_html() with a different flavor."
                ) from caught

            # Remember the failure so it can be re-raised if no flavor works.
            retained = caught
        else:
            break
    else:
        # for/else: the loop exhausted without `break`, i.e. every flavor failed.
        assert retained is not None  # for mypy
        raise retained

    ret = []
    for table in tables:
        try:
            ret.append(_data_to_frame(data=table, **kwargs))
        except EmptyDataError:  # empty table
            continue
    return ret
@deprecate_nonkeyword_arguments(version="2.0")
def read_html(
    io: FilePathOrBuffer,
    match: str | Pattern = ".+",
    flavor: str | None = None,
    header: int | Sequence[int] | None = None,
    index_col: int | Sequence[int] | None = None,
    skiprows: int | Sequence[int] | slice | None = None,
    attrs: dict[str, str] | None = None,
    parse_dates: bool = False,
    thousands: str | None = ",",
    encoding: str | None = None,
    decimal: str = ".",
    converters: dict | None = None,
    na_values=None,
    keep_default_na: bool = True,
    displayed_only: bool = True,
) -> list[DataFrame]:
    r"""
    Read HTML tables into a ``list`` of ``DataFrame`` objects.

    Parameters
    ----------
    io : str, path object or file-like object
        A URL, a file-like object, or a raw string containing HTML. Note that
        lxml only accepts the http, ftp and file url protocols. If you have a
        URL that starts with ``'https'`` you might try removing the ``'s'``.

    match : str or compiled regular expression, optional
        The set of tables containing text matching this regex or string will be
        returned. Unless the HTML is extremely simple you will probably need to
        pass a non-empty string here. Defaults to '.+' (match any non-empty
        string). The default value will return all tables contained on a page.
        This value is converted to a regular expression so that there is
        consistent behavior between Beautiful Soup and lxml.

    flavor : str, optional
        The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
        each other, they are both there for backwards compatibility. The
        default of ``None`` tries to use ``lxml`` to parse and if that fails it
        falls back on ``bs4`` + ``html5lib``.

    header : int or list-like, optional
        The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
        make the columns headers.

    index_col : int or list-like, optional
        The column (or list of columns) to use to create the index.

    skiprows : int, list-like or slice, optional
        Number of rows to skip after parsing the column integer. 0-based. If a
        sequence of integers or a slice is given, will skip the rows indexed by
        that sequence.  Note that a single element sequence means 'skip the nth
        row' whereas an integer means 'skip n rows'.

    attrs : dict, optional
        This is a dictionary of attributes that you can pass to use to identify
        the table in the HTML. These are not checked for validity before being
        passed to lxml or Beautiful Soup. However, these attributes must be
        valid HTML table attributes to work correctly. For example, ::

            attrs = {'id': 'table'}

        is a valid attribute dictionary because the 'id' HTML tag attribute is
        a valid HTML attribute for *any* HTML tag as per `this document
        <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. ::

            attrs = {'asdf': 'table'}

        is *not* a valid attribute dictionary because 'asdf' is not a valid
        HTML attribute even if it is a valid XML attribute.  Valid HTML 4.01
        table attributes can be found `here
        <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
        working draft of the HTML 5 spec can be found `here
        <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the
        latest information on table attributes for the modern web.

    parse_dates : bool, optional
        See :func:`~read_csv` for more details.

    thousands : str, optional
        Separator to use to parse thousands. Defaults to ``','``.

    encoding : str, optional
        The encoding used to decode the web page. Defaults to ``None``.``None``
        preserves the previous encoding behavior, which depends on the
        underlying parser library (e.g., the parser library will try to use
        the encoding provided by the document).

    decimal : str, default '.'
        Character to recognize as decimal point (e.g. use ',' for European
        data).

    converters : dict, default None
        Dict of functions for converting values in certain columns. Keys can
        either be integers or column labels, values are functions that take one
        input argument, the cell (not column) content, and return the
        transformed content.

    na_values : iterable, default None
        Custom NA values.

    keep_default_na : bool, default True
        If na_values are specified and keep_default_na is False the default NaN
        values are overridden, otherwise they're appended to.

    displayed_only : bool, default True
        Whether elements with "display: none" should be parsed.

    Returns
    -------
    dfs
        A list of DataFrames.

    See Also
    --------
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Notes
    -----
    Before using this function you should read the :ref:`gotchas about the
    HTML parsing libraries <io.html.gotchas>`.

    Expect to do some cleanup after you call this function. For example, you
    might need to manually assign column names if the column names are
    converted to NaN when you pass the `header=0` argument. We try to assume as
    little as possible about the structure of the table and push the
    idiosyncrasies of the HTML contained in the table to the user.

    This function searches for ``<table>`` elements and only for ``<tr>``
    and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
    element in the table. ``<td>`` stands for "table data". This function
    attempts to properly handle ``colspan`` and ``rowspan`` attributes.
    If the function has a ``<thead>`` argument, it is used to construct
    the header, otherwise the function attempts to find the header within
    the body (by putting rows with only ``<th>`` elements into the header).

    Similar to :func:`~read_csv` the `header` argument is applied
    **after** `skiprows` is applied.

    This function will *always* return a list of :class:`DataFrame` *or*
    it will fail, e.g., it will *not* return an empty list.

    Examples
    --------
    See the :ref:`read_html documentation in the IO section of the docs
    <io.read_html>` for some examples of reading in HTML tables.
    """
    # Ensure the optional parser backends have been probed before dispatch.
    _importers()

    # Type check here. We don't want to parse only to fail because of an
    # invalid value of an integer skiprows.
    if isinstance(skiprows, numbers.Integral) and skiprows < 0:
        raise ValueError(
            "cannot skip rows starting from the end of the "
            "data (you passed a negative value)"
        )
    validate_header_arg(header)

    io = stringify_path(io)

    # All remaining work (flavor fallback, parsing, DataFrame building)
    # is delegated to the internal _parse helper.
    return _parse(
        flavor=flavor,
        io=io,
        match=match,
        header=header,
        index_col=index_col,
        skiprows=skiprows,
        parse_dates=parse_dates,
        thousands=thousands,
        attrs=attrs,
        encoding=encoding,
        decimal=decimal,
        converters=converters,
        na_values=na_values,
        keep_default_na=keep_default_na,
        displayed_only=displayed_only,
    )
|
en
| 0.649662
|
:mod:`pandas.io.html` is a module containing functionality for dealing with HTML IO. # import things we need # but make this done on a first use basis ############# # READ HTML # ############# Replace extra whitespace inside of a string with a single space. Parameters ---------- s : str or unicode The string from which to remove extra whitespace. regex : re.Pattern The regular expression to use to remove extra whitespace. Returns ------- subd : str or unicode `s` with all extra whitespace replaced with a single space. Get an iterator given an integer, slice or container. Parameters ---------- skiprows : int, slice, container The iterator to use to skip rows; can also be a slice. Raises ------ TypeError * If `skiprows` is not a slice, integer, or Container Returns ------- it : iterable A proper iterator to use to skip rows of a DataFrame. Try to read from a url, file or string. Parameters ---------- obj : str, unicode, or file-like Returns ------- raw_text : str Base class for parsers that parse HTML into DataFrames. Parameters ---------- io : str or file-like This can be either a string of raw HTML, a valid URL using the HTTP, FTP, or FILE protocols or a file-like object. match : str or regex The text to match in the document. attrs : dict List of HTML <table> element attributes to match. encoding : str Encoding to be used by parser displayed_only : bool Whether or not items with "display:none" should be ignored Attributes ---------- io : str or file-like raw HTML, URL, or file-like object match : regex The text to match in the raw HTML attrs : dict-like A dictionary of valid table attributes to use to search for table elements. 
encoding : str Encoding to be used by parser displayed_only : bool Whether or not items with "display:none" should be ignored Notes ----- To subclass this class effectively you must override the following methods: * :func:`_build_doc` * :func:`_attr_getter` * :func:`_text_getter` * :func:`_parse_td` * :func:`_parse_thead_tr` * :func:`_parse_tbody_tr` * :func:`_parse_tfoot_tr` * :func:`_parse_tables` * :func:`_equals_tag` See each method's respective documentation for details on their functionality. Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables. Return the attribute value of an individual DOM node. Parameters ---------- obj : node-like A DOM node. attr : str or unicode The attribute, such as "colspan" Returns ------- str or unicode The attribute value. # Both lxml and BeautifulSoup have the same implementation: Return the text of an individual DOM node. Parameters ---------- obj : node-like A DOM node. Returns ------- text : str or unicode The text from an individual DOM node. Return the td elements from a row element. Parameters ---------- obj : node-like A DOM <tr> node. Returns ------- list of node-like These are the elements of each row, i.e., the columns. Return the list of thead row elements from the parsed table element. Parameters ---------- table : a table element that contains zero or more thead elements. Returns ------- list of node-like These are the <tr> row elements of a table. Return the list of tbody row elements from the parsed table element. HTML5 table bodies consist of either 0 or more <tbody> elements (which only contain <tr> elements) or 0 or more <tr> elements. This method checks for both structures. Parameters ---------- table : a table element that contains row elements. Returns ------- list of node-like These are the <tr> row elements of a table. Return the list of tfoot row elements from the parsed table element. 
Parameters ---------- table : a table element that contains row elements. Returns ------- list of node-like These are the <tr> row elements of a table. Return all tables from the parsed DOM. Parameters ---------- doc : the DOM from which to parse the table element. match : str or regular expression The text to search for in the DOM tree. attrs : dict A dictionary of table attributes that can be used to disambiguate multiple tables on a page. Raises ------ ValueError : `match` does not match any text in the document. Returns ------- list of node-like HTML <table> elements to be parsed into raw data. Return whether an individual DOM node matches a tag Parameters ---------- obj : node-like A DOM node. tag : str Tag name to be checked for equality. Returns ------- boolean Whether `obj`'s tag name is `tag` Return a tree-like object that can be used to iterate over the DOM. Returns ------- node-like The DOM from which to parse the table element. Given a table, return parsed header, body, and foot. Parameters ---------- table_html : node-like Returns ------- tuple of (header, body, footer), each a list of list-of-text rows. Notes ----- Header and body are lists-of-lists. Top level list is a list of rows. Each row is a list of str text. Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body, and footer, otherwise: - Put all rows into body - Move rows from top of body to header only if all elements inside row are <th> - Move rows from bottom of body to footer only if all elements inside row are <th> # The table has no <thead>. Move the top all-<th> rows from # body_rows to header_rows. (This is a common case because many # tables in the wild have no <thead> or <tfoot> Given a list of <tr>s, return a list of text rows. Parameters ---------- rows : list of node-like List of <tr>s Returns ------- list of list Each returned row is a list of str text. Notes ----- Any cell with ``rowspan`` or ``colspan`` will have its contents copied to subsequent cells. 
# list of rows, each a list of str # list of (index, text, nrows) # the output for this row # Append texts from previous rows with rowspan>1 that come # before this <td> # Append the text from this <td>, colspan times # Append texts from previous rows at the final position # Append rows that only appear because the previous row had non-1 # rowspan Return list of tables, potentially removing hidden elements Parameters ---------- tbl_list : list of node-like Type of list elements will vary depending upon parser used attr_name : str Name of the accessor for retrieving HTML attributes Returns ------- list of node-like Return type matches `tbl_list` HTML to DataFrame parser that uses BeautifulSoup under the hood. See Also -------- pandas.io.html._HtmlFrameParser pandas.io.html._LxmlFrameParser Notes ----- Documentation strings for this class are in the base class :class:`pandas.io.html._HtmlFrameParser`. # HTML spec: at most one of these lists has content Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes. # give class attribute as class_ because class is a python keyword HTML to DataFrame parser that uses lxml under the hood. Warning ------- This parser can only handle HTTP, FTP, and FILE urls. See Also -------- _HtmlFrameParser _BeautifulSoupLxmlFrameParser Notes ----- Documentation strings for this class are in the base class :class:`_HtmlFrameParser`. # Look for direct children only: the "row" element here may be a # <thead> or <tfoot> (see _parse_thead_tr). # 1. check all descendants for the given pattern and only search tables # 2. 
go up the tree until we find a table # if any table attributes were given build an xpath expression to # search for them # lxml utilizes XPATH 1.0 which does not have regex # support. As a result, we find all elements with a style # attribute and iterate them to check for display:none Raises ------ ValueError * If a URL that lxml cannot parse is passed. Exception * Any other ``Exception`` thrown. For example, trying to parse a URL that is syntactically correct on a machine with no internet connection will fail. See Also -------- pandas.io.html._HtmlFrameParser._build_doc # try to parse the input in the simplest way # if the input is a blob of html goop # HACK: lxml does not clean up the clearly-erroneous # <thead><th>foo</th><th>bar</th></thead>. (Missing <tr>). Add # the <thead> and _pretend_ it's a <tr>; _parse_td() will find its # children as though it's a <tr>. # # Better solution would be to use html5lib. # HTML spec: at most one of these lists has content # Infer header when there is a <thead> or top <th>-only rows # ignore all-empty-text rows # fill out elements of body that are "ragged" Choose the parser based on the input flavor. Parameters ---------- flavor : str The type of parser to use. This must be a valid backend. Returns ------- cls : _HtmlFrameParser subclass The parser class based on the requested input flavor. Raises ------ ValueError * If `flavor` is not a valid backend. ImportError * If you do not have the requested `flavor` # Although we call this above, we want to raise here right before use. # noqa:F841 # you can pass a compiled regex here # if `io` is an io-like object, check if it's seekable # and try to rewind it before trying the next parser # if we couldn't rewind it, let the user know # for mypy # empty table Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters ---------- io : str, path object or file-like object A URL, a file-like object, or a raw string containing HTML. 
Note that lxml only accepts the http, ftp and file url protocols. If you have a URL that starts with ``'https'`` you might try removing the ``'s'``. match : str or compiled regular expression, optional The set of tables containing text matching this regex or string will be returned. Unless the HTML is extremely simple you will probably need to pass a non-empty string here. Defaults to '.+' (match any non-empty string). The default value will return all tables contained on a page. This value is converted to a regular expression so that there is consistent behavior between Beautiful Soup and lxml. flavor : str, optional The parsing engine to use. 'bs4' and 'html5lib' are synonymous with each other, they are both there for backwards compatibility. The default of ``None`` tries to use ``lxml`` to parse and if that fails it falls back on ``bs4`` + ``html5lib``. header : int or list-like, optional The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to make the columns headers. index_col : int or list-like, optional The column (or list of columns) to use to create the index. skiprows : int, list-like or slice, optional Number of rows to skip after parsing the column integer. 0-based. If a sequence of integers or a slice is given, will skip the rows indexed by that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. attrs : dict, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being passed to lxml or Beautiful Soup. However, these attributes must be valid HTML table attributes to work correctly. For example, :: attrs = {'id': 'table'} is a valid attribute dictionary because the 'id' HTML tag attribute is a valid HTML attribute for *any* HTML tag as per `this document <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. 
:: attrs = {'asdf': 'table'} is *not* a valid attribute dictionary because 'asdf' is not a valid HTML attribute even if it is a valid XML attribute. Valid HTML 4.01 table attributes can be found `here <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A working draft of the HTML 5 spec can be found `here <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the latest information on table attributes for the modern web. parse_dates : bool, optional See :func:`~read_csv` for more details. thousands : str, optional Separator to use to parse thousands. Defaults to ``','``. encoding : str, optional The encoding used to decode the web page. Defaults to ``None``.``None`` preserves the previous encoding behavior, which depends on the underlying parser library (e.g., the parser library will try to use the encoding provided by the document). decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the cell (not column) content, and return the transformed content. na_values : iterable, default None Custom NA values. keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to. displayed_only : bool, default True Whether elements with "display: none" should be parsed. Returns ------- dfs A list of DataFrames. See Also -------- read_csv : Read a comma-separated values (csv) file into DataFrame. Notes ----- Before using this function you should read the :ref:`gotchas about the HTML parsing libraries <io.html.gotchas>`. Expect to do some cleanup after you call this function. For example, you might need to manually assign column names if the column names are converted to NaN when you pass the `header=0` argument. 
We try to assume as little as possible about the structure of the table and push the idiosyncrasies of the HTML contained in the table to the user. This function searches for ``<table>`` elements and only for ``<tr>`` and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>`` element in the table. ``<td>`` stands for "table data". This function attempts to properly handle ``colspan`` and ``rowspan`` attributes. If the function has a ``<thead>`` argument, it is used to construct the header, otherwise the function attempts to find the header within the body (by putting rows with only ``<th>`` elements into the header). Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. This function will *always* return a list of :class:`DataFrame` *or* it will fail, e.g., it will *not* return an empty list. Examples -------- See the :ref:`read_html documentation in the IO section of the docs <io.read_html>` for some examples of reading in HTML tables. # Type check here. We don't want to parse only to fail because of an # invalid value of an integer skiprows.
| 2.705254
| 3
|
sceptre/cli/__init__.py
|
shixuyue/sceptre
| 0
|
6625427
|
<reponame>shixuyue/sceptre
# -*- coding: utf-8 -*-
"""
sceptre.cli
This module implements Sceptre's CLI, and should not be directly imported.
"""
import os
import warnings
import click
import colorama
from sceptre import __version__
from sceptre.cli.new import new_group
from sceptre.cli.create import create_command
from sceptre.cli.update import update_command
from sceptre.cli.delete import delete_command
from sceptre.cli.launch import launch_command
from sceptre.cli.diff import diff_command
from sceptre.cli.drift import drift_group
from sceptre.cli.execute import execute_command
from sceptre.cli.describe import describe_group
from sceptre.cli.list import list_group
from sceptre.cli.policy import set_policy_command
from sceptre.cli.status import status_command
from sceptre.cli.helpers import catch_exceptions, setup_vars
from sceptre.cli.template import (validate_command,
generate_command,
estimate_cost_command,
fetch_remote_template_command)
@click.group()
@click.version_option(version=__version__, prog_name="Sceptre")
@click.option("--debug", is_flag=True, help="Turn on debug logging.")
@click.option("--dir", "directory", help="Specify sceptre directory.")
@click.option(
    "--output", type=click.Choice(["text", "yaml", "json"]), default="text",
    help="The formatting style for command output.")
@click.option("--no-colour", is_flag=True, help="Turn off output colouring.")
@click.option(
    "--var", multiple=True,
    help="A variable to replace the value of an item in config file.")
@click.option(
    "--var-file", multiple=True, type=click.File("rb"),
    help="A YAML file of variables to replace the values of items in config files.")
@click.option(
    "--ignore-dependencies", is_flag=True,
    help="Ignore dependencies when executing command.")
@click.option(
    "--merge-vars", is_flag=True, default=False,
    help="Merge variables from successive --vars and var files")
@click.pass_context
@catch_exceptions
def cli(
    ctx, debug, directory, output, no_colour, var, var_file, ignore_dependencies, merge_vars
):
    """
    Sceptre is a tool to manage your cloud native infrastructure deployments.
    """
    colorama.init()

    # Surface DeprecationWarning messages instead of Python's default
    # once-per-location filtering.
    warnings.simplefilter("always", DeprecationWarning)

    project_path = directory if directory else os.getcwd()
    user_variables = setup_vars(var_file, var, merge_vars, debug, no_colour)

    # Shared state handed to every sub-command via the click context.
    ctx.obj = {
        "user_variables": user_variables,
        "output_format": output,
        "no_colour": no_colour,
        "ignore_dependencies": ignore_dependencies,
        "project_path": project_path,
    }
# Register every sub-command/group on the top-level Sceptre CLI group.
cli.add_command(new_group)
cli.add_command(create_command)
cli.add_command(update_command)
cli.add_command(delete_command)
cli.add_command(launch_command)
cli.add_command(execute_command)
cli.add_command(validate_command)
cli.add_command(estimate_cost_command)
cli.add_command(generate_command)
cli.add_command(set_policy_command)
cli.add_command(status_command)
cli.add_command(list_group)
cli.add_command(describe_group)
cli.add_command(fetch_remote_template_command)
cli.add_command(diff_command)
cli.add_command(drift_group)
|
# -*- coding: utf-8 -*-
"""
sceptre.cli
This module implements Sceptre's CLI, and should not be directly imported.
"""
import os
import warnings
import click
import colorama
from sceptre import __version__
from sceptre.cli.new import new_group
from sceptre.cli.create import create_command
from sceptre.cli.update import update_command
from sceptre.cli.delete import delete_command
from sceptre.cli.launch import launch_command
from sceptre.cli.diff import diff_command
from sceptre.cli.drift import drift_group
from sceptre.cli.execute import execute_command
from sceptre.cli.describe import describe_group
from sceptre.cli.list import list_group
from sceptre.cli.policy import set_policy_command
from sceptre.cli.status import status_command
from sceptre.cli.helpers import catch_exceptions, setup_vars
from sceptre.cli.template import (validate_command,
generate_command,
estimate_cost_command,
fetch_remote_template_command)
@click.group()
@click.version_option(version=__version__, prog_name="Sceptre")
@click.option("--debug", is_flag=True, help="Turn on debug logging.")
@click.option("--dir", "directory", help="Specify sceptre directory.")
@click.option(
    "--output", type=click.Choice(["text", "yaml", "json"]), default="text",
    help="The formatting style for command output.")
@click.option("--no-colour", is_flag=True, help="Turn off output colouring.")
@click.option(
    "--var", multiple=True,
    help="A variable to replace the value of an item in config file.")
@click.option(
    "--var-file", multiple=True, type=click.File("rb"),
    help="A YAML file of variables to replace the values of items in config files.")
@click.option(
    "--ignore-dependencies", is_flag=True,
    help="Ignore dependencies when executing command.")
@click.option(
    "--merge-vars", is_flag=True, default=False,
    help="Merge variables from successive --vars and var files")
@click.pass_context
@catch_exceptions
def cli(
    ctx, debug, directory, output, no_colour, var, var_file, ignore_dependencies, merge_vars
):
    """
    Sceptre is a tool to manage your cloud native infrastructure deployments.

    This is the root click group: it parses the global options and stashes
    them on ``ctx.obj`` so every subcommand can read them.
    """
    colorama.init()
    # Enable deprecation warnings
    warnings.simplefilter("always", DeprecationWarning)
    # Shared state for all subcommands; keys are read by the command modules
    # registered below via cli.add_command().
    ctx.obj = {
        "user_variables": setup_vars(var_file, var, merge_vars, debug, no_colour),
        "output_format": output,
        "no_colour": no_colour,
        "ignore_dependencies": ignore_dependencies,
        # Fall back to the current working directory when --dir is not given.
        "project_path": directory if directory else os.getcwd()
    }

# Register every subcommand/group on the root CLI group.
cli.add_command(new_group)
cli.add_command(create_command)
cli.add_command(update_command)
cli.add_command(delete_command)
cli.add_command(launch_command)
cli.add_command(execute_command)
cli.add_command(validate_command)
cli.add_command(estimate_cost_command)
cli.add_command(generate_command)
cli.add_command(set_policy_command)
cli.add_command(status_command)
cli.add_command(list_group)
cli.add_command(describe_group)
cli.add_command(fetch_remote_template_command)
cli.add_command(diff_command)
cli.add_command(drift_group)
|
en
| 0.749326
|
# -*- coding: utf-8 -*- sceptre.cli This module implements Sceptre's CLI, and should not be directly imported. Sceptre is a tool to manage your cloud native infrastructure deployments. # Enable deprecation warnings
| 1.893979
| 2
|
cli/tests/int/test_ssm_cli.py
|
eyalstoler/ssm-simple-cli
| 0
|
6625428
|
import os
import boto3
import pytest
from click.testing import CliRunner
from moto import mock_ssm
from cli.src import ssm_cli
from cli.src.ssm_cli import CliConfiguration, DEFAULT_SSM_CONFIG_PARAMS
@pytest.fixture(scope='function')
def aws_credentials(tmpdir):
    """Mocked AWS Credentials for moto."""
    # Write a throwaway credentials/config file and point every AWS SDK
    # environment variable at fake values so no real account is ever touched.
    fake_aws_creds = tmpdir.join("fake_aws_credentials_file")
    fake_aws_creds.write("[default]\nAWS_ACCESS_KEY_ID=testing\nAWS_SECRET_ACCESS_KEY=testing\nregion=eu-west-1")
    fake_environment = {
        'AWS_ACCESS_KEY_ID': 'testing',
        'AWS_SECRET_ACCESS_KEY': 'testing',
        'AWS_SECURITY_TOKEN': 'testing',
        'AWS_SESSION_TOKEN': 'testing',
        'AWS_SHARED_CREDENTIALS_FILE': str(fake_aws_creds),
        'AWS_CONFIG_FILE': str(fake_aws_creds),
        'AWS_DEFAULT_REGION': 'us-east-1',
    }
    os.environ.update(fake_environment)
    yield fake_aws_creds
@pytest.fixture
def fake_ssm_cli_config(tmpdir):
    """Yield a path to a CLI config file pre-populated with the default SSM parameters."""
    config_path = tmpdir.join("fake_ssm_credentials_file")
    CliConfiguration(config_path).setup(**DEFAULT_SSM_CONFIG_PARAMS)
    yield config_path
@pytest.fixture
def fake_ssm_boto_client(aws_credentials):
    # Depends on aws_credentials so the fake environment is set up before
    # boto3 resolves region/credentials; mock_ssm keeps all SSM calls local.
    with mock_ssm():
        yield boto3.client('ssm')
# noinspection PyUnusedLocal
def test_should_get_value_when_found(fake_ssm_boto_client, fake_ssm_cli_config):
    """'get' on an existing SecureString prints its decrypted value and exits 0."""
    fake_ssm_boto_client.put_parameter(Name='some-param', Value='some-value', Type='SecureString')
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'get', 'some-param'])
        assert outcome.exit_code == 0
        assert outcome.output == 'some-value\n'
# noinspection PyUnusedLocal
def test_should_return_not_found_message_when_parameter_not_found(fake_ssm_boto_client, fake_ssm_cli_config):
    """'get' on an unknown name reports it as not found and exits with code 2."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'get', 'some-unknown-value'])
        assert outcome.exit_code == 2
        for expected_fragment in ('some-unknown-value', 'not found!'):
            assert expected_fragment in outcome.output
# noinspection PyUnusedLocal
def test_should_put_value_successfully(fake_ssm_boto_client, fake_ssm_cli_config):
    """'put' stores the prompted value as a parameter readable back through SSM."""
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'put', 'some-param'],
                                    input='some-value\nsome-desc\n')
        stored = fake_ssm_boto_client.get_parameter(Name='some-param', WithDecryption=True)
        assert outcome.exit_code == 0
        assert stored['Parameter']['Value'] == 'some-value'
# noinspection PyUnusedLocal
def test_should_describe_all_parameters_by_name_and_path(fake_ssm_boto_client, fake_ssm_cli_config):
    """'describe' lists every parameter name (path-style and plain), one per line."""
    for parameter_name in ('/some-path/some-param1', 'some-param2'):
        fake_ssm_boto_client.put_parameter(Name=parameter_name, Value='not-relevant', Type='SecureString')
    cli_runner = CliRunner()
    with cli_runner.isolated_filesystem():
        outcome = cli_runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'describe'])
        assert outcome.exit_code == 0
        assert outcome.output == '/some-path/some-param1\nsome-param2\n'
|
import os
import boto3
import pytest
from click.testing import CliRunner
from moto import mock_ssm
from cli.src import ssm_cli
from cli.src.ssm_cli import CliConfiguration, DEFAULT_SSM_CONFIG_PARAMS
@pytest.fixture(scope='function')
def aws_credentials(tmpdir):
fake_aws_creds = tmpdir.join("fake_aws_credentials_file")
fake_aws_creds.write("[default]\nAWS_ACCESS_KEY_ID=testing\nAWS_SECRET_ACCESS_KEY=testing\nregion=eu-west-1")
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
os.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(fake_aws_creds)
os.environ['AWS_CONFIG_FILE'] = str(fake_aws_creds)
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
yield fake_aws_creds
@pytest.fixture
def fake_ssm_cli_config(tmpdir):
config_tmpdir = tmpdir.join("fake_ssm_credentials_file")
config = CliConfiguration(config_tmpdir)
config.setup(**DEFAULT_SSM_CONFIG_PARAMS)
yield config_tmpdir
@pytest.fixture
def fake_ssm_boto_client(aws_credentials):
with mock_ssm():
yield boto3.client('ssm')
# noinspection PyUnusedLocal
def test_should_get_value_when_found(fake_ssm_boto_client, fake_ssm_cli_config):
fake_ssm_boto_client.put_parameter(Name='some-param', Value='some-value', Type='SecureString')
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'get', 'some-param'])
assert result.output == 'some-value\n'
assert result.exit_code == 0
# noinspection PyUnusedLocal
def test_should_return_not_found_message_when_parameter_not_found(fake_ssm_boto_client, fake_ssm_cli_config):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'get', 'some-unknown-value'])
assert 'some-unknown-value' in result.output
assert 'not found!' in result.output
assert result.exit_code == 2
# noinspection PyUnusedLocal
def test_should_put_value_successfully(fake_ssm_boto_client, fake_ssm_cli_config):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'put', 'some-param'],
input='some-value\nsome-desc\n')
returned_parameter = fake_ssm_boto_client.get_parameter(Name='some-param', WithDecryption=True)
assert returned_parameter['Parameter']['Value'] == 'some-value'
assert result.exit_code == 0
# noinspection PyUnusedLocal
def test_should_describe_all_parameters_by_name_and_path(fake_ssm_boto_client, fake_ssm_cli_config):
fake_ssm_boto_client.put_parameter(Name='/some-path/some-param1', Value='not-relevant', Type='SecureString')
fake_ssm_boto_client.put_parameter(Name='some-param2', Value='not-relevant', Type='SecureString')
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(ssm_cli.cli, ['--config_path', fake_ssm_cli_config, 'describe'])
assert result.output == '/some-path/some-param1\nsome-param2\n'
assert result.exit_code == 0
|
en
| 0.19826
|
Mocked AWS Credentials for moto. # noinspection PyUnusedLocal # noinspection PyUnusedLocal # noinspection PyUnusedLocal # noinspection PyUnusedLocal
| 1.766581
| 2
|
utils/csrt_session.py
|
atlassian-labs/connect-security-req-tester
| 14
|
6625429
|
import requests
from requests.adapters import HTTPAdapter
session = None
# Ref: https://stackoverflow.com/a/62044100
# Create an HTTP Adapter that we can mount to a Requests.Session object to globally apply
# a timeout on all requests
class TimeoutHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that applies a default timeout to every request sent through it."""

    def __init__(self, *args, **kwargs):
        # Pop our custom "timeout" keyword before delegating, since the base
        # HTTPAdapter does not accept it.
        if "timeout" in kwargs:
            self.timeout = kwargs.pop("timeout")
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        # Fall back to the adapter-wide timeout unless the caller supplied one.
        if kwargs.get("timeout") is None and hasattr(self, 'timeout'):
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)
def create_csrt_session(timeout: int = 30) -> requests.Session:
    """Return the shared requests.Session used for all HTTP requests.

    The session is created lazily on first call and cached in the module-level
    ``session`` global; note that the ``timeout`` argument therefore only takes
    effect on the first call. The session uses a standardized user-agent and
    deliberately does not verify SSL/TLS.

    Returns:
        requests.Session: A session object pre-configured for timeouts, a standardized user-agent, and does not verify SSL/TLS
    """
    global session
    if session is None:
        new_session = requests.Session()
        # Apply the default timeout to both plain and TLS traffic.
        for scheme in ('http://', 'https://'):
            new_session.mount(scheme, TimeoutHTTPAdapter(timeout=timeout))
        new_session.headers.update(
            {'User-Agent': 'CSRT (github.com/atlassian-labs/connect-security-req-tester)'}
        )
        new_session.verify = False
        session = new_session
    return session
|
import requests
from requests.adapters import HTTPAdapter
session = None
# Ref: https://stackoverflow.com/a/62044100
# Create an HTTP Adapter that we can mount to a Requests.Session object to globally apply
# a timeout on all requests
class TimeoutHTTPAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None and hasattr(self, 'timeout'):
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
def create_csrt_session(timeout: int = 30) -> requests.Session:
"""Return a requests.Session object setup in a standard way to make HTTP requests from
Returns:
requests.Session: A session object pre-configured for timeouts, a standardized user-agent, and does not verify SSL/TLS
"""
global session
if not session:
session = requests.Session()
session.mount('http://', TimeoutHTTPAdapter(timeout=timeout))
session.mount('https://', TimeoutHTTPAdapter(timeout=timeout))
session.headers.update(
{'User-Agent': 'CSRT (github.com/atlassian-labs/connect-security-req-tester)'}
)
session.verify = False
return session
|
en
| 0.75964
|
# Ref: https://stackoverflow.com/a/62044100 # Create an HTTP Adapter that we can mount to a Requests.Session object to globally apply # a timeout on all requests Return a requests.Session object setup in a standard way to make HTTP requests from Returns: requests.Session: A session object pre-configured for timeouts, a standardized user-agent, and does not verify SSL/TLS
| 2.998206
| 3
|
src/robot.py
|
jevancc/ece276a-visual-inertial-slam
| 21
|
6625430
|
import numpy as np
def wedge(x):
    """Hat operator.

    For a 3-vector, return the 3x3 skew-symmetric matrix x^ such that
    x^ y = x cross y. For a 6-vector (v, w), return the 4x4 twist matrix
    [[w^, v], [0, 0]].
    """
    assert x.size in [3, 6]
    x = x.reshape(-1)
    if x.size == 3:
        a, b, c = x
        return np.array([[0, -c, b],
                         [c, 0, -a],
                         [-b, a, 0]])
    elif x.size == 6:
        # Top 3x4: angular part hatted, linear part as the last column.
        top = np.hstack([wedge(x[3:]), x[:3].reshape(-1, 1)])
        return np.vstack([top, np.array([[0, 0, 0, 0]])])
def cwedge(x):
    """Curly-hat operator: map a 6-vector twist x = (v, w) to the 6x6 matrix
    [[w^, v^], [0, w^]]."""
    assert x.size == 6
    flat = x.reshape(-1)
    w_hat = wedge(flat[3:])
    v_hat = wedge(flat[:3])
    return np.block([[w_hat, v_hat],
                     [np.zeros((3, 3)), w_hat]])
def cdot(x):
    """Circle-dot operator for a homogeneous point x = (s, 1): return the
    4x6 matrix [[I, -s^], [0, 0]]."""
    flat = x.reshape(-1)
    # Only homogeneous points (last coordinate exactly 1) are accepted.
    assert flat.size == 4 and flat[-1] == 1
    top = np.hstack([np.eye(3), -wedge(flat[:3])])
    return np.vstack([top, np.zeros((1, 6))])
def pi(q):
    """Projection: divide each homogeneous column of q (4 x N) by its third row."""
    assert q.ndim == 2 and q.shape[0] == 4
    depth = q[2, :]
    return q / depth
def dpidq(q):
    """Jacobian (4x4) of the projection pi evaluated at the 4-vector q."""
    assert q.size == 4
    q = q.reshape(-1)
    jacobian = np.array([[1, 0, -q[0] / q[2], 0],
                         [0, 1, -q[1] / q[2], 0],
                         [0, 0, 0, 0],
                         [0, 0, -q[3] / q[2], 1]])
    return jacobian / q[2]
|
import numpy as np
def wedge(x):
assert x.size in [3, 6]
x = x.reshape(-1)
if x.size == 3:
return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
elif x.size == 6:
return np.block([[wedge(x[3:]), x[:3].reshape(-1, 1)], [np.array([0, 0, 0, 0])]])
def cwedge(x):
assert x.size == 6
x = x.reshape(-1)
return np.block([[wedge(x[3:]), wedge(x[:3])], [np.zeros((3, 3)), wedge(x[3:])]])
def cdot(x):
x = x.reshape(-1)
assert x.size == 4 and x[-1] == 1
return np.block([[np.eye(3), -wedge(x[:3])], [np.zeros((1, 6))]])
def pi(q):
assert q.ndim == 2 and q.shape[0] == 4
return q / q[2, :]
def dpidq(q):
assert q.size == 4
q = q.reshape(-1)
return (1 / q[2]) * np.array([[1, 0, -q[0] / q[2], 0], [0, 1, -q[1] / q[2], 0], [0, 0, 0, 0],
[0, 0, -q[3] / q[2], 1]])
|
none
| 1
| 2.715062
| 3
|
|
neointerface/neointerface.py
|
GSK-Biostatistics/neointerface
| 9
|
6625431
|
from neo4j import GraphDatabase # The Neo4j python connectivity library "Neo4j Python Driver"
from neo4j import __version__ as neo4j_driver_version # The version of the Neo4j driver being used
import neo4j.graph # To check returned data types
import numpy as np
import pandas as pd
import inspect
import os
import requests
import re
import json
import time
from urllib.parse import quote
class NeoInterface:
"""
High level class to interact with neo4j from Python.
It provides a higher-level wrapper around the Neo4j python connectivity library "Neo4j Python Driver",
documented at: https://neo4j.com/docs/api/python-driver/current/api.html
SECTIONS IN THIS CLASS:
* INIT
* METHODS TO RUN GENERIC QUERIES
* METHODS TO RETRIEVE DATA
* METHODS TO CREATE/MODIFY SCHEMA
* METHODS TO CREATE/MODIFY DATA
* METHODS TO CREATE NEW RELATIONSHIPS
* METHODS TO READ IN DATA
* UTILITY METHODS
* METHODS TO SUPPORT DEBUGGING
* METHODS TO SUPPORT JSON IMPORT/EXPORT
* METHODS TO SUPPORT RDF PROCEDURES
AUTHORS:
<NAME> and <NAME>, GlaxoSmithKline
Based in part on Neo4jLiaison library (MIT License: https://github.com/BrainAnnex/neo4j-liaison)
"""
def __init__(self,
host=os.environ.get("NEO4J_HOST"),
credentials=(os.environ.get("NEO4J_USER"), os.environ.get("NEO4J_PASSWORD")),
apoc=False,
rdf=False,
rdf_host=None,
verbose=True,
debug=False,
autoconnect=True):
"""
If unable to create a Neo4j driver object, raise an Exception reminding the user to check whether the Neo4j database is running
:param host: URL to connect to database with. DEFAULT: read from NEO4J_HOST environmental variable
:param credentials: Pair of strings (tuple or list) containing, respectively, the database username and password
DEFAULT: read from NEO4J_USER and NEO4J_PASSWORD environmental variables
if None then no authentication is used
:param apoc: Flag indicating whether apoc library is used on Neo4j database to connect to
:param verbose: Flag indicating whether a verbose mode is to be used by all methods of this class
:param debug: Flag indicating whether a debug mode is to be used by all methods of this class
:param autoconnect Flag indicating whether the class should establish connection to database at initialization
"""
self.verbose = verbose
self.debug = debug
self.autoconnect = autoconnect
self.host = host
self.credentials = credentials
self.apoc = apoc
self.rdf = rdf
self.rdf_host = rdf_host
if self.verbose:
print("---------------- Initializing NeoInterface -------------------")
if self.autoconnect: # TODO: add test for autoconnect == False
# Attempt to create a driver object
self.connect()
# Extra initializations if APOC custom procedures (note: APOC must also be enabled on the database)
# if apoc:
# self.setup_all_apoc_custom()
# Extra initializations if RDF support required
if self.rdf:
self.rdf_setup_connection()
def connect(self) -> None:
try:
if self.credentials:
user, password = self.credentials # This unpacking will work whether the credentials were passed as a tuple or list
self.driver = GraphDatabase.driver(self.host, auth=(
user, password)) # Object to connect to Neo4j's Bolt driver for Python
else:
self.driver = GraphDatabase.driver(self.host,
auth=None) # Object to connect to Neo4j's Bolt driver for Python
if self.verbose:
print(f"Connection to {self.host} established")
except Exception as ex:
error_msg = f"CHECK IF NEO4J IS RUNNING! While instantiating the NeoInterface object, failed to create the driver: {ex}"
raise Exception(error_msg)
def rdf_config(self) -> None:
try:
self.query("CALL n10s.graphconfig.init({handleVocabUris:'IGNORE'});")
except:
if self.debug:
print("Config already created, make sure the config is correct")
self.create_constraint(label="Resource", key="uri", type="UNIQUE", name="n10s_unique_uri")
def rdf_setup_connection(self) -> None:
self.rdf_config()
if not self.rdf_host:
self.rdf_host = os.environ.get("NEO4J_RDF_HOST")
if not self.rdf_host:
bolt_port = re.findall(r'\:\d+', self.host)[0]
self.rdf_host = self.host.replace(bolt_port, ":7474").replace("bolt", "http").replace("neoj", "http")
self.rdf_host += ("" if self.rdf_host.endswith("/") else "/") + "rdf/"
try:
get_response = json.loads(requests.get(f"{self.rdf_host}ping", auth=self.credentials).text)
if self.verbose:
if "here!" in get_response.values():
print(f"Connection to {self.rdf_host} established")
except:
error_msg = f"CHECK IF RDF ENDPOINT IS SET UP CORRECTLY! While instantiating the NeoInterface object, failed to connect to {self.rdf_host}"
raise Exception(error_msg)
def version(self) -> str:
# Return the version of the Neo4j driver being used. EXAMPLE: "4.2.1"
return neo4j_driver_version
def close(self) -> None:
"""
Terminate the database connection.
Note: this method is automatically invoked after the last operation of a "with" statement
:return: None
"""
if self.driver is not None:
self.driver.close()
############################################################################################
# #
# METHODS TO RUN GENERIC QUERIES #
# #
############################################################################################
def query(self, q: str, params=None) -> []:
"""
Run a general Cypher query and return a list of dictionaries.
In cases of error, return an empty list.
A new session to the database driver is started, and then immediately terminated after running the query.
NOTE: if the Cypher query returns a node, and one wants to extract its internal Neo4j ID or labels
(in addition to all the properties and their values) then use query_expanded() instead.
:param q: A Cypher query
:param params: An optional Cypher dictionary
EXAMPLE, assuming that the cypher string contains the substrings "$node_id":
{'node_id': 20}
:return: A (possibly empty) list of dictionaries. Each dictionary in the list
will depend on the nature of the Cypher query.
EXAMPLES:
Cypher returns nodes (after finding or creating them): RETURN n1, n2
-> list item such as {'n1': {'gender': 'M', 'patient_id': 123}
'n2': {'gender': 'F', 'patient_id': 444}}
Cypher returns attribute values that get renamed: RETURN n.gender AS client_gender, n.pid AS client_id
-> list items such as {'client_gender': 'M', 'client_id': 123}
Cypher returns attribute values without renaming: RETURN n.gender, n.pid
-> list items such as {'n.gender': 'M', 'n.pid': 123}
Cypher returns a single computed value
-> a single list item such as {"count(n)": 100}
Cypher returns a single relationship, with or without attributes: MERGE (c)-[r:PAID_BY]->(p)
-> a single list item such as [{ 'r': ({}, 'PAID_BY', {}) }]
Cypher creates nodes (without returning them)
-> empty list
"""
# Start a new session, use it, and then immediately close it
with self.driver.session() as new_session:
result = new_session.run(q, params)
# Note: result is a neo4j.Result object;
# more specifically, an object of type neo4j.work.result.Result
# See https://neo4j.com/docs/api/python-driver/current/api.html#neo4j.Result
if result is None:
return []
data_as_list = result.data() # Return the result as a list of dictionaries.
# This must be done inside the "with" block,
# while the session is still open
return data_as_list
def query_expanded(self, q: str, params=None, flatten=False) -> []:
"""
Expanded version of query(), meant to extract additional info for queries that return Graph Data Types,
i.e. nodes, relationships or paths,
such as "MATCH (n) RETURN n", or "MATCH (n1)-[r]->(n2) RETURN r"
For example, if nodes were returned, and their Neo4j internal IDs and/or labels are desired
(in addition to all the properties and their values)
Unless the flatten flag is True, individual records are kept as separate lists.
For example, "MATCH (b:boat), (c:car) RETURN b, c"
will return a structure such as [ [b1, c1] , [b2, c2] ] if flatten is False,
vs. [b1, c1, b2, c2] if flatten is True. (Note: each b1, c1, etc, is a dictionary.)
TODO: Scenario to test:
if b1 == b2, would that still be [b1, c1, b1(b2), c2] or [b1, c1, c2] - i.e. would we remove the duplicates?
Try running with flatten=True "MATCH (b:boat), (c:car) RETURN b, c" on data like "CREATE (b:boat), (c1:car1), (c2:car2)"
:param q: A Cypher query
:param params: An optional Cypher dictionary
EXAMPLE, assuming that the cypher string contains the substring "$age":
{'age': 20}
:param flatten: Flag indicating whether the Graph Data Types need to remain clustered by record,
or all placed in a single flattened list.
:return: A (possibly empty) list of dictionaries, which will depend on which Graph Data Types
were returned in the Cypher query.
EXAMPLE - for a returned node
{'gender': 'M', 'age': 20, 'neo4j_id': 123, 'neo4j_labels': ['patient']}
EXAMPLE - for a returned relationship
{'price': 7500, 'neo4j_id': 2,
'neo4j_start_node': <Node id=11 labels=frozenset() properties={}>,
'neo4j_end_node': <Node id=14 labels=frozenset() properties={}>,
'neo4j_type': 'bought_by'}]
"""
# Start a new session, use it, and then immediately close it
with self.driver.session() as new_session:
result = new_session.run(q, params)
# Note: result is a neo4j.Result iterable object;
# more specifically, an object of type neo4j.work.result.Result
# See https://neo4j.com/docs/api/python-driver/current/api.html#neo4j.Result
if result is None:
return []
data_as_list = []
# The following must be done inside the "with" block, while the session is still open
for record in result:
# Note: record is a neo4j.Record object - an immutable ordered collection of key-value pairs.
# (the keys are the dummy names used for the nodes, such as "n")
# See https://neo4j.com/docs/api/python-driver/current/api.html#record
# EXAMPLE of record (if node n was returned):
# <Record n=<Node id=227 labels=frozenset({'person', 'client'}) properties={'gender': 'M', 'age': 99}>>
# (it has one key, "n")
# EXAMPLE of record (if node n and node c were returned):
# <Record n=<Node id=227 labels=frozenset({'person', 'client'}) properties={'gender': 'M', 'age': 99}>
# c=<Node id=66 labels=frozenset({'car'}) properties={'color': 'blue'}>>
# (it has 2 keys, "n" and "c")
data = []
for item in record:
# Note: item is a neo4j.graph.Node object
# OR a neo4j.graph.Relationship object
# OR a neo4j.graph.Path object
# See https://neo4j.com/docs/api/python-driver/current/api.html#node
# https://neo4j.com/docs/api/python-driver/current/api.html#relationship
# https://neo4j.com/docs/api/python-driver/current/api.html#path
# EXAMPLES of item:
# <Node id=95 labels=frozenset({'car'}) properties={'color': 'white', 'make': 'Toyota'}>
# <Relationship id=12 nodes=(<Node id=147 labels=frozenset() properties={}>, <Node id=150 labels=frozenset() properties={}>) type='bought_by' properties={'price': 7500}>
neo4j_properties = dict(item.items()) # EXAMPLE: {'gender': 'M', 'age': 99}
if isinstance(item, neo4j.graph.Node):
neo4j_properties["neo4j_id"] = item.id # Example: 227
neo4j_properties["neo4j_labels"] = list(item.labels) # Example: ['person', 'client']
elif isinstance(item, neo4j.graph.Relationship):
neo4j_properties["neo4j_id"] = item.id # Example: 227
neo4j_properties[
"neo4j_start_node"] = item.start_node # A neo4j.graph.Node object with "id", "labels" and "properties"
neo4j_properties[
"neo4j_end_node"] = item.end_node # A neo4j.graph.Node object with "id", "labels" and "properties"
# Example: <Node id=118 labels=frozenset({'car'}) properties={'color': 'white'}>
neo4j_properties["neo4j_type"] = item.type # The name of the relationship
elif isinstance(item, neo4j.graph.Path):
neo4j_properties["neo4j_nodes"] = item.nodes # The sequence of Node objects in this path
if flatten:
data_as_list.append(neo4j_properties)
else:
data.append(neo4j_properties)
if not flatten:
data_as_list.append(data)
return data_as_list
##################################################################################################
# #
# METHODS TO RETRIEVE DATA #
# #
##################################################################################################
def get_single_field(self, field_name: str, labels="", properties_condition=None, cypher_clause=None,
cypher_dict=None) -> list:
"""
For situations where one is fetching just 1 field,
and one desires a list of those values, rather than a dictionary of records.
In other respects, similar to the more general get_nodes()
EXAMPLES: fetch_single_field("car", "price", properties_condition={"car_make": "Toyota"})
will RETURN a list of prices of all the Toyota models
fetch_single_field("car", "price", properties_condition={"car_make": "Toyota"}, clause="n.price < 50000")
will RETURN a list of prices of all the Toyota models that cost less than 50000
:param field_name: A string with the name of the desired field (attribute)
For more information on the other parameters, see get_nodes()
:return: A list of the values of the field_name attribute in the nodes that match the specified conditions
"""
record_list = self.get_nodes(labels, properties_condition=properties_condition,
cypher_clause=cypher_clause, cypher_dict=cypher_dict)
single_field_list = [record.get(field_name) for record in record_list]
return single_field_list
def get_nodes(self, labels="", properties_condition=None, cypher_clause=None, cypher_dict=None,
return_nodeid=False, return_labels=False) -> [{}]:
"""
EXAMPLES:
get_nodes("") # Get ALL nodes
get_nodes("client")
get_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"})
get_nodes("client", cypher_clause = "n.age > 40 OR n.income < 50000")
get_nodes("client", cypher_clause = "n.age > $some_age", cypher_dict = {"$some_age": 40})
get_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"} ,
cypher_clause = "n.age > 40 OR n.income < 50000")
RETURN a list of the records (as dictionaries of ALL the key/value node properties)
corresponding to all the Neo4j nodes with the specified label,
AND satisfying the given Cypher CLAUSE (if present),
AND exactly matching ALL of the specified property key/values pairs (if present).
I.e. an implicit AND operation.
IMPORTANT: nodes referred to in the Cypher clause must be specified as "n."
A dictionary of data binding (cypher_dict) for the Cypher clause may be optionally specified.
In case of conflict (any key overlap) between the dictionaries cypher_dict and properties_condition, and Exception is raised.
Optionally, the Neo4j internal node ID and label name(s) may also be obtained and returned.
:param labels: A string (or list/tuple of strings) specifying one or more Neo4j labels;
an empty string indicates that the match is to be carried out
across all labels - NOT RECOMMENDED for large databases!
(Note: blank spaces ARE allowed in the strings)
:param cypher_dict: Dictionary of data binding for the Cypher string. EXAMPLE: {"gender": "M", "age": 40}
:param cypher_clause: String with a clause to refine the search; any nodes it refers to, MUST be specified as "n."
EXAMPLE with hardwired values: "n.age > 40 OR n.income < 50000"
EXAMPLE with data-binding: "n.age > $age OR n.income < $income"
(data-binding values are specified in cypher_dict)
:param properties_condition: A (possibly-empty) dictionary of property key/values pairs. Example: {"gender": "M", age: 64}
IMPORTANT: cypher_dict and properties_dict must have no overlapping keys, or an Exception will be raised
:param return_nodeid: Flag indicating whether to also include the Neo4j internal node ID in the returned data
(using "neo4j_id" as its key in the returned dictionary)
:param return_labels: Flag indicating whether to also include the Neo4j label names in the returned data
(using "neo4j_labels" as its key in the returned dictionary)
:return: A list whose entries are dictionaries with each record's information
(the node's attribute names are the keys)
EXAMPLE: [ {"gender": "M", "age": 42, "condition_id": 3},
{"gender": "M", "age": 76, "location": "Berkeley"}
]
Note that ALL the attributes of each node are returned - and that they may vary across records.
If the flag return_nodeid is set to True, then an extra key/value pair is included in the dictionaries,
of the form "neo4j_id": some integer with the Neo4j internal node ID
If the flag return_labels is set to True, then an extra key/value pair is included in the dictionaries,
of the form "neo4j_labels": [list of Neo4j label(s) attached to that node]
EXAMPLE using both of the above flags:
[ {"neo4j_id": 145, "neo4j_labels": ["person", "client"], "gender": "M", "age": 42, "condition_id": 3},
{"neo4j_id": 222, "neo4j_labels": ["person"], "gender": "M", "age": 76, "location": "Berkeley"}
]
# TODO: provide an option to specify the desired fields
"""
(cypher, cypher_dict) = self._match_nodes(labels=labels, properties_condition=properties_condition,
cypher_clause=cypher_clause, cypher_dict=cypher_dict)
cypher += " RETURN n"
if self.debug:
print(f"""
In get_nodes().
query: {cypher}
parameters: {cypher_dict}
""")
result_list = self.query_expanded(cypher, cypher_dict, flatten=True)
if return_nodeid and return_labels:
# If we want to return both 'neo4j_id' and 'neo4j_labels', we're done, because query_expanded() provides both
return result_list
# If we get thus far, it means that either the 'neo4j_id' or the 'neo4j_labels' attribute isn't wanted;
# remove the unwanted one from all the dictionaries in the elements of result_list
for node_dict in result_list:
if not return_nodeid:
del node_dict['neo4j_id']
if not return_labels:
del node_dict['neo4j_labels']
return result_list
def get_df(self, labels="", properties_condition=None, cypher_clause=None, cypher_dict=None,
return_nodeid=False, return_labels=False) -> pd.DataFrame:
"""
Same as get_nodes(), but the result is returned as a Pandas dataframe
[See get_nodes() for information about the arguments]
:param labels:
:param properties_condition:
:param cypher_clause:
:param cypher_dict:
:param return_nodeid:
:param return_labels:
:return: A Pandas dataframe
"""
result_list = self.get_nodes(labels=labels, properties_condition=properties_condition,
cypher_clause=cypher_clause, cypher_dict=cypher_dict,
return_nodeid=return_nodeid, return_labels=return_labels)
return pd.DataFrame(result_list)
def _match_nodes(self, labels, properties_condition=None, cypher_clause=None, cypher_dict=None) -> (str, dict):
    """
    Turn a set of specification into the MATCH part of the Cypher query, and its data-binding dictionary.

    EXAMPLES:
        _match_nodes("client")
        _match_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"})
        _match_nodes("client", cypher_clause = "n.age > 40 OR n.income < 50000")
        _match_nodes("client", cypher_clause = "n.age > $age",
                               cypher_dict = {"age": 40})
        _match_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"} ,
                               cypher_clause = "n.age > 40 OR n.income < 50000")

    RETURN the MATCH part of the Cypher query, and its data-binding dictionary,
    corresponding to all the Neo4j nodes with the given label or labels (if specified),
    AND satisfying the given Cypher CLAUSE (if specified, and optionally with the data-binding cypher_dict),
    AND exactly matching ALL of the specified property key/values pairs (if specified).
    I.e. an implicit AND operation.

    Note: cypher_dict should not contain keys of the form `par_n`, where n is an integer, or an Exception might result
          (those names are reserved for the bindings generated from properties_condition).
    NOTE: when both properties_condition and cypher_dict are passed, the caller's cypher_dict
          gets EXTENDED IN PLACE with the property bindings (see the .update() call below).

    :param labels: A string, or list/tuple of strings, of Neo4j labels (Note: blank spaces ARE allowed)
    :param properties_condition: A (possibly-empty) dictionary of property key/values pairs.
                                 Example: {"gender": "F", "age": 22}
                                 If None or empty, no restrictions are placed on the match
    :param cypher_clause: String with a clause to refine the search; any nodes it refers to, MUST be specified as "n."
                          EXAMPLE with hardwired values: "n.age > 40 OR n.income < 50000"
                          EXAMPLE with data-binding:     "n.age > $age OR n.income > $income"
                                                         (data-binding values are specified in cypher_dict)
    :param cypher_dict: Dictionary of data binding for the Cypher string. EXAMPLE: {"age": 40, "income": 50000}
                        It should not contain any keys of the form `par_n`, where n is an integer
                        (those names are reserved for internal use)
    :return: A pair consisting of the MATCH part of the Cypher query, and its data-binding dictionary
    """
    if properties_condition is None:
        clause_from_properties = ""
    else:
        # Transform the dictionary properties_condition into a string describing its corresponding Cypher clause,
        # plus a corresponding data-binding dictionary.
        # (assuming an implicit AND between equalities described by the dictionary terms),
        #
        # EXAMPLE:
        #       properties_condition: {"gender": "F", "year first met": 2003}
        #       will lead to:
        #           clause_from_properties = "{`gender`: $par_1, `year first met`: $par_2}"
        #           props_data_binding = {'par_1': "F", 'par_2': 2003}
        (clause_from_properties, props_data_binding) = self.dict_to_cypher(properties_condition)

        if cypher_dict is None:
            cypher_dict = props_data_binding  # The properties dictionary is to be used as the Cypher-binding dictionary
        else:
            # Merge the properties dictionary into the existing cypher_dict, PROVIDED that there's no conflict
            overlap = cypher_dict.keys() & props_data_binding.keys()  # Take the set intersection
            if overlap != set():  # If not equal to the empty set
                raise Exception(
                    f"`cypher_dict` should not contain any keys of the form `par_n` where n is an integer. "
                    f"Those names are reserved for internal use. Conflicting names: {overlap}")
            cypher_dict.update(props_data_binding)  # Merge the properties dictionary into the existing cypher_dict

    if cypher_dict is None:
        cypher_dict = {}

    if cypher_clause is not None:
        cypher_clause = cypher_clause.strip()  # Zap any leading/trailing blanks

    # Turn labels (string or list/tuple of labels) into a string suitable for inclusion into Cypher
    cypher_labels = self._prepare_labels(labels)

    # Construct the Cypher string
    cypher = f"MATCH (n {cypher_labels} {clause_from_properties})"
    # Note: "" is excluded explicitly because a stripped blank clause must not produce "WHERE "
    if (cypher_clause != "") and (cypher_clause is not None):
        cypher += f" WHERE {cypher_clause}"

    return (cypher, cypher_dict)
def _prepare_labels(self, labels) -> str:
"""
Turn the given string, or list/tuple of strings - representing Neo4j labels - into a string
suitable for inclusion in a Cypher query.
Blanks ARE allowed in names.
EXAMPLES:
"client" gives rise to ":`client`"
["car", "car manufacturer"] gives rise to ":`car`:`car manufacturer`"
:param labels: A string, or list/tuple of strings, representing Neo4j labels
:return: A string suitable for inclusion in a Cypher query
"""
# Turn the label strings, or list/tuple of labels, into a string suitable for inclusion into Cypher
if labels == "":
return ""
if type(labels) == str:
labels = [labels]
cypher_labels = ""
for single_label in labels:
cypher_labels += f":`{single_label}`" # EXAMPLE: ":`label 1`:`label 2`"
return cypher_labels
def get_parents_and_children(self, node_id: int) -> dict:
    """
    Fetch all the nodes connected to the given one by INbound relationships to it (its "parents"),
    as well as by OUTbound relationships to it (its "children")

    :param node_id: An integer with a Neo4j internal node ID
    :return: A dictionary with 2 keys: 'parent_list' and 'child_list'
             The values are lists of dictionaries with 3 keys: "id", "labels", "rel"
             EXAMPLE of individual items in either parent_list or child_list:
                {'id': 163, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'}
    """
    with self.driver.session() as new_session:
        # Fetch the parents (nodes with a relationship pointing INTO the given node)
        cypher = f"MATCH (parent)-[inbound]->(n) WHERE id(n) = {node_id} " \
                 "RETURN id(parent) AS id, labels(parent) AS labels, type(inbound) AS rel"
        if self.debug:
            print(f"""
            query: {cypher}
            """)
        result_obj = new_session.run(cypher)  # A new neo4j.Result object
        parent_list = result_obj.data()
        # EXAMPLE of parent_list:
        #       [{'id': 163, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'},
        #        {'id': 150, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'}]
        if self.verbose:
            print(f"parent_list for node {node_id}:", parent_list)

        # Fetch the children (nodes the given node has relationships pointing TO)
        cypher = f"MATCH (n)-[outbound]->(child) WHERE id(n) = {node_id} " \
                 "RETURN id(child) AS id, labels(child) AS labels, type(outbound) AS rel"
        if self.debug:
            print(f"""
            query: {cypher}
            """)
        result_obj = new_session.run(cypher)  # A new neo4j.Result object
        child_list = result_obj.data()
        # EXAMPLE of child_list:
        #       [{'id': 107, 'labels': ['Source Data Row'], 'rel': 'FROM_DATA'},
        #        {'id': 103, 'labels': ['Source Data Row'], 'rel': 'FROM_DATA'}]
        if self.verbose:
            print(f"child_list for node {node_id}:", child_list)

    return {'parent_list': parent_list, 'child_list': child_list}
def get_labels(self) -> [str]:
    """
    Extract and return a list of all the Neo4j labels present in the database.
    No particular order should be expected.
    TODO: test when there are nodes that have multiple labels

    :return: A list of strings
    """
    label_records = self.query("call db.labels() yield label return label")
    return [record['label'] for record in label_records]
def get_relationshipTypes(self) -> [str]:
    """
    Extract and return a list of all the Neo4j relationship types present in the database.
    No particular order should be expected.

    :return: A list of strings
    """
    rel_records = self.query("call db.relationshipTypes() yield relationshipType return relationshipType")
    return [record['relationshipType'] for record in rel_records]
def get_label_properties(self, label: str) -> list:
    """
    Return the distinct property names present on nodes with the given label,
    sorted alphabetically.

    :param label: A string with a Neo4j node label
    :return: A (possibly-empty) list of property-name strings, in alphabetical order
    """
    q = """
        CALL db.schema.nodeTypeProperties()
        YIELD nodeLabels, propertyName
        WHERE $label in nodeLabels and propertyName IS NOT NULL
        RETURN DISTINCT propertyName
        ORDER BY propertyName
        """
    params = {'label': label}
    if self.debug:
        print("q : ", q, " | params : ", params)
    return [res['propertyName'] for res in self.query(q, params)]
#########################################################################################
# #
# METHODS TO GET/CREATE/MODIFY SCHEMA #
# #
#########################################################################################
def get_indexes(self, types=None) -> pd.DataFrame:
    """
    Return all the database indexes, and some of their attributes, as a Pandas dataframe.
    Optionally restrict the type (such as "BTREE") of indexes returned.

    EXAMPLE:
           labelsOrTypes            name          properties   type  uniqueness
         0    [my_label]  index_23b0962b       [my_property]  BTREE   NONUNIQUE
         1    [my_label]       some_name  [another_property]  BTREE      UNIQUE

    :param types: Optional list of index types to limit the result to
    :return: A (possibly-empty) Pandas dataframe
    """
    # A restrictive clause is only included when specific types were requested
    restriction = "with * where type in $types" if types else ""
    type_filter = types if types else []

    q = f"""
      call db.indexes()
      yield name, labelsOrTypes, properties, type, uniqueness
      {restriction}
      return *
      """
    index_records = self.query(q, {"types": type_filter})

    if not index_records:
        return pd.DataFrame([], columns=['name'])   # Empty placeholder, with a 'name' column

    return pd.DataFrame(list(index_records))
def get_constraints(self) -> pd.DataFrame:
    """
    Return all the database constraints, and some of their attributes,
    as a Pandas dataframe with 3 columns:
        name        EXAMPLE: "my_constraint"
        description EXAMPLE: "CONSTRAINT ON ( patient:patient ) ASSERT (patient.patient_id) IS UNIQUE"
        details     EXAMPLE: "Constraint( id=3, name='my_constraint', type='UNIQUENESS',
                              schema=(:patient {patient_id}), ownedIndex=12 )"

    :return: A (possibly-empty) Pandas dataframe
    """
    constraint_records = self.query(
        """
        call db.constraints()
        yield name, description, details
        return *
        """
    )
    if not constraint_records:
        return pd.DataFrame([], columns=['name'])   # Empty placeholder, with a 'name' column

    return pd.DataFrame(list(constraint_records))
def create_index(self, label: str, key: str) -> bool:
    """
    Create a new database index, unless it already exists,
    to be applied to the specified label and key (property).
    The standard name given to the new index is of the form label.key

    EXAMPLE - to index nodes labeled "car" by their key "color":
        create_index("car", "color")
    This new index - if not already in existence - will be named "car.color"

    If an existing index entry contains a list of labels (or types) such as ["l1", "l2"],
    and a list of properties such as ["p1", "p2"],
    then the given pair (label, key) is checked against ("l1_l2", "p1_p2"),
    to decide whether it already exists.

    :param label: A string with the node label to which the index is to be applied
    :param key:   A string with the key (property) name to which the index is to be applied
    :return:      True if a new index was created, or False otherwise
    """
    existing_indexes = self.get_indexes()  # A Pandas dataframe with info about indexes;
    # in particular 2 columns named "labelsOrTypes" and "properties"

    # FIX: guard against an empty dataframe.  When no indexes exist, get_indexes()
    # returns a placeholder frame that LACKS the "labelsOrTypes"/"properties" columns,
    # so the row-wise apply() below must not be attempted on it.
    if existing_indexes.empty:
        existing_standard_name_pairs = []
    else:
        # Build, per row, the standardized ("l1_l2", "p1_p2") pair used for the existence check
        existing_standard_name_pairs = list(existing_indexes.apply(
            lambda x: ("_".join(x['labelsOrTypes']), "_".join(x['properties'])), axis=1))  # Proceed by row
        """
        For example, if the Pandas dataframe existing_indexes contains the following columns:
                            labelsOrTypes     properties
                0                   [car]  [color, make]
                1                [person]          [sex]
        then existing_standard_name_pairs will be: [('car', 'color_make'), ('person', 'sex')]
        """

    if (label, key) in existing_standard_name_pairs:
        return False    # An equivalent index is already present; nothing to do

    # Create the index, with the standard name `{label}.{key}`
    q = f'CREATE INDEX `{label}.{key}` FOR (s:`{label}`) ON (s.`{key}`)'
    if self.debug:
        print(f"""
        query: {q}
        """)
    self.query(q)
    return True
def create_constraint(self, label: str, key: str, type="UNIQUE", name=None) -> bool:
    """
    Create a uniqueness constraint for a node property in the graph,
    unless a constraint with the standard name of the form `{label}.{key}.{type}` is already present

    Note: it also creates an index, and cannot be applied if an index already exists.

    EXAMPLE: create_constraint("patient", "patient_id")

    :param label: A string with the node label to which the constraint is to be applied
    :param key:   A string with the key (property) name to which the constraint is to be applied
    :param type:  For now, the default "UNIQUE" is the only allowed option
    :param name:  Optional name to give to the new constraint; if not provided, a
                  standard name of the form `{label}.{key}.{type}` is used.  EXAMPLE: "patient.patient_id.UNIQUE"
    :return:      True if a new constraint was created, or False otherwise
    """
    assert type == "UNIQUE"
    # TODO: consider other types of constraints

    existing_constraints = self.get_constraints()
    # constraint is created if not already exists.
    # a standard name for a constraint is assigned: `{label}.{key}.{type}` if name was not provided
    cname = (name if name else f"`{label}.{key}.{type}`")
    if cname in list(existing_constraints['name']):
        return False    # A constraint by that name is already present

    try:
        q = f'CREATE CONSTRAINT {cname} ON (s:`{label}`) ASSERT s.`{key}` IS UNIQUE'
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)
        # Note: creation of a constraint will crash if another constraint, or index, already exists
        #       for the specified label and key
        return True
    except Exception:
        # NOTE(review): this broad except also masks unrelated failures (e.g. connection errors)
        #               as a False return - confirm that is acceptable to callers
        return False
def drop_index(self, name: str) -> bool:
    """
    Eliminate the index with the specified name.

    :param name: Name of the index to eliminate
    :return: True if successful or False otherwise (for example, if the index doesn't exist)
    """
    q = f"DROP INDEX `{name}`"
    try:
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)   # This raises an Exception if the index doesn't exist
    except Exception:
        return False
    return True
def drop_all_indexes(self, including_constraints=True) -> None:
    """
    Eliminate all the indexes in the database and, optionally, also get rid of all constraints.

    :param including_constraints: Flag indicating whether to also ditch all the constraints
    :return: None
    """
    if including_constraints:
        # When the APOC plugin is available, one procedure call wipes the schema metadata
        if self.apoc:
            self.query("call apoc.schema.assert({},{})")
        else:
            self.drop_all_constraints()

    for index_name in self.get_indexes()['name']:
        self.drop_index(index_name)
def drop_constraint(self, name: str) -> bool:
    """
    Eliminate the constraint with the specified name.

    :param name: Name of the constraint to eliminate
    :return: True if successful or False otherwise (for example, if the constraint doesn't exist)
    """
    q = f"DROP CONSTRAINT `{name}`"
    try:
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)   # This raises an Exception if the constraint doesn't exist
    except Exception:
        return False
    return True
def drop_all_constraints(self) -> None:
    """
    Eliminate all the constraints in the database
    (in RDF mode, the 'n10s_unique_uri' constraint is preserved).

    :return: None
    """
    for constraint_name in self.get_constraints()['name']:
        if self.rdf and constraint_name == 'n10s_unique_uri':
            continue    # Keep the neosemantics URI-uniqueness constraint in RDF mode
        self.drop_constraint(constraint_name)
#####################################################################################
# #
# METHODS TO CREATE/MODIFY DATA #
# #
#####################################################################################
def create_node_by_label_and_dict(self, labels, properties=None) -> int:
    """
    Create a new node with the given label(s) and with the attributes/values specified
    in the properties dictionary, and return the Neo4j internal ID of the node just created.

    :param labels: A string, or list/tuple of strings, of Neo4j label (ok to include blank spaces)
    :param properties: An optional dictionary of properties to set for the new node.
                       EXAMPLE: {'age': 22, 'gender': 'F'}
    :return: An integer with the Neo4j internal ID of the node just created
    """
    # Build the Cypher fragment for the properties, plus its data-binding dictionary.
    # EXAMPLE:
    #   props_fragment = '{`cost`: $par_1, `item description`: $par_2}'
    #   data_binding   = {'par_1': 65.99, 'par_2': 'the "red" button'}
    (props_fragment, data_binding) = self.dict_to_cypher(properties if properties is not None else {})

    # Turn labels (string or list/tuple of labels) into a string suitable for inclusion into Cypher
    label_fragment = self._prepare_labels(labels)

    # Assemble the complete Cypher query
    cypher = f"CREATE (n {label_fragment} {props_fragment}) RETURN n"
    if self.debug:
        print(f"""
        In create_node_by_label_and_dict().
        query: {cypher}
        parameters: {data_binding}
        """)

    created_records = self.query_expanded(cypher, data_binding, flatten=True)
    return created_records[0]['neo4j_id']  # The Neo4j internal ID of the node just created
def delete_nodes_by_label(self, delete_labels=None, keep_labels=None, batch_size=50000) -> None:
    """
    Empty out (by default completely) the Neo4j database.
    Optionally, only delete nodes with the specified labels, or only keep nodes with the given labels.
    Note: the keep_labels list has higher priority; if a label occurs in both lists, it will be kept.
    IMPORTANT: it does NOT clear indexes; "ghost" labels may remain!  To get rid of those, run drop_all_indexes()

    :param delete_labels: An optional string, or list of strings, indicating specific labels to DELETE
    :param keep_labels: An optional string or list of strings, indicating specific labels to KEEP
                        (keep_labels has higher priority over delete_labels)
    :param batch_size: If truthy, the full-database wipe is done in batches of this many nodes
                       (via apoc.periodic.iterate), to avoid memory errors;
                       if falsy, a single DETACH DELETE statement is used
    :return: None
    """
    if (delete_labels is None) and (keep_labels is None):
        # Delete ALL nodes AND ALL relationship from the database; for efficiency, do it all at once
        if self.verbose:
            print(f" --- Deleting all nodes in the database ---")
        if batch_size:  # In order to avoid memory errors, delete data in batches
            # NOTE(review): this batched path relies on the APOC plugin being installed;
            #               it is not gated on self.apoc - confirm
            q = f"""
            call apoc.periodic.iterate(
            'MATCH (n) RETURN n',
            'DETACH DELETE(n)',
            {{batchSize:{str(batch_size)}, parallel:false}})
            YIELD total, batches, failedBatches
            RETURN total, batches, failedBatches
            """
        else:
            q = "MATCH (n) DETACH DELETE(n)"
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)
        return

    if not delete_labels:
        delete_labels = self.get_labels()  # If no specific labels to delete were given,
        # then consider all labels for possible deletion (unless marked as "keep", below)
    else:
        if type(delete_labels) == str:
            delete_labels = [delete_labels]  # If a string was passed, turn it into a list

    if not keep_labels:
        keep_labels = []  # Initialize list of labels to keep, if not provided
    else:
        if type(keep_labels) == str:
            keep_labels = [keep_labels]  # If a string was passed, turn it into a list

    # Delete all nodes with labels in the delete_labels list,
    #   EXCEPT for any label in the keep_labels list
    for label in delete_labels:
        if not (label in keep_labels):
            if self.verbose:
                print(f" --- Deleting nodes with label: `{label}` ---")
            q = f"MATCH (x:`{label}`) DETACH DELETE x"
            if self.debug:
                print(f"""
                query: {q}
                """)
            self.query(q)
def clean_slate(self, keep_labels=None, drop_indexes=True, drop_constraints=True) -> None:
    """
    Use this to get rid of absolutely everything in the database.
    Optionally, keep nodes with a given label, or keep the indexes, or keep the constraints.

    :param keep_labels: An optional list of strings, indicating specific labels to KEEP
    :param drop_indexes: Flag indicating whether to also ditch all indexes (by default, True)
    :param drop_constraints: Flag indicating whether to also ditch all constraints (by default, True)
    :return: None
    """
    labels_to_keep = keep_labels
    if self.rdf:
        # In RDF mode, always preserve the neosemantics configuration node
        labels_to_keep = (keep_labels + ['_GraphConfig'] if keep_labels else ['_GraphConfig'])

    self.delete_nodes_by_label(keep_labels=labels_to_keep)

    if drop_indexes:
        self.drop_all_indexes(including_constraints=drop_constraints)
def set_fields(self, labels, set_dict, properties_condition=None, cypher_clause=None, cypher_dict=None) -> None:
    """
    Locate the nodes selected by the labels/conditions, and create or update
    the given attributes on all of them.

    EXAMPLE - locate the "car" with vehicle id 123 and set its color to white and price to 7000:
        set_fields(labels = "car", set_dict = {"color": "white", "price": 7000},
                   properties_condition = {"vehicle id": 123})

    LIMITATION: blanks are allowed in the keys of properties_condition, but not in those of set_dict

    :param labels: A string, or list/tuple of strings, representing Neo4j labels
    :param set_dict: A dictionary of field name/values to create/update the node's attributes
                     (note: no blanks are allowed in the keys)
    :param properties_condition: As in _match_nodes()
    :param cypher_clause: As in _match_nodes()
    :param cypher_dict: As in _match_nodes()
    :return: None
    """
    # FIX: an empty set_dict would produce a bare "SET" with no assignments,
    #      which is a Cypher syntax error; treat it as a no-op instead
    if not set_dict:
        return

    (cypher_match, cypher_dict) = self._match_nodes(labels, properties_condition=properties_condition,
                                                    cypher_clause=cypher_clause, cypher_dict=cypher_dict)

    set_list = []
    for field_name, field_value in set_dict.items():  # field_name, field_value are key/values in set_dict
        set_list.append("n.`" + field_name + "` = $" + field_name)  # Example: "n.`field1` = $field1"
        cypher_dict[field_name] = field_value  # Extend the Cypher data-binding dictionary
    # Example of cypher_dict at the end of the loop: {"field1": value1, "field2": value2}

    set_clause = "SET " + ", ".join(set_list)  # Example: "SET n.field1 = $field1, n.field2 = $field2"

    # FIX: insert a separating blank between the MATCH part and the SET clause,
    #      rather than relying on Cypher's tolerance of ")SET"
    cypher = cypher_match + " " + set_clause

    # Example of cypher:
    #   "MATCH (n :`car` {`vehicle id`: $par_1}) SET n.`color` = $color, n.`price` = $price"
    # Example of data binding:
    #   {"par_1": 123, "color": "white", "price": 7000}
    if self.debug:
        print("cypher: ", cypher)
        print("data_binding: ", cypher_dict)

    self.query(cypher, cypher_dict)
def extract_entities(self,
                     mode='merge',
                     label=None,
                     cypher=None,
                     cypher_dict=None,
                     target_label=None,
                     property_mapping=None,
                     relationship=None,
                     direction='<'
                     ):
    """
    Extract properties from existing nodes into newly merged/created nodes,
    linked back to the source nodes by the given relationship.

    :param mode: str; assert mode in ['merge', 'create']
    :param label: str; label of the nodes to extract data from
    :param cypher: str; only if label not provided: cypher that returns id(node) of the nodes to extract data from
        EXAMPLE:
        cypher = '''
        MATCH (f:`Source Data Table`{{_domain_:$domain}})-[:HAS_DATA]->(node:`Source Data Row`)
        RETURN id(node)
        '''
    :param cypher_dict: None/dict parameters required for the cypher query
        EXAMPLE:
        cypher_dict={'domain':'ADSL'}
    :param target_label: label(s) of the newly created nodes with extracted data: list or str
    :param property_mapping: dict or list (default: empty mapping)
        if dict: keys correspond to the property names of source data (e.g. Source Data Row) and values correspond
        to the property names of the target class where the data is extracted to
        if list: properties of the extracted node (as per the list) will be extracted and will be named same as
        in the source node
    :param relationship: type of the relationship (to/from the extraction node) to create
    :param direction: direction of the relationship to create (>: to the extraction node, <: from the extraction node)
    :return: None
    """
    # FIX: replaced the mutable default argument `property_mapping={}` - a shared
    #      dict across calls is a classic Python pitfall; None is the safe sentinel
    if property_mapping is None:
        property_mapping = {}

    assert mode in ['merge', 'create']
    assert direction in ['>', '<']
    assert type(property_mapping) in [dict, list]
    assert type(target_label) in [list, str] or target_label is None

    if target_label:
        if type(target_label) == str:
            target_label = [target_label]
        if type(property_mapping) == list:
            # A list means: extract these properties, keeping their original names
            property_mapping = {k: k for k in property_mapping}
        for key in property_mapping.keys():
            for lbl in target_label:
                self.create_index(lbl, key)
            self.create_index(label, key)

    q_match_part = f"MATCH (data:`{label}`) RETURN data"
    q_match_altered = False
    if cypher:
        if not cypher_dict:
            cypher_dict = {}
        # Collect the $-parameters referenced in the caller-supplied cypher
        # (FIX: renamed from `all`, which shadowed the builtin)
        referenced_params = [x[1:] for x in re.findall(r'\$\w+\b', cypher)]
        missing_params = set(referenced_params) - set(cypher_dict.keys())
        if not missing_params:
            q_match_part = """
            CALL apoc.cypher.run($cypher, $cypher_dict) YIELD value
            MATCH (data) WHERE id(data) = value['id(node)']
            RETURN data
            """
            q_match_altered = True
        else:
            if self.debug:
                print("ERROR: not all parameters have been supplied in cypher_dict, missing: ", missing_params)

    # Relationship arrowheads, per the requested direction
    rel_left = ('' if direction == '>' else '<')
    rel_right = ('>' if direction == '>' else '')
    q = f"""
    call apoc.periodic.iterate(
        $q_match_part
        ,
        '
        WITH data, apoc.coll.intersection(keys($mapping), keys(data)) as common_keys
        {("" if mode == "create" else "WHERE size(common_keys) > 0")}
        WITH data, apoc.map.fromLists([key in common_keys | $mapping[key]], [key in common_keys | data[key]]) as submap
        call apoc.{mode}.node($target_label, submap) YIELD node MERGE (data){rel_left}-[:`{relationship}`]-{rel_right}(node)
        ',
        {{batchSize:10000, parallel:false, params: $inner_params}})
    YIELD total, batches, failedBatches
    RETURN total, batches, failedBatches
    """
    inner_params = {'target_label': target_label,
                    'mapping': property_mapping}
    if q_match_altered:
        inner_params = {**inner_params, 'cypher': cypher, 'cypher_dict': cypher_dict}
    params = {'q_match_part': q_match_part, 'target_label': target_label, 'inner_params': inner_params}

    res = self.query(q, params)
    if self.debug:
        print(" Query : ", q)
        print(" Query parameters: ", params)
        print(" Result of above query : ", res, "\n")
#########################################################################################
# #
# METHODS TO CREATE NEW RELATIONSHIPS #
# #
#########################################################################################
def link_entities(self,
                  left_class: str,
                  right_class: str,
                  relationship="_default_",
                  cond_via_node=None,
                  cond_left_rel=None,
                  cond_right_rel=None,
                  cond_cypher=None,
                  cond_cypher_dict=None):
    """
    Creates relationship of type {relationship} between pairs of nodes of the two given classes,
    selected either through a shared intermediate node (cond_via_node / cond_left_rel / cond_right_rel)
    or through an arbitrary Cypher query (cond_cypher).

    :param left_class: Name of the left class
    :param right_class: Name of the right class
    :param relationship: Name to give the relationship (if None: will use name of right_class (f'HAS_{right_class.upper())')
    :param cond_via_node: Name of central node from which relationships will be created
    :param cond_left_rel: Name and direction of relationship from right_class (e.g. FROM_DATA> or <FROM_DATA)
    :param cond_right_rel: Name and direction of relationship from left_class (e.g. FROM_DATA> or <FROM_DATA)
    :param cond_cypher: (optional) - if not None: cond_via_node, cond_left_rel, cond_right_rel will be ignored
                        instead the cypher query will be run which return nodes 'left' and 'right' to be linked
                        with relationship of type {relationship}
    :param cond_cypher_dict: parameters required for the cypher query

    NOTE(review): when cond_cypher is None, cond_left_rel and cond_right_rel must be non-None
                  strings - the assertions below dereference them with .startswith()/.endswith().
    NOTE(review): in the cond_cypher path the '_default_' relationship substitution is skipped,
                  so a literal `_default_` relationship type would be created - confirm intended.
    """
    # checking compliance of provided parameters
    if not cond_cypher:
        # A relationship spec must not point in both directions at once
        assert not (cond_left_rel.startswith("<") and cond_left_rel.endswith(">"))
        assert not (cond_right_rel.startswith("<") and cond_right_rel.endswith(">"))
        if relationship == '_default_':
            relationship = f'HAS_{right_class.upper()}'
        # Split each relationship spec into its optional arrowheads and its bare type name
        cond_left_rel_mark1 = ""
        cond_left_rel_mark2 = ""
        if cond_left_rel.startswith("<"):
            cond_left_rel_mark1 = "<"
        if cond_left_rel.endswith(">"):
            cond_left_rel_mark2 = ">"
        cond_left_rel_type = re.sub(r'^(\<)?(.*?)(\>)?$', r'\2', cond_left_rel)
        cond_right_rel_mark1 = ""
        cond_right_rel_mark2 = ""
        if cond_right_rel.startswith("<"):
            cond_right_rel_mark1 = "<"
        if cond_right_rel.endswith(">"):
            cond_right_rel_mark2 = ">"
        cond_right_rel_type = re.sub(r'^(\<)?(.*?)(\>)?$', r'\2', cond_right_rel)

    if cond_cypher:
        if self.verbose:
            print(
                f"Using cypher condition to link nodes. Labels: {left_class}, {right_class}; Cypher: {cond_cypher}")
        periodic_part1 = """
        CALL apoc.cypher.run($cypher, $cypher_dict) YIELD value
        RETURN value.`left` as left, value.`right` as right
        """
    else:
        periodic_part1 = f'''
        MATCH (left){cond_left_rel_mark1}-[:`{cond_left_rel_type}`*0..1]-{cond_left_rel_mark2}(sdr:`{cond_via_node}`),
        (sdr){cond_right_rel_mark1}-[:`{cond_right_rel_type}`*0..1]-{cond_right_rel_mark2}(right)
        WHERE left:`{left_class}` and right:`{right_class}`
        RETURN left, right
        '''
    q = f"""
    call apoc.periodic.iterate(
    '{periodic_part1}',
    '
    MERGE (left)-[:`{relationship}`]->(right)
    ',
    {{batchSize:10000, parallel:false, params: {{cypher: $cypher, cypher_dict: $cypher_dict}}}})
    YIELD total, batches, failedBatches
    RETURN total, batches, failedBatches
    """
    # NOTE(review): $cypher/$cypher_dict are always passed (None in the non-cypher path,
    #               where the inner query does not reference them)
    params = {'cypher': cond_cypher, 'cypher_dict': cond_cypher_dict}
    if self.debug:
        print(" Query : ", q)
        print(" Query parameters: ", params)
    self.query(q, params)
def link_nodes_on_matching_property(self, label1: str, label2: str, property1: str, rel: str,
                                    property2=None) -> None:
    """
    Locate any pair of Neo4j nodes where all of the following hold:
        1) the first one has label1
        2) the second one has label2
        3) the two nodes agree in the value of property1 (if property2 is None),
           or in the values of property1 in the 1st node and property2 in the 2nd node
    For any such pair found, add a relationship - with the name specified in the rel argument -
    from the 1st to 2nd node, unless already present.

    :param label1: A string against which the label of the 1st node must match
    :param label2: A string against which the label of the 2nd node must match
    :param property1: Name of property that must be present in the 1st node (and also in 2nd node, if property2 is None)
    :param property2: Name of property that must be present in the 2nd node (may be None)
    :param rel: Name to give to all relationships that get created
    :return: None
    """
    if not property2:
        property2 = property1

    # FIX: back-tick the relationship name, for consistency with link_nodes_by_ids()
    #      and so that names containing blanks or special characters also work
    q = f'''MATCH (x:`{label1}`), (y:`{label2}`) WHERE x.`{property1}` = y.`{property2}`
        MERGE (x)-[:`{rel}`]->(y)'''
    if self.debug:
        print(f"""
        query: {q}
        """)
    self.query(q)
def link_nodes_on_matching_property_value(self, label1: str, label2: str, prop_name: str, prop_value: str,
                                          rel: str) -> None:
    """
    Locate any pair of Neo4j nodes where all of the following hold:
        1) the first one has label1
        2) the second one has label2
        3) both nodes have a property with the specified name
        4) the string values of the properties in (3) in the two nodes are both equal to the specified value
    For any such pair found, add a relationship - with the name specified in the rel argument -
    from the 1st to 2nd node, unless already present.

    :param label1: A string against which the label of the 1st node must match
    :param label2: A string against which the label of the 2nd node must match
    :param prop_name: Name of property that must be present in both nodes
    :param prop_value: A STRING value that the above property must have in both nodes
    :param rel: Name to give to all relationships that get created
    :return: None
    """
    # FIX: pass prop_value as a bound Cypher parameter rather than interpolating it
    #      into the query text - the old f-string form broke on values containing
    #      double-quotes and allowed Cypher injection.  Also back-tick the relationship
    #      name, for consistency with link_nodes_by_ids().
    q = f'''MATCH (x:`{label1}`), (y:`{label2}`)
        WHERE x.`{prop_name}` = $prop_value AND y.`{prop_name}` = $prop_value
        MERGE (x)-[:`{rel}`]->(y)'''
    data_binding = {"prop_value": prop_value}
    if self.debug:
        print(f"""
        query: {q}
        parameters: {data_binding}
        """)
    self.query(q, data_binding)
def link_nodes_by_ids(self, node_id1: int, node_id2: int, rel: str, rel_props=None) -> None:
    """
    Locate the pair of Neo4j nodes with the given Neo4j internal ID's and,
    unless it is already present, add a relationship from the 1st to the 2nd node,
    with the specified name and (optional) properties.
    TODO: maybe return the Neo4j ID of the relationship just created

    :param node_id1: An integer with the Neo4j internal ID to locate the 1st node
    :param node_id2: An integer with the Neo4j internal ID to locate the 2nd node
    :param rel: A string specifying a Neo4j relationship name
    :param rel_props: Optional dictionary with the relationship properties.
                      EXAMPLE: {'since': 2003, 'code': 'xyz'}
    :return: None
    """
    # Turn the optional relationship properties into a Cypher fragment plus a binding dict.
    # EXAMPLE of props_fragment: '{cost: $par_1, code: $par_2}'   (possibly blank)
    # EXAMPLE of data_binding:   {'par_1': 65.99, 'par_2': 'xyz'} (possibly empty)
    props_fragment, data_binding = self.dict_to_cypher(rel_props)

    q = f"""
    MATCH (x), (y)
    WHERE id(x) = $node_id1 and id(y) = $node_id2
    MERGE (x)-[:`{rel}` {props_fragment}]->(y)
    """
    # Extend the (possibly empty) binding dictionary with the two node ID's
    data_binding.update({"node_id1": node_id1, "node_id2": node_id2})

    if self.debug:
        print(f"""
        query: {q}
        parameters: {data_binding}
        """)

    self.query(q, data_binding)
#####################################################################################################
# #
# METHODS TO READ IN DATA #
# #
#####################################################################################################
def load_df(
        self,
        df: pd.DataFrame,
        label: str,
        merge=False,
        primary_key=None,
        merge_overwrite=False,
        rename=None,
        max_chunk_size=10000) -> list:
    """
    Load a Pandas data frame into Neo4j.
    Each line is loaded as a separate node.
    TODO: maybe save the Panda data frame's row number as an attribute of the Neo4j nodes, to ALWAYS have a primary key

    :param df: A Pandas data frame to import into Neo4j
    :param label: String with a Neo4j label to use on the newly-created nodes
    :param merge: If True, records are replaced, rather than added, if already present;
                  if False, always added
    :param primary_key: Only applicable when merging.  String with the name of the field that
                        serves as a primary key; if a new record with that field is to be added,
                        it'll replace the current one
                        TODO: to allow for list of primary_keys
    :param merge_overwrite: If True then on merge the existing nodes will be overwritten with the new data,
                            otherwise they will be updated with new information (keys that are not present in the df
                            will not be deleted)
    :param rename: Optional dictionary to rename the Pandas dataframe's columns to
                   EXAMPLE {"current_name": "name_we_want"}
    :param max_chunk_size: To limit the number of rows loaded at one time
    :return: List of node ids, created in the operation
    """
    if isinstance(df, pd.Series):
        df = pd.DataFrame(df)   # Promote a Series to a 1-column dataframe
    if rename is not None:
        df = df.rename(rename, axis=1)  # Rename the columns in the Pandas data frame

    primary_key_s = ''
    if primary_key is not None:
        # Ensure an index on the primary-key property exists before merging on it
        neo_indexes = self.get_indexes()
        if f"{label}.{primary_key}" not in list(neo_indexes['name']):
            self.create_index(label, primary_key)
            time.sleep(1)  # sleep to give Neo4j time to populate the index
        primary_key_s = '{' + f'`{primary_key}`:record[\'{primary_key}\']' + '}'
        # EXAMPLE of primary_key_s: "{patient_id:record['patient_id']}"

    op = 'MERGE' if (merge and primary_key) else 'CREATE'  # A MERGE or CREATE operation, as needed

    res = []
    # Split the operation into batches, to bound the size of each query payload
    for df_chunk in np.array_split(df, int(len(df.index) / max_chunk_size) + 1):
        # "SET x+=record" updates properties; "SET x=record" (merge_overwrite) replaces them all
        cypher = f'''
        WITH $data AS data
        UNWIND data AS record {op} (x:`{label}`{primary_key_s})
        SET x{('' if merge_overwrite else '+')}=record
        RETURN id(x) as node_id
        '''
        cypher_dict = {'data': df_chunk.to_dict(orient='records')}
        if self.debug:
            print(f"""
            query: {cypher}
            parameters: {cypher_dict}
            """)
        res_chunk = self.query(cypher, cypher_dict)
        if res_chunk:
            res += [r['node_id'] for r in res_chunk]
    return res
    def load_dict(self, dct: dict, label="Root", rel_prefix="", maxdepth=10):
        """
        Loads python dict to Neo4j (auto-unpacking hierarchy)
        Children of type dict converted into related nodes with relationship {rel_prefix}_{key}
        Children of type list (of dict or other) converted into:
        - multiple related nodes for list items of type dict
        - properties of parent node of type list in case list items
        see example in tests.test_json.test_import_custom_json
        NOTE: requires the APOC plugin on the Neo4j server (apoc.merge.*, apoc.do.*, apoc.convert.*)
        :param dct: python dict to load
        :param label: label to assign to the root node
        :param rel_prefix: prefix to add to relationship name from parent to child
        :param maxdepth: maximum possible depth(of children) of dict
        :return: None
        """
        # Initial load: the complete dict is stored as a JSON string in the `value` property
        # of a single node carrying the transient `JSON` label; that label marks nodes
        # that still await unpacking and is removed again once a node has been expanded.
        j = json.dumps(dct)
        self.query(
            """
            CALL apoc.merge.node(['JSON',$label],{value:$value})
            YIELD node
            RETURN node
            """
            ,
            {'label': label, 'value': j}
        )
        i = 0
        # Unpacking hierarchy: loop until no nodes with the JSON label are left, or maxdepth is reached.
        # Each pass expands one level: for every JSON node, each key of its stored map becomes
        # - a related child node (MAP values),
        # - child nodes and/or a list property on the parent (LIST values), or
        # - a plain property (scalar values, handled by the apoc.do.case default branch);
        # afterwards the JSON label and the `value` property are removed from the parent.
        while (self.query("MATCH (j:JSON) RETURN j LIMIT 1")) and i < maxdepth:
            self.query("""
            MATCH (j:JSON)
            WITH j, apoc.convert.fromJsonMap(j.value) as map
            WITH j, map, keys(map) as ks UNWIND ks as k
            call apoc.do.case([
                apoc.meta.type(map[k]) = 'MAP'
                ,
                '
                CALL apoc.merge.node(["JSON", $k], {value: apoc.convert.toJson($map[$k])}) YIELD node
                CALL apoc.merge.relationship(j,$rel_prefix + k, {}, {}, node, {}) YIELD rel
                RETURN node, rel
                '
                ,
                apoc.meta.type(map[k]) = 'LIST'
                ,
                '
                //first setting LIST property on main node
                WITH j, map, k, [i in map[k] WHERE apoc.meta.type(i) <> "MAP"] as not_map_lst
                call apoc.do.when(
                    size(not_map_lst) <> 0,
                    "call apoc.create.setProperty([j], $k, $not_map_lst) YIELD node RETURN node",
                    "RETURN j",
                    {j:j, k:k, not_map_lst:not_map_lst}
                ) YIELD value
                WITH *, [i in map[k] WHERE NOT i IN not_map_lst] as map_lst
                UNWIND map_lst as item_map
                CALL apoc.merge.node(["JSON", $k], {value: apoc.convert.toJson(item_map)}) YIELD node
                CALL apoc.merge.relationship(j,$rel_prefix + k, {}, {}, node, {}) YIELD rel
                RETURN node, rel
                '
            ]
            ,
            '
            call apoc.create.setProperty([j], $k, $map[$k]) YIELD node
            RETURN node
            '
            ,
            {k: k, map: map, j: j, rel_prefix: $rel_prefix}
            ) YIELD value
            WITH DISTINCT j
            REMOVE j:JSON
            REMOVE j.value
            """, {"rel_prefix": rel_prefix})
            i += 1
    def load_arrows_dict(self, dct: dict, merge_on=None, always_create=None, timestamp=False):
        """
        Loads data created in prototyping tool https://arrows.app/
        Uses MERGE statement separately on each node and each relationship using all properties as identifying properties
        Example of use:
        with open("arrows.json", 'r') as jsonfile:
            dct = json.load(jsonfile)
        neo = NeoInterface()
        neo.load_arrows_dict(dct)
        :param dct: python dict to load
        :param merge_on: None or dict with label as key and list of properties as value - the properties will be used
        as identProps in apoc.merge.node, the rest of the properties will be used as onCreateProps and onMatchProps
        :param always_create: None or list of labels; nodes carrying any of these labels are always CREATEd
        (never MERGEd), even if nodes with the same identifying properties already exist
        :param timestamp: if True, a `_timestamp` property (epoch millis, from Cypher timestamp()) is added
        to the on-create/on-match properties of every node
        :return: result of the corresponding Neo4j query
        """
        assert merge_on is None or isinstance(merge_on, dict)
        if not merge_on:
            merge_on = {}
        for key, item in merge_on.items():
            assert isinstance(item, list)
        assert always_create is None or isinstance(always_create, list)
        # The query below: (1) resolves identifying vs on-match properties per node (falling back to the
        # arrows `caption` when no identifying property exists), (2) CREATEs or MERGEs each node,
        # (3) MERGEs each relationship via the node-id -> node map built from step 2.
        q = """
        UNWIND $map['nodes'] as nd
        WITH *, apoc.coll.intersection(nd['labels'], keys($merge_on)) as hc_labels // list of relevant labels from the merge_on map
        WITH *, apoc.coll.toSet(apoc.coll.flatten(apoc.map.values($merge_on, hc_labels))) as hc_props // list of relevant properties
        WITH *, [prop in hc_props WHERE prop in keys(nd['properties'])] as hc_props // filter to keep only the existing ones
        WITH
        *,
        CASE WHEN size(nd['labels']) = 0 THEN
            ['No Label']
        ELSE
            nd['labels']
        END as labels,
        CASE WHEN size(hc_props) > 0 THEN
            {
                identProps:
                    CASE WHEN size(apoc.coll.intersection(keys(nd['properties']), hc_props)) = 0 and nd['caption'] <> '' THEN
                        {value: nd['caption']}
                    ELSE
                        apoc.map.submap(nd['properties'], hc_props)
                    END
                ,
                onMatchProps: apoc.map.submap(nd['properties'], [key in keys(nd['properties'])
                                                                 WHERE NOT key IN hc_props])
            }
        ELSE
            {
                identProps:
                    CASE WHEN size(keys(nd['properties'])) = 0 and nd['caption'] <> '' THEN
                        {value: nd['caption']}
                    ELSE
                        nd['properties']
                    END
                ,
                onMatchProps: {}
            }
        END as props
        WITH
            nd,
            labels,
            props['identProps'] as identProps,
            props['onMatchProps'] as onMatchProps,
            props['onMatchProps'] as onCreateProps //TODO: change if these need to differ in the future
        //dummy property if no properties are ident
        WITH *, CASE WHEN identProps = {} THEN {_dummy_prop_:1} ELSE identProps END as identProps
        """ + \
            ("""
            WITH
            *,
            apoc.map.mergeList([onCreateProps, {_timestamp: timestamp()}]) as onCreateProps,
            apoc.map.mergeList([onMatchProps, {_timestamp: timestamp()}]) as onMatchProps
            """ if timestamp else "") + \
            """
        CALL apoc.do.when(
            size(apoc.coll.intersection(labels, $always_create)) > 0,
            "CALL apoc.create.node($labels, apoc.map.mergeList([$identProps, $onMatchProps, $onCreateProps])) YIELD node RETURN node",
            "CALL apoc.merge.node($labels, $identProps, $onMatchProps, $onCreateProps) YIELD node RETURN node",
            {labels: labels, identProps:identProps, onMatchProps:onMatchProps, onCreateProps:onCreateProps}
        ) yield value as value2
        WITH *, value2['node'] as node
        //eliminating dummy property
        CALL apoc.do.when(
            identProps = {_dummy_prop_: 1},
            'REMOVE node._dummy_prop_ RETURN node',
            'RETURN node',
            {node: node}
        ) YIELD value
        WITH *
        WITH apoc.map.fromPairs(collect([nd['id'], node])) as node_map
        UNWIND $map['relationships'] as rel
        call apoc.merge.relationship(
            node_map[rel['fromId']],
            CASE WHEN rel['type'] = '' OR rel['type'] IS NULL THEN 'RELATED' ELSE rel['type'] END,
            rel['properties'],
            {},
            node_map[rel['toId']], {}
        )
        YIELD rel as relationship
        WITH node_map, apoc.map.fromPairs(collect([rel['id'], relationship])) as rel_map
        RETURN node_map, rel_map
        """
        params = {
            'map': dct,
            'merge_on': (merge_on if merge_on else {}),
            'always_create': (always_create if always_create else [])
        }
        res = self.query(q, params)
        if res:
            return res[0]
        else:
            return None
############################################################################################
# #
# UTILITY METHODS #
# #
############################################################################################
def dict_to_cypher(self, data_dict: {}) -> (str, {}):
"""
Turn a Python dictionary (meant for specifying node or relationship attributes)
into a string suitable for Cypher queries,
plus its corresponding data-binding dictionary.
EXAMPLE :
{'cost': 65.99, 'item description': 'the "red" button'}
will lead to
(
'{`cost`: $par_1, `item description`: $par_2}',
{'par_1': 65.99, 'par_2': 'the "red" button'}
)
Note that backticks are used in the Cypher string to allow blanks in the key names.
Consecutively-named dummy variables ($par_1, $par_2, etc) are used,
instead of names based on the keys of the data dictionary (such as $cost),
because the keys might contain blanks.
:param data_dict: A Python dictionary
:return: A pair consisting of a string suitable for Cypher queries,
and a corresponding data-binding dictionary.
If the passed dictionary is empty or None,
the pair returned is ("", {})
"""
if data_dict is None or data_dict == {}:
return ("", {})
rel_props_list = [] # A list of strings
data_dictionary = {}
parameter_count = 1 # Sequential integers used in the data dictionary, such as "par_1", "par_2", etc.
for prop_key, prop_value in data_dict.items():
parameter_token = f"par_{parameter_count}" # EXAMPLE: "par_3"
# Extend the list of Cypher property relationships and their corresponding data dictionary
rel_props_list.append(f"`{prop_key}`: ${parameter_token}") # The $ refers to the data binding
data_dictionary[parameter_token] = prop_value
parameter_count += 1
rel_props_str = ", ".join(rel_props_list)
rel_props_str = "{" + rel_props_str + "}"
return (rel_props_str, data_dictionary)
############################################################################################
# #
# METHODS TO SUPPORT DEBUGGING #
# #
############################################################################################
def neo4j_query_params_from_dict(self, params: dict, char_limit=500) -> str:
"""
Given a Python dictionary, meant to represent value/key pairs,
compose and return a string suitable for pasting into the Neo4j browser, for testing purposes.
EXAMPLE: {'age': 22, 'gender': 'F'}
will produce the string
:param age=> 22;
:param gender=> 'F';
:param params: query parameters in the form of python dict
:param char_limit: limit number of characters to include in each line
:return: string of parameters to paste into Neo4j browser for testing procedures in the browser
"""
s = "" # String suitable for pasting into the Neo4j browser
for key, item in params.items():
prefix = "".join([":param ", key, "=> "])
if type(item) == int:
res = ("".join([prefix, str(item), ";"]))
elif type(item) == dict:
cypher_dict = "".join(["apoc.map.fromPairs([" + ",".join(
[f"['{key2}', {item2}]" for key2, item2 in item.items()]) + "])"])
res = ("".join([prefix, cypher_dict, ";"]))
else:
res = ("".join([prefix, "".join(['\'', str(item), '\'']), ";"]))
s += res[:char_limit] + "\n"
return s
############################################################################################
# #
# METHODS TO SUPPORT JSON IMPORT/EXPORT #
# #
############################################################################################
def export_dbase_json(self) -> {}:
"""
Export the entire Neo4j database as a JSON string
EXAMPLE:
{ 'nodes': 2,
'relationships': 1,
'properties': 6,
'data': '[{"type":"node","id":"3","labels":["User"],"properties":{"name":"Adam","age":32,"male":true}},\n
{"type":"node","id":"4","labels":["User"],"properties":{"name":"Eve","age":18}},\n
{"id":"1","type":"relationship","label":"KNOWS","properties":{"since":2003},"start":{"id":"3","labels":["User"]},"end":{"id":"4","labels":["User"]}}\n
]'
}
NOTE: the Neo4j Browser uses a slightly different format for NODES:
{
"identity": 4,
"labels": [
"User"
],
"properties": {
"name": "Eve",
"age": 18
}
}
and a substantially more different format for RELATIONSHIPS:
{
"identity": 1,
"start": 3,
"end": 4,
"type": "KNOWS",
"properties": {
"since": 2003
}
}
:return: A dictionary specifying the number of nodes exported,
the number of relationships, and the number of properties,
as well as a "data" field with the actual export in JSON format
"""
cypher = '''
CALL apoc.export.json.all(null,{useTypes:true, stream: true})
YIELD nodes, relationships, properties, data
RETURN nodes, relationships, properties, data
'''
result = self.query(cypher) # It returns a list with a single element
export_dict = result[0]
# print(export_dict)
pseudo_json = export_dict["data"]
# Who knows why, the string returned by the APOC function isn't actual JSON! :o Some tweaking needed to produce valid JSON...
json = "[" + pseudo_json.replace("\n", ",\n ") + "\n]" # The newlines \n make the JSON much more human-readable
export_dict["data"] = json
# print(export_dict)
return export_dict
    def import_json_data(self, json_str: str):
        """
        Import nodes and/or relationships into the database, as directed by the given data dump in JSON form.
        Note: the id's of the nodes need to be shifted,
        because one cannot force the Neo4j internal id's to be any particular value...
        and, besides (if one is importing into an existing database), particular id's may already be taken.
        :param json_str: A JSON string with the format specified under export_dbase_json()
        :return: A status message with import details if successful, or an Exception if not
        """
        try:
            json_list = json.loads(json_str)    # Turn the string (representing a JSON list) into a list
        except Exception as ex:
            raise Exception(f"Incorrectly-formatted JSON string. {ex}")
        if self.debug:
            print("json_list: ", json_list)
        assert type(json_list) == list, "The JSON string does not represent the expected list"
        id_shifting = {}  # To map the Neo4j internal ID's specified in the JSON data dump
        # into the ID's of newly-created nodes
        # PASS 1 (validation only): check every item for correctness up front,
        # to try to avoid partial imports
        for i, item in enumerate(json_list):
            # We use item.get(key_name) to handle without error situation where the key is missing
            if (item.get("type") != "node") and (item.get("type") != "relationship"):
                raise Exception(
                    f"Item in list index {i} must have a 'type' of either 'node' or 'relationship'. Nothing imported. Item: {item}")
            if item["type"] == "node":
                if "id" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'node' but it lacks an 'id'. Nothing imported. Item: {item}")
            elif item["type"] == "relationship":
                # A relationship needs a label plus fully-identified start and end nodes
                if "label" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but lacks a 'label'. Nothing imported. Item: {item}")
                if "start" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but lacks a 'start' value. Nothing imported. Item: {item}")
                if "end" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but lacks a 'end' value. Nothing imported. Item: {item}")
                if "id" not in item["start"]:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but its 'start' value lacks an 'id'. Nothing imported. Item: {item}")
                if "id" not in item["end"]:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but its 'end' value lacks an 'id'. Nothing imported. Item: {item}")
        # PASS 2: process all the nodes, and in the process create the id_shifting map
        num_nodes_imported = 0
        for item in json_list:
            if item["type"] == "node":
                if self.debug:
                    print("ADDING NODE: ", item)
                    print(f'     Creating node with label `{item["labels"][0]}` and properties {item["properties"]}')
                old_id = int(item["id"])
                new_id = self.create_node_by_label_and_dict(item["labels"][0], item[
                    "properties"])  # TODO: Only the 1st label is used for now
                id_shifting[old_id] = new_id
                num_nodes_imported += 1
        if self.debug:
            print("id_shifting map:", id_shifting)
        # PASS 3: process all the relationships, linking to the correct (newly-created) nodes by using the id_shifting map
        num_rels_imported = 0
        for item in json_list:
            if item["type"] == "relationship":
                if self.debug:
                    print("ADDING RELATIONSHIP: ", item)
                rel_name = item["label"]
                rel_props = item.get(
                    "properties")  # Also works if no "properties" is present (relationships may lack it)
                start_id_original = int(item["start"]["id"])
                end_id_original = int(item["end"]["id"])
                start_id_shifted = id_shifting[start_id_original]
                end_id_shifted = id_shifting[end_id_original]
                # print(f'     Creating relationship named `{rel_name}` from node {start_id_shifted} to node {end_id_shifted}, with properties {rel_props}')
                self.link_nodes_by_ids(start_id_shifted, end_id_shifted, rel_name, rel_props)
                num_rels_imported += 1
        return f"Successful import of {num_nodes_imported} node(s) and {num_rels_imported} relationship(s)"
############################################################################################
# #
# METHODS TO SUPPORT RDF PROCEDURES #
# #
############################################################################################
    def rdf_generate_uri(self,
                         dct={},
                         include_label_in_uri=True,
                         prefix='neo4j://graph.schema#',
                         add_prefixes=[],
                         sep='/',
                         uri_prop='uri') -> None:
        """
        A method that
        - on the neo4j nodes with labels equal to keys of :dict dictionary
        - sets additional label Resource (for handling in RDF)
        - sets property with name :uri_prop with value that starts with prefix followed by a string
        built by concatenating with separator :sep the list of :add_prefixes together with values of
        properties on each node that are specified in the values of the :dict (different set for each Neo4j label)
        Used for the purpose of being able to save and restore subgraphs using methods rdf_get_subgraph and
        rdf_import_subgraph_inline.
        :param dct: dictionary describing set of node properties that construct a primary key (and eventually uri) for that node
        EXAMPLE1 (simple):
            dct = {
                'Vehicle': ['type', 'model'],
                'Car': ['model', 'fuel']
            }
            generate_uri(dct)
            will set property uri like 'neo4j://graph.schema#car/toyota' on nodes with labels Vehicle
            (in case v.type == 'car' and v.model == 'toyota')
            and set property uri like 'neo4j://graph.schema#toyota/petrol' on nodes with labels Car
            (in case c.model == 'toyota' and v.fuel == 'petrol')
        EXAMPLE2 (properties and neighbouring properties):
            graph = CREATE (v:Vehicle{`producer`: 'Toyota'}),
                           (m:Model{`name`: 'Prius'}),
                           (v)-[:HAS_MODEL]->(m)
            dct = {
                "Vehicle": {"properties": "producer"},
                "Model": {"properties": ["name"],
                          "neighbours": [
                              {"label": "Vehicle", "relationship": "HAS_MODEL", "property": "producer"}
                          ]
                }
            }
            set URI on 'Vehicle' nodes using node's property "producer"
                uri = 'neo4j://graph.schema#Vehicle/Toyota'
            set URI on 'Model' nodes using node's property "name" and neighbouring node's property "producer"
                uri = 'neo4j://graph.schema#Model/Toyota/Prius'
        :param include_label_in_uri: if True, the node's label is included in the uri after the add_prefixes
        :param prefix: a prefix for uri
        :param add_prefixes: list of prefixes to prepend uri with (after prefix), list joined with :sep separator
        :param sep: separator for joining add_prefixes and the primary keys into uri
        :param uri_prop: name of the node property in which to store the generated uri
        :return: None
        """
        for label, config in dct.items():
            assert isinstance(label, str)
            assert any(isinstance(config, t) for t in [list, str, dict])
            where = ""
            neighbours = False
            neighbours_query = ""
            if isinstance(config, str):
                # A single property name
                properties_ext = [config]
            elif isinstance(config, list):
                # A list of property names
                properties_ext = config
            elif isinstance(config, dict):
                # NOTE(review): if a dict config lacks the 'properties' key, properties_ext
                # is not (re)assigned here — presumably 'properties' is always expected to be
                # present in dict configs; verify against callers.
                if 'properties' in config.keys():
                    if isinstance(config['properties'], str):
                        properties_ext = [config['properties']]
                    elif isinstance(config['properties'], list):
                        properties_ext = config['properties']
                if 'neighbours' in config.keys():
                    assert isinstance(config['neighbours'], list), \
                        f"neighbours should be of type LIST [{{}}[, {{}}]] not {type(config['neighbours'])}"
                    for i, neighbour in enumerate(config['neighbours']):
                        if isinstance(neighbour, list):  # if a list converting it to a dict as per req.
                            assert len(neighbour) == 3, \
                                f"each neighbour should be of length 3: [<label>, <relationship>, <property>] got: {neighbour}"
                            neighbour = {'label': neighbour[0], 'relationship': neighbour[1], 'property': neighbour[2]}
                            config['neighbours'][i] = neighbour
                        assert isinstance(neighbour, dict), \
                            f"each neighbour should be of type DICT not {type(neighbour)}"
                        for key in ['label', 'relationship', 'property']:
                            assert key in neighbour.keys(), f"{key} not found in {neighbour}"
                    neighbours = True
                    # Collects, per neighbour spec, the merged property-map of the 1-hop
                    # neighbours reached via the given relationship/label, keyed by spec index
                    neighbours_query = """
                    WITH *
                    UNWIND apoc.coll.zip(range(0,size($neighbours)-1), $neighbours) as pair
                    WITH *, pair[0] as ind, pair[1] as neighbour
                    CALL apoc.path.expand(x, neighbour['relationship'], neighbour['label'], 1, 1)
                    YIELD path
                    WITH x, ind, nodes(path) as ind_neighbours
                    UNWIND ind_neighbours as nbr
                    WITH DISTINCT x, ind, nbr
                    WHERE x<>nbr
                    WITH *
                    ORDER BY x, ind, id(nbr)
                    WITH x, ind, collect(nbr) as coll
                    WITH x, ind, apoc.map.mergeList(coll) as nbr
                    WITH x, collect({index: ind, map: nbr}) as nbrs"""
                if 'where' in config.keys():
                    where = config['where']
            else:
                properties_ext = []
            # Build the per-label query: optional WHERE filter, optional neighbour expansion,
            # then set the Resource label and the url-encoded uri property
            cypher = f"""
            MATCH (x:`{label}`)
            {where}
            {neighbours_query}
            SET x:Resource
            SET
            x.
            `{uri_prop}` = apoc.text.urlencode(
                $prefix + apoc.text.join($add_prefixes + $opt_label +
                {"[nbr in nbrs | nbr['map'][$neighbours[nbr['index']]['property']]] +" if neighbours else ""}
                [prop in $properties | x[prop]], $sep)
            )
            """
            cypher_dict = {
                'prefix': prefix,
                'add_prefixes': add_prefixes,
                'sep': sep,
                'opt_label': ([label] if include_label_in_uri else []),
                'properties': properties_ext
            }
            if neighbours:
                cypher_dict.update({
                    'neighbours': config['neighbours']
                })
            if self.debug:
                print(f"""
                query: {cypher}
                parameters: {cypher_dict}
                """)
            self.query(cypher, cypher_dict)
        self._rdf_uri_cleanup()
def rdf_get_subgraph(self, cypher: str, cypher_dict={}, format="Turtle-star") -> str:
"""
A method that returns an RDF serialization of a subgraph specified by :cypher query
:param cypher: cypher query to return a subgraph
:param cypher_dict: parameters required for the cypher query
:param format: RDF format in which to serialize output
:return: str - RDF serialization of subgraph
"""
self._rdf_subgraph_cleanup()
url = self.rdf_host + "neo4j/cypher"
j = ({'cypher': cypher, 'format': format, 'cypherParams': cypher_dict})
response = requests.post(
url=url,
json=j,
auth=self.credentials)
# TODO: switch to detached HTTP endpoint when code from neo4j is available
# see https://community.neo4j.com/t/export-procedure-that-returns-serialized-rdf/38781/2
return response.text
def rdf_import_fetch(self, url: str, format="Turtle-star"):
cypher = "CALL n10s.rdf.import.fetch ($url, $format) YIELD terminationStatus, triplesLoaded, triplesParsed, " \
"namespaces, extraInfo, callParams"
cypher_dict = {'url': url, 'format': format}
if self.debug:
print(f"""
query: {cypher}
parameters: {cypher_dict}
""")
return self.query(cypher, cypher_dict)
def rdf_import_subgraph_inline(self, rdf: str, format="Turtle-star"):
"""
A method that creates/merges appropriate nodes in Neo4j as specified in the provided :rdf string
The nodes will be MERGEd by 'uri' property
:param rdf: RDF serialization of Neo4j nodes and relationships
:param format: RDF serialization format
:return: returns a dictionary with keys triplesParsed, triplesLoaded as a summary of the operation
"""
assert self.rdf, "rdf option is not enabled at init of NeoInterface class"
if not self.autoconnect:
self.rdf_setup_connection()
cypher = """
CALL n10s.rdf.import.inline($rdf, $format)
YIELD triplesParsed, triplesLoaded, extraInfo
RETURN *
"""
# cypher_dict = {'rdf':rdf.encode('utf-8').decode('utf-8'), 'format': format}
cypher_dict = {'rdf': rdf, 'format': format}
if self.debug:
print(f"""
query: {cypher}
parameters: {cypher_dict}
""")
res = self.query(cypher, cypher_dict)
self._rdf_subgraph_cleanup()
if len(res) > 0:
return res[0]
else:
return {'triplesParsed': 0, 'triplesLoaded': 0, 'extraInfo': ''}
    def _rdf_subgraph_cleanup(self):
        """
        Helper to undo the %20 encoding of blanks that the RDF round-trip introduces:
        renames labels containing '%20' back to labels with spaces, copies properties
        whose names contain '%20' to space-named properties (removing the encoded ones),
        and finally tidies up selected encoded characters in uri values.
        """
        # In case labels with spaces were serialized, new labels (with spaces replaced by %20)
        # could have been created; rename them back to their space-containing originals
        cypher = """
        UNWIND $labels as label
        CALL apoc.refactor.rename.label(label, apoc.text.regreplace(label, '%20', ' '))
        YIELD batches, failedBatches, total, failedOperations
        RETURN batches, failedBatches, total, failedOperations
        """
        cypher_dict = {'labels': [label for label in self.get_labels() if "%20" in label]}
        if self.debug:
            print(f"""
            query: {cypher}
            parameters: {cypher_dict}
            """)
        self.query(cypher, cypher_dict)
        # Likewise for property names: copy each %20-named property to its space-named
        # counterpart on every node that has it, then remove the encoded property
        cypher2 = """
        CALL db.schema.nodeTypeProperties() YIELD nodeLabels, propertyName
        WHERE propertyName contains "%20"
        CALL apoc.cypher.doIt(
            'MATCH (node:`' + apoc.text.join(nodeLabels, '`:`') + '`) ' +
            'WHERE "' + propertyName + '" in keys(node)' +
            'SET node.`' + apoc.text.replace(propertyName, '%20', ' ') + '` = node.`' + propertyName + '`' +
            'REMOVE node.`' + propertyName + '`'
            ,
            {}
        ) YIELD value
        RETURN value['node']
        """
        cypher_dict2 = {}
        if self.debug:
            print(f"""
            query: {cypher2}
            parameters: {cypher_dict2}
            """)
        self.query(cypher2, cypher_dict2)
        self._rdf_uri_cleanup()
def _rdf_uri_cleanup(self):
# URIs - replace selected encoded values with their original characters (for readability)
cypher3 = """
MATCH (n)
WHERE n.uri is not null
SET n.uri = apoc.text.replace(n.uri, '%23', '#')
SET n.uri = apoc.text.replace(n.uri, '%2F', '/')
SET n.uri = apoc.text.replace(n.uri, '%3A', ':')
"""
cypher_dict3 = {}
if self.debug:
print(f"""
query: {cypher3}
parameters: {cypher_dict3}
""")
self.query(cypher3, cypher_dict3)
def rdf_get_graph_onto(self):
"""
A method that returns an ontology autogenerated from existing nodes in Neo4j (provided by n10s(neosemantics) library
:return: str - serialized ontology
"""
assert self.rdf, "rdf option is not enabled at init of NeoInterface class"
url = self.rdf_host + "neo4j/onto"
response = requests.get(
url=url,
auth=self.credentials)
return response.text
|
from neo4j import GraphDatabase # The Neo4j python connectivity library "Neo4j Python Driver"
from neo4j import __version__ as neo4j_driver_version # The version of the Neo4j driver being used
import neo4j.graph # To check returned data types
import numpy as np
import pandas as pd
import inspect
import os
import requests
import re
import json
import time
from urllib.parse import quote
class NeoInterface:
"""
High level class to interact with neo4j from Python.
It provides a higher-level wrapper around the Neo4j python connectivity library "Neo4j Python Driver",
documented at: https://neo4j.com/docs/api/python-driver/current/api.html
SECTIONS IN THIS CLASS:
* INIT
* METHODS TO RUN GENERIC QUERIES
* METHODS TO RETRIEVE DATA
* METHODS TO CREATE/MODIFY SCHEMA
* METHODS TO CREATE/MODIFY DATA
* METHODS TO CREATE NEW RELATIONSHIPS
* METHODS TO READ IN DATA
* UTILITY METHODS
* METHODS TO SUPPORT DEBUGGING
* METHODS TO SUPPORT JSON IMPORT/EXPORT
* METHODS TO SUPPORT RDF PROCEDURES
AUTHORS:
<NAME> and <NAME>, GlaxoSmithKline
Based in part on Neo4jLiaison library (MIT License: https://github.com/BrainAnnex/neo4j-liaison)
"""
    def __init__(self,
                 host=os.environ.get("NEO4J_HOST"),
                 credentials=(os.environ.get("NEO4J_USER"), os.environ.get("NEO4J_PASSWORD")),
                 apoc=False,
                 rdf=False,
                 rdf_host=None,
                 verbose=True,
                 debug=False,
                 autoconnect=True):
        """
        If unable to create a Neo4j driver object, raise an Exception reminding the user to check whether the Neo4j database is running
        :param host: URL to connect to database with. DEFAULT: read from NEO4J_HOST environmental variable
        :param credentials: Pair of strings (tuple or list) containing, respectively, the database username and password
                            DEFAULT: read from NEO4J_USER and NEO4J_PASSWORD environmental variables
                            if None then no authentication is used
        :param apoc: Flag indicating whether apoc library is used on Neo4j database to connect to
        :param rdf: Flag indicating whether the RDF (n10s/neosemantics) support of this class is to be enabled
        :param rdf_host: URL of the RDF HTTP endpoint; if None it is derived later in rdf_setup_connection()
        :param verbose: Flag indicating whether a verbose mode is to be used by all methods of this class
        :param debug: Flag indicating whether a debug mode is to be used by all methods of this class
        :param autoconnect: Flag indicating whether the class should establish connection to database at initialization
        """
        self.verbose = verbose
        self.debug = debug
        self.autoconnect = autoconnect
        self.host = host
        self.credentials = credentials
        self.apoc = apoc
        self.rdf = rdf
        self.rdf_host = rdf_host
        if self.verbose:
            print("---------------- Initializing NeoInterface -------------------")
        if self.autoconnect:  # TODO: add test for autoconnect == False
            # Attempt to create a driver object
            self.connect()
        # Extra initializations if APOC custom procedures (note: APOC must also be enabled on the database)
        # if apoc:
        #     self.setup_all_apoc_custom()
        # Extra initializations if RDF support required
        if self.rdf:
            self.rdf_setup_connection()
def connect(self) -> None:
try:
if self.credentials:
user, password = self.credentials # This unpacking will work whether the credentials were passed as a tuple or list
self.driver = GraphDatabase.driver(self.host, auth=(
user, password)) # Object to connect to Neo4j's Bolt driver for Python
else:
self.driver = GraphDatabase.driver(self.host,
auth=None) # Object to connect to Neo4j's Bolt driver for Python
if self.verbose:
print(f"Connection to {self.host} established")
except Exception as ex:
error_msg = f"CHECK IF NEO4J IS RUNNING! While instantiating the NeoInterface object, failed to create the driver: {ex}"
raise Exception(error_msg)
def rdf_config(self) -> None:
try:
self.query("CALL n10s.graphconfig.init({handleVocabUris:'IGNORE'});")
except:
if self.debug:
print("Config already created, make sure the config is correct")
self.create_constraint(label="Resource", key="uri", type="UNIQUE", name="n10s_unique_uri")
def rdf_setup_connection(self) -> None:
self.rdf_config()
if not self.rdf_host:
self.rdf_host = os.environ.get("NEO4J_RDF_HOST")
if not self.rdf_host:
bolt_port = re.findall(r'\:\d+', self.host)[0]
self.rdf_host = self.host.replace(bolt_port, ":7474").replace("bolt", "http").replace("neoj", "http")
self.rdf_host += ("" if self.rdf_host.endswith("/") else "/") + "rdf/"
try:
get_response = json.loads(requests.get(f"{self.rdf_host}ping", auth=self.credentials).text)
if self.verbose:
if "here!" in get_response.values():
print(f"Connection to {self.rdf_host} established")
except:
error_msg = f"CHECK IF RDF ENDPOINT IS SET UP CORRECTLY! While instantiating the NeoInterface object, failed to connect to {self.rdf_host}"
raise Exception(error_msg)
def version(self) -> str:
# Return the version of the Neo4j driver being used. EXAMPLE: "4.2.1"
return neo4j_driver_version
def close(self) -> None:
"""
Terminate the database connection.
Note: this method is automatically invoked after the last operation of a "with" statement
:return: None
"""
if self.driver is not None:
self.driver.close()
############################################################################################
# #
# METHODS TO RUN GENERIC QUERIES #
# #
############################################################################################
def query(self, q: str, params=None) -> []:
"""
Run a general Cypher query and return a list of dictionaries.
In cases of error, return an empty list.
A new session to the database driver is started, and then immediately terminated after running the query.
NOTE: if the Cypher query returns a node, and one wants to extract its internal Neo4j ID or labels
(in addition to all the properties and their values) then use query_expanded() instead.
:param q: A Cypher query
:param params: An optional Cypher dictionary
EXAMPLE, assuming that the cypher string contains the substrings "$node_id":
{'node_id': 20}
:return: A (possibly empty) list of dictionaries. Each dictionary in the list
will depend on the nature of the Cypher query.
EXAMPLES:
Cypher returns nodes (after finding or creating them): RETURN n1, n2
-> list item such as {'n1': {'gender': 'M', 'patient_id': 123}
'n2': {'gender': 'F', 'patient_id': 444}}
Cypher returns attribute values that get renamed: RETURN n.gender AS client_gender, n.pid AS client_id
-> list items such as {'client_gender': 'M', 'client_id': 123}
Cypher returns attribute values without renaming: RETURN n.gender, n.pid
-> list items such as {'n.gender': 'M', 'n.pid': 123}
Cypher returns a single computed value
-> a single list item such as {"count(n)": 100}
Cypher returns a single relationship, with or without attributes: MERGE (c)-[r:PAID_BY]->(p)
-> a single list item such as [{ 'r': ({}, 'PAID_BY', {}) }]
Cypher creates nodes (without returning them)
-> empty list
"""
# Start a new session, use it, and then immediately close it
with self.driver.session() as new_session:
result = new_session.run(q, params)
# Note: result is a neo4j.Result object;
# more specifically, an object of type neo4j.work.result.Result
# See https://neo4j.com/docs/api/python-driver/current/api.html#neo4j.Result
if result is None:
return []
data_as_list = result.data() # Return the result as a list of dictionaries.
# This must be done inside the "with" block,
# while the session is still open
return data_as_list
def query_expanded(self, q: str, params=None, flatten=False) -> []:
    """
    Expanded version of query(), meant to extract additional info for queries that return Graph Data Types,
    i.e. nodes, relationships or paths,
    such as "MATCH (n) RETURN n", or "MATCH (n1)-[r]->(n2) RETURN r"
    For example, if nodes were returned, and their Neo4j internal IDs and/or labels are desired
    (in addition to all the properties and their values)

    Unless the flatten flag is True, individual records are kept as separate lists.
    For example, "MATCH (b:boat), (c:car) RETURN b, c"
    will return a structure such as [ [b1, c1] , [b2, c2] ] if flatten is False,
    vs. [b1, c1, b2, c2] if flatten is True. (Note: each b1, c1, etc, is a dictionary.)

    TODO: Scenario to test:
    if b1 == b2, would that still be [b1, c1, b1(b2), c2] or [b1, c1, c2] - i.e. would we remove the duplicates?
    Try running with flatten=True "MATCH (b:boat), (c:car) RETURN b, c" on data like "CREATE (b:boat), (c1:car1), (c2:car2)"

    :param q: A Cypher query
    :param params: An optional Cypher dictionary
        EXAMPLE, assuming that the cypher string contains the substring "$age":
        {'age': 20}
    :param flatten: Flag indicating whether the Graph Data Types need to remain clustered by record,
        or all placed in a single flattened list.
    :return: A (possibly empty) list of dictionaries, which will depend on which Graph Data Types
        were returned in the Cypher query.
        EXAMPLE - for a returned node
        {'gender': 'M', 'age': 20, 'neo4j_id': 123, 'neo4j_labels': ['patient']}
        EXAMPLE - for a returned relationship
        {'price': 7500, 'neo4j_id': 2,
         'neo4j_start_node': <Node id=11 labels=frozenset() properties={}>,
         'neo4j_end_node': <Node id=14 labels=frozenset() properties={}>,
         'neo4j_type': 'bought_by'}]
    NOTE(review): if the Cypher query returns plain scalar values rather than Graph Data Types,
                  dict(item.items()) below would fail - this method presumes graph-type returns; verify with callers.
    """
    # Start a new session, use it, and then immediately close it
    with self.driver.session() as new_session:
        result = new_session.run(q, params)
        # Note: result is a neo4j.Result iterable object;
        #       more specifically, an object of type neo4j.work.result.Result
        #       See https://neo4j.com/docs/api/python-driver/current/api.html#neo4j.Result
        if result is None:
            return []

        data_as_list = []
        # The following must be done inside the "with" block, while the session is still open
        for record in result:
            # Note: record is a neo4j.Record object - an immutable ordered collection of key-value pairs.
            #       (the keys are the dummy names used for the nodes, such as "n")
            #       See https://neo4j.com/docs/api/python-driver/current/api.html#record
            # EXAMPLE of record (if node n was returned):
            #       <Record n=<Node id=227 labels=frozenset({'person', 'client'}) properties={'gender': 'M', 'age': 99}>>
            #       (it has one key, "n")
            # EXAMPLE of record (if node n and node c were returned):
            #       <Record n=<Node id=227 labels=frozenset({'person', 'client'}) properties={'gender': 'M', 'age': 99}>
            #               c=<Node id=66 labels=frozenset({'car'}) properties={'color': 'blue'}>>
            #       (it has 2 keys, "n" and "c")

            data = []   # Per-record accumulator (only used when flatten is False)
            for item in record:
                # Note: item is a neo4j.graph.Node object
                #       OR a neo4j.graph.Relationship object
                #       OR a neo4j.graph.Path object
                #       See https://neo4j.com/docs/api/python-driver/current/api.html#node
                #           https://neo4j.com/docs/api/python-driver/current/api.html#relationship
                #           https://neo4j.com/docs/api/python-driver/current/api.html#path
                # EXAMPLES of item:
                #       <Node id=95 labels=frozenset({'car'}) properties={'color': 'white', 'make': 'Toyota'}>
                #       <Relationship id=12 nodes=(<Node id=147 labels=frozenset() properties={}>, <Node id=150 labels=frozenset() properties={}>) type='bought_by' properties={'price': 7500}>
                neo4j_properties = dict(item.items())   # EXAMPLE: {'gender': 'M', 'age': 99}
                if isinstance(item, neo4j.graph.Node):
                    # Augment the properties with the node's internal ID and its labels
                    neo4j_properties["neo4j_id"] = item.id              # Example: 227
                    neo4j_properties["neo4j_labels"] = list(item.labels)    # Example: ['person', 'client']
                elif isinstance(item, neo4j.graph.Relationship):
                    # Augment the properties with the relationship's internal ID, endpoints, and type
                    neo4j_properties["neo4j_id"] = item.id              # Example: 227
                    neo4j_properties[
                        "neo4j_start_node"] = item.start_node   # A neo4j.graph.Node object with "id", "labels" and "properties"
                    neo4j_properties[
                        "neo4j_end_node"] = item.end_node       # A neo4j.graph.Node object with "id", "labels" and "properties"
                    #       Example: <Node id=118 labels=frozenset({'car'}) properties={'color': 'white'}>
                    neo4j_properties["neo4j_type"] = item.type  # The name of the relationship
                elif isinstance(item, neo4j.graph.Path):
                    neo4j_properties["neo4j_nodes"] = item.nodes    # The sequence of Node objects in this path

                if flatten:
                    data_as_list.append(neo4j_properties)
                else:
                    data.append(neo4j_properties)

            if not flatten:
                data_as_list.append(data)

        return data_as_list
##################################################################################################
# #
# METHODS TO RETRIEVE DATA #
# #
##################################################################################################
def get_single_field(self, field_name: str, labels="", properties_condition=None, cypher_clause=None,
                     cypher_dict=None) -> list:
    """
    For situations where one is fetching just 1 field,
    and one desires a list of those values, rather than a dictionary of records.
    In other respects, similar to the more general get_nodes()

    EXAMPLES: get_single_field("price", "car", properties_condition={"car_make": "Toyota"})
                    will RETURN a list of prices of all the Toyota models
              get_single_field("price", "car", properties_condition={"car_make": "Toyota"},
                               cypher_clause="n.price < 50000")
                    will RETURN a list of prices of all the Toyota models that cost less than 50000
    (NOTE: the examples previously referred to a nonexistent "fetch_single_field" with reversed arguments)

    :param field_name: A string with the name of the desired field (attribute)

            For more information on the other parameters, see get_nodes()

    :return: A list of the values of the field_name attribute in the nodes that match the specified conditions
             (nodes lacking that attribute contribute a None entry, courtesy of dict.get)
    """
    record_list = self.get_nodes(labels, properties_condition=properties_condition,
                                 cypher_clause=cypher_clause, cypher_dict=cypher_dict)

    return [record.get(field_name) for record in record_list]
def get_nodes(self, labels="", properties_condition=None, cypher_clause=None, cypher_dict=None,
              return_nodeid=False, return_labels=False) -> [{}]:
    """
    Return a list of records (dictionaries of ALL the key/value node properties)
    for every Neo4j node that carries the specified label(s),
    AND satisfies the Cypher clause (if given),
    AND exactly matches ALL the property key/value pairs (if given) - an implicit AND.

    EXAMPLES:
        get_nodes("")       # Get ALL nodes
        get_nodes("client")
        get_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"})
        get_nodes("client", cypher_clause = "n.age > 40 OR n.income < 50000")
        get_nodes("client", cypher_clause = "n.age > $some_age", cypher_dict = {"$some_age": 40})
        get_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"} ,
                            cypher_clause = "n.age > 40 OR n.income < 50000")

    IMPORTANT: nodes referred to in the Cypher clause must be specified as "n."
    If cypher_dict and properties_condition have any key overlap, an Exception is raised.

    :param labels:               A string (or list/tuple of strings) of Neo4j labels;
                                 an empty string matches across ALL labels -
                                 NOT RECOMMENDED for large databases!
                                 (Note: blank spaces ARE allowed in the strings)
    :param cypher_dict:          Dictionary of data binding for the Cypher clause.
                                 EXAMPLE: {"gender": "M", "age": 40}
    :param cypher_clause:        String with a clause to refine the search; nodes it refers to
                                 MUST be specified as "n."
                                 EXAMPLE with hardwired values:  "n.age > 40 OR n.income < 50000"
                                 EXAMPLE with data-binding:      "n.age > $age OR n.income < $income"
    :param properties_condition: A (possibly-empty) dictionary of property key/value pairs.
                                 EXAMPLE: {"gender": "M", "age": 64}
    :param return_nodeid:        If True, each record also gets a "neo4j_id" entry
                                 with the Neo4j internal node ID
    :param return_labels:        If True, each record also gets a "neo4j_labels" entry
                                 with the list of the node's Neo4j labels
    :return:                     A list of dictionaries, one per matching node; ALL node
                                 attributes are included, and may vary across records.
                                 EXAMPLE (with both flags set):
                                 [ {"neo4j_id": 145, "neo4j_labels": ["person", "client"],
                                    "gender": "M", "age": 42, "condition_id": 3},
                                   {"neo4j_id": 222, "neo4j_labels": ["person"],
                                    "gender": "M", "age": 76, "location": "Berkeley"} ]
    # TODO: provide an option to specify the desired fields
    """
    (cypher, cypher_dict) = self._match_nodes(labels=labels, properties_condition=properties_condition,
                                              cypher_clause=cypher_clause, cypher_dict=cypher_dict)
    cypher += " RETURN n"

    if self.debug:
        print(f"""
        In get_nodes().
        query: {cypher}
        parameters: {cypher_dict}
        """)

    # query_expanded() always supplies both 'neo4j_id' and 'neo4j_labels'
    result_list = self.query_expanded(cypher, cypher_dict, flatten=True)

    # Strip out whichever of the two extra attributes the caller did NOT ask for
    unwanted_keys = []
    if not return_nodeid:
        unwanted_keys.append('neo4j_id')
    if not return_labels:
        unwanted_keys.append('neo4j_labels')

    for record in result_list:
        for key in unwanted_keys:
            del record[key]

    return result_list
def get_df(self, labels="", properties_condition=None, cypher_clause=None, cypher_dict=None,
           return_nodeid=False, return_labels=False) -> pd.DataFrame:
    """
    Identical to get_nodes(), except that the result is packaged as a Pandas dataframe.
    [See get_nodes() for the meaning of all the arguments]

    :param labels:
    :param properties_condition:
    :param cypher_clause:
    :param cypher_dict:
    :param return_nodeid:
    :param return_labels:
    :return: A Pandas dataframe (one row per matching node)
    """
    records = self.get_nodes(labels=labels, properties_condition=properties_condition,
                             cypher_clause=cypher_clause, cypher_dict=cypher_dict,
                             return_nodeid=return_nodeid, return_labels=return_labels)
    return pd.DataFrame(records)
def _match_nodes(self, labels, properties_condition=None, cypher_clause=None, cypher_dict=None) -> (str, dict):
    """
    Turn a set of specification into the MATCH part of the Cypher query, and its data-binding dictionary.

    EXAMPLES:
        _match_nodes("client")
        _match_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"})
        _match_nodes("client", cypher_clause = "n.age > 40 OR n.income < 50000")
        _match_nodes("client", cypher_clause = "n.age > $age",
                               cypher_dict = {"$age": 40})
        _match_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"} ,
                               cypher_clause = "n.age > 40 OR n.income < 50000")

    RETURN the MATCH part of the Cypher query, and its data-binding dictionary,
    corresponding to all the Neo4j nodes with the given label or labels (if specified),
    AND satisfying the given Cypher CLAUSE (if specified, and optionally with the data-binding cypher_dict),
    AND exactly matching ALL of the specified property key/values pairs (if specified).
    I.e. an implicit AND operation.

    Note: cypher_dict should not contain keys of the form `par_n`, where n is an integer, or an Exception might result.
    FIX: the caller's cypher_dict is no longer mutated; a merged copy is built instead.

    :param labels:               A string, or list/tuple of strings, of Neo4j labels (Note: blank spaces ARE allowed)
    :param properties_condition: A (possibly-empty) dictionary of property key/values pairs.
                                 Example: {"gender": "F", "age": 22}
                                 If None or empty, no restrictions are placed on the match
    :param cypher_clause:        String with a clause to refine the search; any nodes it refers to, MUST be specified as "n."
                                 EXAMPLE with hardwired values:  "n.age > 40 OR n.income < 50000"
                                 EXAMPLE with data-binding:      "n.age > $age OR n.income < $income"
                                 (data-binding values are specified in cypher_dict)
    :param cypher_dict:          Dictionary of data binding for the Cypher string. EXAMPLE: {"gender": "M", "age": 40}
                                 It should not contain any keys of the form `par_n`, where n is an integer
                                 (those names are reserved for internal use)
    :return:                     A pair consisting of the MATCH part of the Cypher query, and its data-binding dictionary
    """
    if properties_condition is None:
        clause_from_properties = ""
    else:
        # Transform the dictionary properties_condition into a string describing its corresponding Cypher clause,
        #       plus a corresponding data-binding dictionary.
        #       (assuming an implicit AND between equalities described by the dictionary terms),
        #
        #       EXAMPLE:
        #               properties_condition: {"gender": "F", "year first met": 2003}
        #           will lead to:
        #               clause_from_properties = "{`gender`: $par_1, `year first met`: $par_2}"
        #               props_data_binding = {'par_1': "F", 'par_2': 2003}
        (clause_from_properties, props_data_binding) = self.dict_to_cypher(properties_condition)

        if cypher_dict is None:
            cypher_dict = props_data_binding    # The properties dictionary is to be used as the Cypher-binding dictionary
        else:
            # Merge the two dictionaries into a NEW one, PROVIDED that there's no conflict.
            # (Building a copy - rather than calling cypher_dict.update() as before -
            #  avoids the side effect of mutating the caller's dictionary)
            overlap = cypher_dict.keys() & props_data_binding.keys()    # Take the set intersection
            if overlap != set():    # If not equal to the empty set
                raise Exception(
                    f"`cypher_dict` should not contain any keys of the form `par_n` where n is an integer. "
                    f"Those names are reserved for internal use. Conflicting names: {overlap}")

            cypher_dict = {**cypher_dict, **props_data_binding}

    if cypher_dict is None:
        cypher_dict = {}

    if cypher_clause is not None:
        cypher_clause = cypher_clause.strip()   # Zap any leading/trailing blanks

    # Turn labels (string or list/tuple of labels) into a string suitable for inclusion into Cypher
    cypher_labels = self._prepare_labels(labels)

    # Construct the Cypher string
    cypher = f"MATCH (n {cypher_labels} {clause_from_properties})"

    if (cypher_clause != "") and (cypher_clause is not None):
        cypher += f" WHERE {cypher_clause}"

    return (cypher, cypher_dict)
def _prepare_labels(self, labels) -> str:
"""
Turn the given string, or list/tuple of strings - representing Neo4j labels - into a string
suitable for inclusion in a Cypher query.
Blanks ARE allowed in names.
EXAMPLES:
"client" gives rise to ":`client`"
["car", "car manufacturer"] gives rise to ":`car`:`car manufacturer`"
:param labels: A string, or list/tuple of strings, representing Neo4j labels
:return: A string suitable for inclusion in a Cypher query
"""
# Turn the label strings, or list/tuple of labels, into a string suitable for inclusion into Cypher
if labels == "":
return ""
if type(labels) == str:
labels = [labels]
cypher_labels = ""
for single_label in labels:
cypher_labels += f":`{single_label}`" # EXAMPLE: ":`label 1`:`label 2`"
return cypher_labels
def get_parents_and_children(self, node_id: int) -> {}:
    """
    Fetch all the nodes connected to the given one by INbound relationships to it (its "parents"),
    as well as by OUTbound relationships to it (its "children")

    FIX: node_id is now passed by Cypher data binding, rather than being
         spliced into the query text with an f-string.

    :param node_id: An integer with a Neo4j internal node ID
    :return: A dictionary with 2 keys: 'parent_list' and 'child_list'
             The values are lists of dictionaries with 3 keys: "id", "labels", "rel"
                 EXAMPLE of individual items in either parent_list or child_list:
                 {'id': 163, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'}
    """
    params = {"node_id": node_id}   # Shared data-binding dictionary for both queries below

    with self.driver.session() as new_session:
        # Fetch the parents
        cypher = "MATCH (parent)-[inbound]->(n) WHERE id(n) = $node_id " \
                 "RETURN id(parent) AS id, labels(parent) AS labels, type(inbound) AS rel"
        if self.debug:
            print(f"""
            query: {cypher}
            """)

        result_obj = new_session.run(cypher, params)    # A new neo4j.Result object
        parent_list = result_obj.data()
        # EXAMPLE of parent_list:
        #       [{'id': 163, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'},
        #        {'id': 150, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'}]
        if self.verbose:
            print(f"parent_list for node {node_id}:", parent_list)

        # Fetch the children
        cypher = "MATCH (n)-[outbound]->(child) WHERE id(n) = $node_id " \
                 "RETURN id(child) AS id, labels(child) AS labels, type(outbound) AS rel"
        if self.debug:
            print(f"""
            query: {cypher}
            """)

        result_obj = new_session.run(cypher, params)    # A new neo4j.Result object
        child_list = result_obj.data()
        # EXAMPLE of child_list:
        #       [{'id': 107, 'labels': ['Source Data Row'], 'rel': 'FROM_DATA'},
        #        {'id': 103, 'labels': ['Source Data Row'], 'rel': 'FROM_DATA'}]
        if self.verbose:
            print(f"child_list for node {node_id}:", child_list)

    return {'parent_list': parent_list, 'child_list': child_list}
def get_labels(self) -> [str]:
    """
    Extract and return a list of every Neo4j label present in the database.
    No particular order should be expected.
    TODO: test when there are nodes that have multiple labels

    :return: A list of strings, one per label
    """
    label_records = self.query("call db.labels() yield label return label")
    return [record['label'] for record in label_records]
def get_relationshipTypes(self) -> [str]:
    """
    Extract and return a list of every Neo4j relationship type present in the database.
    No particular order should be expected.

    :return: A list of strings, one per relationship type
    """
    type_records = self.query("call db.relationshipTypes() yield relationshipType return relationshipType")
    return [record['relationshipType'] for record in type_records]
def get_label_properties(self, label: str) -> list:
    """
    Return an alphabetically-sorted list of the distinct property names present
    on nodes carrying the given label, as reported by db.schema.nodeTypeProperties().

    :param label: A string with a Neo4j node label
    :return:      A (possibly empty) list of property-name strings, sorted alphabetically
    """
    q = """
        CALL db.schema.nodeTypeProperties()
        YIELD nodeLabels, propertyName
        WHERE $label in nodeLabels and propertyName IS NOT NULL
        RETURN DISTINCT propertyName
        ORDER BY propertyName
        """
    params = {'label': label}
    if self.debug:
        print("q : ", q, " | params : ", params)

    return [res['propertyName'] for res in self.query(q, params)]
#########################################################################################
# #
# METHODS TO GET/CREATE/MODIFY SCHEMA #
# #
#########################################################################################
def get_indexes(self, types=None) -> pd.DataFrame:
    """
    Return all the database indexes, and some of their attributes, as a Pandas dataframe.
    Optionally restrict the type (such as "BTREE") of indexes returned.

    EXAMPLE:
           labelsOrTypes            name          properties   type  uniqueness
         0   [my_label]  index_23b0962b       [my_property]  BTREE   NONUNIQUE
         1   [my_label]       some_name  [another_property]  BTREE      UNIQUE

    :param types: Optional list of types to limit the result to
    :return:      A (possibly-empty) Pandas dataframe
    """
    if types:
        restriction = "with * where type in $types"     # Only keep indexes of the requested types
    else:
        types = []
        restriction = ""

    q = f"""
      call db.indexes() 
      yield name, labelsOrTypes, properties, type, uniqueness
      {restriction}
      return *
      """
    index_records = self.query(q, {"types": types})

    if index_records:
        return pd.DataFrame(index_records)

    # No indexes at all: hand back an empty frame that still has a 'name' column,
    # so that callers can iterate over df['name'] without special-casing
    return pd.DataFrame([], columns=['name'])
def get_constraints(self) -> pd.DataFrame:
    """
    Return all the database constraints, and some of their attributes,
    as a Pandas dataframe with 3 columns:
        name        EXAMPLE: "my_constraint"
        description EXAMPLE: "CONSTRAINT ON ( patient:patient ) ASSERT (patient.patient_id) IS UNIQUE"
        details     EXAMPLE: "Constraint( id=3, name='my_constraint', type='UNIQUENESS',
                              schema=(:patient {patient_id}), ownedIndex=12 )"

    :return: A (possibly-empty) Pandas dataframe
    """
    q = """
       call db.constraints() 
       yield name, description, details
       return *
       """
    constraint_records = self.query(q)

    if constraint_records:
        return pd.DataFrame(constraint_records)

    # No constraints: return an empty frame that still carries a 'name' column
    return pd.DataFrame([], columns=['name'])
def create_index(self, label: str, key: str) -> bool:
    """
    Create a new database index, unless it already exists,
    to be applied to the specified label and key (property).
    The standard name given to the new index is of the form label.key

    EXAMPLE - to index nodes labeled "car" by their key "color":
                    create_index("car", "color")
              This new index - if not already in existence - will be named "car.color"

    If an existing index entry contains a list of labels (or types) such as ["l1", "l2"],
    and a list of properties such as ["p1", "p2"],
    then the given pair (label, key) is checked against ("l1_l2", "p1_p2") to decide whether it already exists.

    :param label: A string with the node label to which the index is to be applied
    :param key:   A string with the key (property) name to which the index is to be applied
    :return:      True if a new index was created, or False otherwise
    """
    existing_indexes = self.get_indexes()   # A Pandas dataframe; in particular, with
                                            # the 2 columns "labelsOrTypes" and "properties"

    # Join each row's label list and property list with "_", to form comparable pairs.
    # For example, if existing_indexes contains:
    #         labelsOrTypes     properties
    #      0  [car]             [color, make]
    #      1  [person]          [sex]
    # then existing_pairs will be: [('car', 'color_make'), ('person', 'sex')]
    existing_pairs = list(existing_indexes.apply(
        lambda row: ("_".join(row['labelsOrTypes']), "_".join(row['properties'])), axis=1))

    if (label, key) in existing_pairs:
        return False    # An equivalent index is already in place

    # Create the index, under its standard name `{label}.{key}`
    q = f'CREATE INDEX `{label}.{key}` FOR (s:`{label}`) ON (s.`{key}`)'
    if self.debug:
        print(f"""
        query: {q}
        """)
    self.query(q)
    return True
def create_constraint(self, label: str, key: str, type="UNIQUE", name=None) -> bool:
    """
    Create a uniqueness constraint for a node property in the graph,
    unless a constraint with the standard name of the form `{label}.{key}.{type}` is already present.

    Note: it also creates an index, and cannot be applied if an index already exists.

    EXAMPLE: create_constraint("patient", "patient_id")

    :param label: A string with the node label to which the constraint is to be applied
    :param key:   A string with the key (property) name to which the constraint is to be applied
    :param type:  For now, the default "UNIQUE" is the only allowed option
    :param name:  Optional name to give to the new constraint; if not provided, a
                  standard name of the form `{label}.{key}.{type}` is used.
                  EXAMPLE: "patient.patient_id.UNIQUE"
    :return:      True if a new constraint was created, or False otherwise
    """
    assert type == "UNIQUE"
    # TODO: consider other types of constraints

    # Either use the caller-supplied name, or assemble the standard one
    if name:
        cname = name
    else:
        cname = f"`{label}.{key}.{type}`"

    if cname in list(self.get_constraints()['name']):
        return False    # A constraint by that name already exists

    try:
        q = f'CREATE CONSTRAINT {cname} ON (s:`{label}`) ASSERT s.`{key}` IS UNIQUE'
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)
        # Note: creation of a constraint will crash if another constraint, or index, already exists
        #       for the specified label and key
        return True
    except Exception:
        return False    # Deliberate best-effort: failure to create is reported, not raised
def drop_index(self, name: str) -> bool:
    """
    Eliminate the index with the specified name.

    :param name: Name of the index to eliminate
    :return:     True if successful or False otherwise (for example, if the index doesn't exist)
    """
    try:
        q = f"DROP INDEX `{name}`"
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)   # Note: it crashes if the index doesn't exist
    except Exception:
        return False    # Deliberate best-effort: a missing index simply yields False

    return True
def drop_all_indexes(self, including_constraints=True) -> None:
    """
    Eliminate all the indexes in the database and, optionally, also get rid of all constraints.

    :param including_constraints: Flag indicating whether to also ditch all the constraints
    :return: None
    """
    if including_constraints:
        # With APOC available, one call clears out the whole schema;
        # otherwise, fall back to dropping the constraints one by one
        if self.apoc:
            self.query("call apoc.schema.assert({},{})")
        else:
            self.drop_all_constraints()

    for index_name in self.get_indexes()['name']:
        self.drop_index(index_name)
def drop_constraint(self, name: str) -> bool:
    """
    Eliminate the constraint with the specified name.

    :param name: Name of the constraint to eliminate
    :return:     True if successful or False otherwise (for example, if the constraint doesn't exist)
    """
    try:
        q = f"DROP CONSTRAINT `{name}`"
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)   # Note: it crashes if the constraint doesn't exist
    except Exception:
        return False    # Deliberate best-effort: a missing constraint simply yields False

    return True
def drop_all_constraints(self) -> None:
    """
    Eliminate all the constraints in the database.
    :return: None
    """
    for constraint_name in self.get_constraints()['name']:
        # In RDF mode, the n10s plugin's URI-uniqueness constraint must be preserved
        if self.rdf and constraint_name == 'n10s_unique_uri':
            continue
        self.drop_constraint(constraint_name)
#####################################################################################
# #
# METHODS TO CREATE/MODIFY DATA #
# #
#####################################################################################
def create_node_by_label_and_dict(self, labels, properties=None) -> int:
    """
    Create a new node with the given label(s) and with the attributes/values
    specified in the properties dictionary.
    Return the Neo4j internal ID of the node just created.

    :param labels:     A string, or list/tuple of strings, of Neo4j labels (ok to include blank spaces)
    :param properties: An optional dictionary of properties to set for the new node.
                       EXAMPLE: {'age': 22, 'gender': 'F'}
    :return:           An integer with the Neo4j internal ID of the node just created
    """
    # Turn the attribute dictionary into a Cypher fragment plus its data-binding dictionary.
    # EXAMPLE:
    #       attributes_str = '{`cost`: $par_1, `item description`: $par_2}'
    #       data_binding   = {'par_1': 65.99, 'par_2': 'the "red" button'}
    (attributes_str, data_binding) = self.dict_to_cypher(properties if properties is not None else {})

    # Turn labels (string or list/tuple of labels) into a string suitable for inclusion into Cypher
    cypher_labels = self._prepare_labels(labels)

    # Assemble the complete Cypher query
    cypher = f"CREATE (n {cypher_labels} {attributes_str}) RETURN n"

    if self.debug:
        print(f"""
        In create_node_by_label_and_dict().
        query: {cypher}
        parameters: {data_binding}
        """)

    created = self.query_expanded(cypher, data_binding, flatten=True)
    return created[0]['neo4j_id']   # The Neo4j internal ID of the node just created
def delete_nodes_by_label(self, delete_labels=None, keep_labels=None, batch_size=50000) -> None:
    """
    Empty out (by default completely) the Neo4j database.
    Optionally, only delete nodes with the specified labels, or only keep nodes with the given labels.
    Note: the keep_labels list has higher priority; if a label occurs in both lists, it will be kept.
    IMPORTANT: it does NOT clear indexes; "ghost" labels may remain!  To get rid of those, run drop_all_indexes()

    :param delete_labels: An optional string, or list of strings, indicating specific labels to DELETE
    :param keep_labels:   An optional string or list of strings, indicating specific labels to KEEP
                          (keep_labels has higher priority over delete_labels)
    :return: None
    """
    if (delete_labels is None) and (keep_labels is None):
        # Nothing was singled out: delete ALL nodes AND ALL relationships at once
        if self.verbose:
            print(f" --- Deleting all nodes in the database ---")

        if batch_size:
            # Delete in batches, by way of APOC, to avoid memory errors on large databases
            q = f"""
            call apoc.periodic.iterate(
                'MATCH (n) RETURN n',
                'DETACH DELETE(n)',
                {{batchSize:{str(batch_size)}, parallel:false}})
            YIELD  total, batches, failedBatches
            RETURN total, batches, failedBatches
            """
        else:
            q = "MATCH (n) DETACH DELETE(n)"

        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)
        return

    # Normalize delete_labels: absent -> every label in the database; string -> 1-element list
    if not delete_labels:
        delete_labels = self.get_labels()
    elif type(delete_labels) == str:
        delete_labels = [delete_labels]

    # Normalize keep_labels: absent -> empty list; string -> 1-element list
    if not keep_labels:
        keep_labels = []
    elif type(keep_labels) == str:
        keep_labels = [keep_labels]

    # Delete all nodes whose label is marked for deletion, unless that label is also marked "keep"
    for label in delete_labels:
        if label in keep_labels:
            continue    # keep_labels has higher priority

        if self.verbose:
            print(f" --- Deleting nodes with label: `{label}` ---")
        q = f"MATCH (x:`{label}`) DETACH DELETE x"
        if self.debug:
            print(f"""
            query: {q}
            """)
        self.query(q)
def clean_slate(self, keep_labels=None, drop_indexes=True, drop_constraints=True) -> None:
    """
    Use this to get rid of absolutely everything in the database.
    Optionally, keep nodes with a given label, or keep the indexes, or keep the constraints.

    :param keep_labels:      An optional list of strings, indicating specific labels to KEEP
    :param drop_indexes:     Flag indicating whether to also ditch all indexes (by default, True)
    :param drop_constraints: Flag indicating whether to also ditch all constraints (by default, True)
    :return: None
    """
    if self.rdf:
        # In RDF mode, the n10s plugin's '_GraphConfig' node must always survive
        protected_labels = (keep_labels + ['_GraphConfig']) if keep_labels else ['_GraphConfig']
        self.delete_nodes_by_label(keep_labels=protected_labels)
    else:
        self.delete_nodes_by_label(keep_labels=keep_labels)

    if drop_indexes:
        self.drop_all_indexes(including_constraints=drop_constraints)
def set_fields(self, labels, set_dict, properties_condition=None, cypher_clause=None, cypher_dict=None) -> None:
    """
    Locate the nodes specified by labels/conditions, and set the given fields on them.

    EXAMPLE - locate the "car" with vehicle id 123 and set its color to white and price to 7000
        set_fields(labels = "car", set_dict = {"color": "white", "price": 7000},
                   properties_condition = {"vehicle id": 123})

    LIMITATION: blanks are allowed in the keys of properties_condition, but not in those of set_dict

    :param labels:               A string, or list/tuple of strings, representing Neo4j labels
    :param set_dict:             A dictionary of field name/values to create/update the node's attributes
                                 (note: no blanks are allowed in the keys)
    :param properties_condition:
    :param cypher_clause:
    :param cypher_dict:
    :return: None
    """
    (cypher_match, data_binding) = self._match_nodes(labels, properties_condition=properties_condition,
                                                     cypher_clause=cypher_clause, cypher_dict=cypher_dict)

    # Build one "n.`field` = $field" assignment per entry in set_dict,
    # extending the data-binding dictionary along the way
    assignments = []
    for field_name, field_value in set_dict.items():
        assignments.append(f"n.`{field_name}` = ${field_name}")
        data_binding[field_name] = field_value

    cypher = cypher_match + "SET " + ", ".join(assignments)
    # EXAMPLE of cypher:
    #   "MATCH (n:car {`vehicle id`: $par_1}) SET n.`color` = $color, n.`price` = $price"
    # EXAMPLE of data binding:
    #   {"par_1": 123, "color": "white", "price": 7000}

    if self.debug:
        print("cypher: ", cypher)
        print("data_binding: ", data_binding)

    self.query(cypher, data_binding)
def extract_entities(self,
mode='merge',
label=None,
cypher=None,
cypher_dict=None,
target_label=None,
property_mapping={},
relationship=None,
direction='<'
):
"""
:param mode:str; assert mode in ['merge', 'create']
:param label:str; label of the nodes to extract data from
:param cypher: str; only of label not provided: cypher that returns id(node) of the nodes to extract data from
EXAMPLE:
cypher = '''
MATCH (f:`Source Data Table`{{_domain_:$domain}})-[:HAS_DATA]->(node:`Source Data Row`)
RETURN id(node)
'''
:param cypher_dict: None/dict parameters required for the cypher query
EXAMPLE:
cypher_dict={'domain':'ADSL'}
:param target_label: label(s) of the newly created nodes with extracted data: list or str
:param property_mapping: dict or list
if dict: keys correspond to the property names of source data (e.g. Source Data Row) and values correspond
to to the property names of the target class where the data is extracted to
if list: properties of the extracted node (as per the list) will extracted and will be named same as
in the source node
:param relationship: type of the relationship (to/from the extraction node) to create
:param direction: direction of the relationship to create (>: to the extraction node, <: from the extraction node)
:return: None
"""
assert mode in ['merge', 'create']
assert direction in ['>', '<']
assert type(property_mapping) in [dict, list]
assert type(target_label) in [list, str] or target_label is None
if target_label:
if type(target_label) == str:
target_label = [target_label]
if type(property_mapping) == list:
property_mapping = {k: k for k in property_mapping}
for key in property_mapping.keys():
for lbl in target_label:
self.create_index(lbl, key)
self.create_index(label, key)
q_match_part = f"MATCH (data:`{label}`) RETURN data"
q_match_altered = False
if cypher:
if not cypher_dict:
cypher_dict = {}
all = [x[1:] for x in re.findall(r'\$\w+\b', cypher)]
missing_params = set(all) - set(cypher_dict.keys())
if not missing_params:
q_match_part = """
CALL apoc.cypher.run($cypher, $cypher_dict) YIELD value
MATCH (data) WHERE id(data) = value['id(node)']
RETURN data
"""
q_match_altered = True
else:
if self.debug:
print("ERROR: not all parameters have been supplied in cypher_dict, missing: ", missing_params)
rel_left = ('' if direction == '>' else '<')
rel_right = ('>' if direction == '>' else '')
q = f"""
call apoc.periodic.iterate(
$q_match_part
,
'
WITH data, apoc.coll.intersection(keys($mapping), keys(data)) as common_keys
{("" if mode == "create" else "WHERE size(common_keys) > 0")}
WITH data, apoc.map.fromLists([key in common_keys | $mapping[key]], [key in common_keys | data[key]]) as submap
call apoc.{mode}.node($target_label, submap) YIELD node MERGE (data){rel_left}-[:`{relationship}`]-{rel_right}(node)
',
{{batchSize:10000, parallel:false, params: $inner_params}})
YIELD total, batches, failedBatches
RETURN total, batches, failedBatches
"""
inner_params = {'target_label': target_label,
'mapping': property_mapping}
if q_match_altered:
inner_params = {**inner_params, 'cypher': cypher, 'cypher_dict': cypher_dict}
params = {'q_match_part': q_match_part, 'target_label': target_label, 'inner_params': inner_params}
res = self.query(q, params)
if self.debug:
print(" Query : ", q)
print(" Query parameters: ", params)
print(" Result of above query : ", res, "\n")
#########################################################################################
# #
# METHODS TO CREATE NEW RELATIONSHIPS #
# #
#########################################################################################
    def link_entities(self,
                      left_class: str,
                      right_class: str,
                      relationship="_default_",
                      cond_via_node=None,
                      cond_left_rel=None,
                      cond_right_rel=None,
                      cond_cypher=None,
                      cond_cypher_dict=None):
        """
        Creates relationship of type {relationship} between pairs of nodes with labels
        left_class and right_class.  The pairs are selected either by walking through an
        intermediate node (cond_via_node together with cond_left_rel/cond_right_rel),
        or by a custom Cypher query (cond_cypher) that returns columns 'left' and 'right'.

        :param left_class: Name of the left class
        :param right_class: Name of the right class
        :param relationship: Name to give the relationship (if left at the default: will use name of right_class (f'HAS_{right_class.upper()}'))
        :param cond_via_node: Name of central node from which relationships will be created
        :param cond_left_rel: Name and direction of relationship from right_class (e.g. FROM_DATA> or <FROM_DATA)
        :param cond_right_rel: Name and direction of relationship from left_class (e.g. FROM_DATA> or <FROM_DATA)
        :param cond_cypher: (optional) - if not None: cond_via_node, cond_left_rel, cond_right_rel will be ignored
        instead the cypher query will be run which return nodes 'left' and 'right' to be linked
        with relationship of type {relationship}
        :param cond_cypher_dict: parameters required for the cypher query
        """
        # checking compliance of provided parameters
        if not cond_cypher:
            # Each relationship spec may carry a direction marker on at most one side,
            # e.g. "<FROM_DATA" or "FROM_DATA>", never "<FROM_DATA>"
            assert not (cond_left_rel.startswith("<") and cond_left_rel.endswith(">"))
            assert not (cond_right_rel.startswith("<") and cond_right_rel.endswith(">"))
            if relationship == '_default_':
                # NOTE(review): the HAS_<CLASS> default is only derived in this (non-cypher)
                # branch; with cond_cypher supplied and relationship unset, the literal
                # string '_default_' is used as the relationship type — confirm intended
                relationship = f'HAS_{right_class.upper()}'
            # Split the left relationship spec into direction markers and a bare type name
            cond_left_rel_mark1 = ""
            cond_left_rel_mark2 = ""
            if cond_left_rel.startswith("<"):
                cond_left_rel_mark1 = "<"
            if cond_left_rel.endswith(">"):
                cond_left_rel_mark2 = ">"
            cond_left_rel_type = re.sub(r'^(\<)?(.*?)(\>)?$', r'\2', cond_left_rel)  # strip < / > markers
            # Same split for the right relationship spec
            cond_right_rel_mark1 = ""
            cond_right_rel_mark2 = ""
            if cond_right_rel.startswith("<"):
                cond_right_rel_mark1 = "<"
            if cond_right_rel.endswith(">"):
                cond_right_rel_mark2 = ">"
            cond_right_rel_type = re.sub(r'^(\<)?(.*?)(\>)?$', r'\2', cond_right_rel)  # strip < / > markers
        if cond_cypher:
            if self.verbose:
                print(
                    f"Using cypher condition to link nodes. Labels: {left_class}, {right_class}; Cypher: {cond_cypher}")
            # The user-supplied query must return columns named 'left' and 'right'
            periodic_part1 = """
            CALL apoc.cypher.run($cypher, $cypher_dict) YIELD value
            RETURN value.`left` as left, value.`right` as right
            """
        else:
            # Pairs found via the central cond_via_node; *0..1 also lets the central
            # node itself play the role of left or right
            periodic_part1 = f'''
            MATCH (left){cond_left_rel_mark1}-[:`{cond_left_rel_type}`*0..1]-{cond_left_rel_mark2}(sdr:`{cond_via_node}`),
            (sdr){cond_right_rel_mark1}-[:`{cond_right_rel_type}`*0..1]-{cond_right_rel_mark2}(right)
            WHERE left:`{left_class}` and right:`{right_class}`
            RETURN left, right
            '''
        # Batched MERGE via apoc.periodic.iterate; note the cypher/cypher_dict params are
        # passed through even in the non-cypher path (they are simply unused there)
        q = f"""
        call apoc.periodic.iterate(
            '{periodic_part1}',
            '
            MERGE (left)-[:`{relationship}`]->(right)
            ',
            {{batchSize:10000, parallel:false, params: {{cypher: $cypher, cypher_dict: $cypher_dict}}}})
            YIELD total, batches, failedBatches
            RETURN total, batches, failedBatches
        """
        params = {'cypher': cond_cypher, 'cypher_dict': cond_cypher_dict}
        if self.debug:
            print(" Query           : ", q)
            print(" Query parameters: ", params)
        self.query(q, params)
def link_nodes_on_matching_property(self, label1: str, label2: str, property1: str, rel: str,
property2=None) -> None:
"""
Locate any pair of Neo4j nodes where all of the following hold:
1) the first one has label1
2) the second one has label2
3) the two nodes agree in the value of property1 (if property2 is None),
or in the values of property1 in the 1st node and property2 in the 2nd node
For any such pair found, add a relationship - with the name specified in the rel argument - from the 1st to 2nd node,
unless already present
:param label1: A string against which the label of the 1st node must match
:param label2: A string against which the label of the 2nd node must match
:param property1: Name of property that must be present in the 1st node (and also in 2nd node, if property2 is None)
:param property2: Name of property that must be present in the 2nd node (may be None)
:param rel: Name to give to all relationships that get created
:return: None
"""
if not property2:
property2 = property1
q = f'''MATCH (x:`{label1}`), (y:`{label2}`) WHERE x.`{property1}` = y.`{property2}`
MERGE (x)-[:{rel}]->(y)'''
if self.debug:
print(f"""
query: {q}
""")
self.query(q)
def link_nodes_on_matching_property_value(self, label1: str, label2: str, prop_name: str, prop_value: str,
rel: str) -> None:
"""
Locate any pair of Neo4j nodes where all of the following hold:
1) the first one has label1
2) the second one has label2
3) both nodes have a property with the specified name
4) the string values of the properties in (3) in the two nodes are both equal to the specified value
For any such pair found, add a relationship - with the name specified in the rel argument - from the 1st to 2nd node,
unless already present
:param label1: A string against which the label of the 1st node must match
:param label2: A string against which the label of the 2nd node must match
:param prop_name: Name of property that must be present in both nodes
:param prop_value: A STRING value that the above property must have in both nodes
:param rel: Name to give to all relationships that get created
:return: None
"""
q = f'''MATCH (x:`{label1}`), (y:`{label2}`) WHERE x.`{prop_name}` = "{prop_value}" AND y.`{prop_name}` = "{prop_value}"
MERGE (x)-[:{rel}]->(y)'''
if self.debug:
print(f"""
query: {q}
""")
self.query(q)
def link_nodes_by_ids(self, node_id1: int, node_id2: int, rel: str, rel_props=None) -> None:
"""
Locate the pair of Neo4j nodes with the given Neo4j internal ID's.
If they are found, add a relationship - with the name specified in the rel argument,
and with the specified optional properties - from the 1st to 2nd node, unless already present
TODO: maybe return the Neo4j ID of the relationship just created
:param node_id1: An integer with the Neo4j internal ID to locate the 1st node
:param node_id2: An integer with the Neo4j internal ID to locate the 2nd node
:param rel: A string specifying a Neo4j relationship name
:param rel_props: Optional dictionary with the relationship properties. EXAMPLE: {'since': 2003, 'code': 'xyz'}
:return: None
"""
cypher_rel_props, cypher_dict = self.dict_to_cypher(rel_props) # Process the optional relationship properties
# EXAMPLE of cypher_rel_props: '{cost: $par_1, code: $par_2}' (possibly blank)
# EXAMPLE of cypher_dict: {'par_1': 65.99, 'par_2': 'xyz'} (possibly empty)
q = f"""
MATCH (x), (y)
WHERE id(x) = $node_id1 and id(y) = $node_id2
MERGE (x)-[:`{rel}` {cypher_rel_props}]->(y)
"""
# Extend the (possibly empty) Cypher data dictionary, to also include a value for the key "node_id1" and "node_id2"
cypher_dict["node_id1"] = node_id1
cypher_dict["node_id2"] = node_id2
if self.debug:
print(f"""
query: {q}
parameters: {cypher_dict}
""")
self.query(q, cypher_dict)
#####################################################################################################
# #
# METHODS TO READ IN DATA #
# #
#####################################################################################################
    def load_df(
            self,
            df: pd.DataFrame,
            label: str,
            merge=False,
            primary_key=None,
            merge_overwrite=False,
            rename=None,
            max_chunk_size=10000) -> list:
        """
        Load a Pandas data frame into Neo4j.
        Each line is loaded as a separate node.

        TODO: maybe save the Panda data frame's row number as an attribute of the Neo4j nodes, to ALWAYS have a primary key

        :param df: A Pandas data frame to import into Neo4j
        :param label: String with a Neo4j label to use on the newly-created nodes
        :param merge:   If True, records are replaced, rather than added, if already present;
                        if False, always added.
                        NOTE: merging only takes effect when primary_key is also given;
                        merge=True without a primary_key silently falls back to CREATE
        :param primary_key: Only applicable when merging.  String with the name of the field that
                            serves as a primary key; if a new record with that field is to be added,
                            it'll replace the current one
                            TODO: to allow for list of primary_keys
        :param merge_overwrite: If True then on merge the existing nodes will be overwritten with the new data,
                            otherwise they will be updated with new information (keys that are not present in the df
                            will not be deleted)
        :param rename:      Optional dictionary to rename the Pandas dataframe's columns to
                            EXAMPLE {"current_name": "name_we_want"}
        :param max_chunk_size: To limit the number of rows loaded at one time
        :return:            List of node ids, created in the operation
        """
        if isinstance(df, pd.Series):
            # Promote a Series to a single-column DataFrame so the code below is uniform
            df = pd.DataFrame(df)
        if rename is not None:
            df = df.rename(rename, axis=1)  # Rename the columns in the Pandas data frame
        primary_key_s = ''
        if primary_key is not None:
            # Ensure an index on the primary key exists before merging on it
            neo_indexes = self.get_indexes()
            if f"{label}.{primary_key}" not in list(neo_indexes['name']):
                self.create_index(label, primary_key)
                time.sleep(1)  # sleep to give Neo4j time to populate the index
            primary_key_s = '{' + f'`{primary_key}`:record[\'{primary_key}\']' + '}'
            # EXAMPLE of primary_key_s: "{patient_id:record['patient_id']}"
        op = 'MERGE' if (merge and primary_key) else 'CREATE'  # A MERGE or CREATE operation, as needed
        res = []
        for df_chunk in np.array_split(df, int(len(df.index) / max_chunk_size) + 1):  # Split the operation into batches
            # SET x=record overwrites the node's properties; SET x+=record only adds/updates
            cypher = f'''
            WITH $data AS data
            UNWIND data AS record {op} (x:`{label}`{primary_key_s})
            SET x{('' if merge_overwrite else '+')}=record
            RETURN id(x) as node_id
            '''
            cypher_dict = {'data': df_chunk.to_dict(orient='records')}
            if self.debug:
                print(f"""
                query: {cypher}
                parameters: {cypher_dict}
                """)
            res_chunk = self.query(cypher, cypher_dict)
            if res_chunk:
                res += [r['node_id'] for r in res_chunk]
        return res
    def load_dict(self, dct: dict, label="Root", rel_prefix="", maxdepth=10):
        """
        Loads python dict to Neo4j (auto-unpacking hierarchy)
        Children of type dict converted into related nodes with relationship {rel_prefix}{key}
        Children of type list (of dict or other) converted into:
            - multiple related nodes for list items of type dict
            - properties of parent node of type list in case of other list items
        see example in tests.test_json.test_import_custom_json

        :param dct: python dict to load
        :param label: label to assign to the root node
        :param rel_prefix: prefix to add to relationship name from parent to child
        :param maxdepth: maximum possible depth(of children) of dict
        :return: None
        """
        # initial load of the complete json as a node
        j = json.dumps(dct)
        self.query(
            """
            CALL apoc.merge.node(['JSON',$label],{value:$value})
            YIELD node
            RETURN node
            """
            ,
            {'label': label, 'value': j}
        )
        i = 0
        # unpacking hierarchy (looping until no nodes with JSON label are left or maxdepth reached)
        # Each pass peels one level: every :JSON node's 'value' payload is parsed and its
        # map/list/scalar children become related nodes or plain properties
        while (self.query("MATCH (j:JSON) RETURN j LIMIT 1")) and i < maxdepth:
            self.query("""
            MATCH (j:JSON)
            WITH j, apoc.convert.fromJsonMap(j.value) as map
            WITH j, map, keys(map) as ks UNWIND ks as k
            call apoc.do.case([
                apoc.meta.type(map[k]) = 'MAP'
                ,
                '
                CALL apoc.merge.node(["JSON", $k], {value: apoc.convert.toJson($map[$k])}) YIELD node
                CALL apoc.merge.relationship(j,$rel_prefix + k, {}, {}, node, {}) YIELD rel
                RETURN node, rel
                '
                ,
                apoc.meta.type(map[k]) = 'LIST'
                ,
                '
                //first setting LIST property on main node
                WITH j, map, k, [i in map[k] WHERE apoc.meta.type(i) <> "MAP"] as not_map_lst
                call apoc.do.when(
                    size(not_map_lst) <> 0,
                    "call apoc.create.setProperty([j], $k, $not_map_lst) YIELD node RETURN node",
                    "RETURN j",
                    {j:j, k:k, not_map_lst:not_map_lst}
                ) YIELD value
                WITH *, [i in map[k] WHERE NOT i IN not_map_lst] as map_lst
                UNWIND map_lst as item_map
                CALL apoc.merge.node(["JSON", $k], {value: apoc.convert.toJson(item_map)}) YIELD node
                CALL apoc.merge.relationship(j,$rel_prefix + k, {}, {}, node, {}) YIELD rel
                RETURN node, rel
                '
            ]
            ,
            '
            call apoc.create.setProperty([j], $k, $map[$k]) YIELD node
            RETURN node
            '
            ,
            {k: k, map: map, j: j, rel_prefix: $rel_prefix}
            ) YIELD value
            WITH DISTINCT j
            REMOVE j:JSON
            REMOVE j.value
            """, {"rel_prefix": rel_prefix})
            i += 1
    def load_arrows_dict(self, dct: dict, merge_on=None, always_create=None, timestamp=False):
        """
        Loads data created in prototyping tool https://arrows.app/
        Uses MERGE statement separately on each node and each relationship using all properties as identifying properties
        Example of use:
        with open("arrows.json", 'r') as jsonfile:
            dct = json.load(jsonfile)
        neo = NeoInterface()
        neo.load_arrows_dict(dct)

        :param dct: python dict to load
        :param merge_on: None or dict with label as key and list of properties as value - the properties will be used
        as identProps in apoc.merge.node, the rest of the properties will be used as onCreateProps and onMatchProps
        :param always_create: None or list of labels; nodes carrying any of these labels are always
        CREATEd (apoc.create.node) rather than MERGEd
        :param timestamp: if True, a _timestamp property (server timestamp()) is added to the
        on-create/on-match property maps
        :return: result of the corresponding Neo4j query (first row: node_map, rel_map), or None
        """
        assert merge_on is None or isinstance(merge_on, dict)
        if not merge_on:
            merge_on = {}
        for key, item in merge_on.items():
            assert isinstance(item, list)
        assert always_create is None or isinstance(always_create, list)
        # if merge_on:
        # The query below processes all nodes first (building a map from arrows id to
        # created/merged node), then all relationships, resolving endpoints via that map
        q = """
            UNWIND $map['nodes'] as nd
            WITH *, apoc.coll.intersection(nd['labels'], keys($merge_on)) as hc_labels    // list of relevant labels from the merge_on map
            WITH *, apoc.coll.toSet(apoc.coll.flatten(apoc.map.values($merge_on, hc_labels))) as hc_props    // list of relevant properties
            WITH *, [prop in hc_props WHERE prop in keys(nd['properties'])] as hc_props   // filter to keep only the existing ones
            WITH
                *,
                CASE WHEN size(nd['labels']) = 0 THEN
                    ['No Label']
                ELSE
                    nd['labels']
                END as labels,
                CASE WHEN size(hc_props) > 0 THEN
                    {
                        identProps:
                            CASE WHEN size(apoc.coll.intersection(keys(nd['properties']), hc_props)) = 0 and nd['caption'] <> '' THEN
                                {value: nd['caption']}
                            ELSE
                                apoc.map.submap(nd['properties'], hc_props)
                            END
                        ,
                        onMatchProps: apoc.map.submap(nd['properties'], [key in keys(nd['properties'])
                                                                         WHERE NOT key IN hc_props])
                    }
                ELSE
                    {
                        identProps:
                            CASE WHEN size(keys(nd['properties'])) = 0 and nd['caption'] <> '' THEN
                                {value: nd['caption']}
                            ELSE
                                nd['properties']
                            END
                        ,
                        onMatchProps: {}
                    }
                END as props
            WITH
                nd,
                labels,
                props['identProps'] as identProps,
                props['onMatchProps'] as onMatchProps,
                props['onMatchProps'] as onCreateProps //TODO: change if these need to differ in the future
            //dummy property if no properties are ident
            WITH *, CASE WHEN identProps = {} THEN {_dummy_prop_:1} ELSE identProps END as identProps
            """ + \
            ("""
            WITH
                *,
                apoc.map.mergeList([onCreateProps, {_timestamp: timestamp()}]) as onCreateProps,
                apoc.map.mergeList([onMatchProps, {_timestamp: timestamp()}]) as onMatchProps
            """ if timestamp else "") + \
            """
            CALL apoc.do.when(
                size(apoc.coll.intersection(labels, $always_create)) > 0,
                "CALL apoc.create.node($labels, apoc.map.mergeList([$identProps, $onMatchProps, $onCreateProps])) YIELD node RETURN node",
                "CALL apoc.merge.node($labels, $identProps, $onMatchProps, $onCreateProps) YIELD node RETURN node",
                {labels: labels, identProps:identProps, onMatchProps:onMatchProps, onCreateProps:onCreateProps}
            ) yield value as value2
            WITH *, value2['node'] as node
            //eliminating dummy property
            CALL apoc.do.when(
                identProps = {_dummy_prop_: 1},
                'REMOVE node._dummy_prop_ RETURN node',
                'RETURN node',
                {node: node}
            ) YIELD value
            WITH *
            WITH apoc.map.fromPairs(collect([nd['id'], node])) as node_map
            UNWIND $map['relationships'] as rel
            call apoc.merge.relationship(
                node_map[rel['fromId']],
                CASE WHEN rel['type'] = '' OR rel['type'] IS NULL THEN 'RELATED' ELSE rel['type'] END,
                rel['properties'],
                {},
                node_map[rel['toId']], {}
            )
            YIELD rel as relationship
            WITH node_map, apoc.map.fromPairs(collect([rel['id'], relationship])) as rel_map
            RETURN node_map, rel_map
            """
        params = {
            'map': dct,
            'merge_on': (merge_on if merge_on else {}),
            'always_create': (always_create if always_create else [])
        }
        res = self.query(q, params)
        if res:
            return res[0]
        else:
            return None
############################################################################################
# #
# UTILITY METHODS #
# #
############################################################################################
def dict_to_cypher(self, data_dict: {}) -> (str, {}):
"""
Turn a Python dictionary (meant for specifying node or relationship attributes)
into a string suitable for Cypher queries,
plus its corresponding data-binding dictionary.
EXAMPLE :
{'cost': 65.99, 'item description': 'the "red" button'}
will lead to
(
'{`cost`: $par_1, `item description`: $par_2}',
{'par_1': 65.99, 'par_2': 'the "red" button'}
)
Note that backticks are used in the Cypher string to allow blanks in the key names.
Consecutively-named dummy variables ($par_1, $par_2, etc) are used,
instead of names based on the keys of the data dictionary (such as $cost),
because the keys might contain blanks.
:param data_dict: A Python dictionary
:return: A pair consisting of a string suitable for Cypher queries,
and a corresponding data-binding dictionary.
If the passed dictionary is empty or None,
the pair returned is ("", {})
"""
if data_dict is None or data_dict == {}:
return ("", {})
rel_props_list = [] # A list of strings
data_dictionary = {}
parameter_count = 1 # Sequential integers used in the data dictionary, such as "par_1", "par_2", etc.
for prop_key, prop_value in data_dict.items():
parameter_token = f"par_{parameter_count}" # EXAMPLE: "par_3"
# Extend the list of Cypher property relationships and their corresponding data dictionary
rel_props_list.append(f"`{prop_key}`: ${parameter_token}") # The $ refers to the data binding
data_dictionary[parameter_token] = prop_value
parameter_count += 1
rel_props_str = ", ".join(rel_props_list)
rel_props_str = "{" + rel_props_str + "}"
return (rel_props_str, data_dictionary)
############################################################################################
# #
# METHODS TO SUPPORT DEBUGGING #
# #
############################################################################################
def neo4j_query_params_from_dict(self, params: dict, char_limit=500) -> str:
"""
Given a Python dictionary, meant to represent value/key pairs,
compose and return a string suitable for pasting into the Neo4j browser, for testing purposes.
EXAMPLE: {'age': 22, 'gender': 'F'}
will produce the string
:param age=> 22;
:param gender=> 'F';
:param params: query parameters in the form of python dict
:param char_limit: limit number of characters to include in each line
:return: string of parameters to paste into Neo4j browser for testing procedures in the browser
"""
s = "" # String suitable for pasting into the Neo4j browser
for key, item in params.items():
prefix = "".join([":param ", key, "=> "])
if type(item) == int:
res = ("".join([prefix, str(item), ";"]))
elif type(item) == dict:
cypher_dict = "".join(["apoc.map.fromPairs([" + ",".join(
[f"['{key2}', {item2}]" for key2, item2 in item.items()]) + "])"])
res = ("".join([prefix, cypher_dict, ";"]))
else:
res = ("".join([prefix, "".join(['\'', str(item), '\'']), ";"]))
s += res[:char_limit] + "\n"
return s
############################################################################################
# #
# METHODS TO SUPPORT JSON IMPORT/EXPORT #
# #
############################################################################################
def export_dbase_json(self) -> {}:
"""
Export the entire Neo4j database as a JSON string
EXAMPLE:
{ 'nodes': 2,
'relationships': 1,
'properties': 6,
'data': '[{"type":"node","id":"3","labels":["User"],"properties":{"name":"Adam","age":32,"male":true}},\n
{"type":"node","id":"4","labels":["User"],"properties":{"name":"Eve","age":18}},\n
{"id":"1","type":"relationship","label":"KNOWS","properties":{"since":2003},"start":{"id":"3","labels":["User"]},"end":{"id":"4","labels":["User"]}}\n
]'
}
NOTE: the Neo4j Browser uses a slightly different format for NODES:
{
"identity": 4,
"labels": [
"User"
],
"properties": {
"name": "Eve",
"age": 18
}
}
and a substantially more different format for RELATIONSHIPS:
{
"identity": 1,
"start": 3,
"end": 4,
"type": "KNOWS",
"properties": {
"since": 2003
}
}
:return: A dictionary specifying the number of nodes exported,
the number of relationships, and the number of properties,
as well as a "data" field with the actual export in JSON format
"""
cypher = '''
CALL apoc.export.json.all(null,{useTypes:true, stream: true})
YIELD nodes, relationships, properties, data
RETURN nodes, relationships, properties, data
'''
result = self.query(cypher) # It returns a list with a single element
export_dict = result[0]
# print(export_dict)
pseudo_json = export_dict["data"]
# Who knows why, the string returned by the APOC function isn't actual JSON! :o Some tweaking needed to produce valid JSON...
json = "[" + pseudo_json.replace("\n", ",\n ") + "\n]" # The newlines \n make the JSON much more human-readable
export_dict["data"] = json
# print(export_dict)
return export_dict
    def import_json_data(self, json_str: str) -> str:
        """
        Import nodes and/or relationships into the database, as directed by the given data dump in JSON form.
        Note: the id's of the nodes need to be shifted,
              because one cannot force the Neo4j internal id's to be any particular value...
              and, besides (if one is importing into an existing database), particular id's may already be taken.

        :param json_str: A JSON string with the format specified under export_dbase_json()
        :return: A status message with import details if successful, or an Exception if not
        """
        try:
            json_list = json.loads(json_str)  # Turn the string (representing a JSON list) into a list
        except Exception as ex:
            raise Exception(f"Incorrectly-formatted JSON string. {ex}")
        if self.debug:
            print("json_list: ", json_list)
        assert type(json_list) == list, "The JSON string does not represent the expected list"
        id_shifting = {}  # To map the Neo4j internal ID's specified in the JSON data dump
        # into the ID's of newly-created nodes

        # Do an initial pass for correctness, to try to avoid partial imports
        for i, item in enumerate(json_list):
            # We use item.get(key_name) to handle without error situation where the key is missing
            if (item.get("type") != "node") and (item.get("type") != "relationship"):
                raise Exception(
                    f"Item in list index {i} must have a 'type' of either 'node' or 'relationship'.  Nothing imported.  Item: {item}")
            if item["type"] == "node":
                if "id" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'node' but it lacks an 'id'.  Nothing imported.  Item: {item}")
            elif item["type"] == "relationship":
                # Relationships must carry a label plus start/end endpoint descriptors, each with an id
                if "label" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but lacks a 'label'.  Nothing imported.  Item: {item}")
                if "start" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but lacks a 'start' value.  Nothing imported.  Item: {item}")
                if "end" not in item:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but lacks a 'end' value.  Nothing imported.  Item: {item}")
                if "id" not in item["start"]:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but its 'start' value lacks an 'id'.  Nothing imported.  Item: {item}")
                if "id" not in item["end"]:
                    raise Exception(
                        f"Item in list index {i} is marked as 'relationship' but its 'end' value lacks an 'id'.  Nothing imported.  Item: {item}")

        # First, process all the nodes, and in the process create the id_shifting map
        num_nodes_imported = 0
        for item in json_list:
            if item["type"] == "node":
                if self.debug:
                    print("ADDING NODE: ", item)
                    print(f'     Creating node with label `{item["labels"][0]}` and properties {item["properties"]}')
                old_id = int(item["id"])
                new_id = self.create_node_by_label_and_dict(item["labels"][0], item[
                    "properties"])  # TODO: Only the 1st label is used for now
                id_shifting[old_id] = new_id
                num_nodes_imported += 1
        if self.debug:
            print("id_shifting map:", id_shifting)

        # Then process all the relationships, linking to the correct (newly-created) nodes by using the id_shifting map
        num_rels_imported = 0
        for item in json_list:
            if item["type"] == "relationship":
                if self.debug:
                    print("ADDING RELATIONSHIP: ", item)
                rel_name = item["label"]
                rel_props = item.get(
                    "properties")  # Also works if no "properties" is present (relationships may lack it)
                start_id_original = int(item["start"]["id"])
                end_id_original = int(item["end"]["id"])
                start_id_shifted = id_shifting[start_id_original]
                end_id_shifted = id_shifting[end_id_original]
                # print(f'     Creating relationship named `{rel_name}` from node {start_id_shifted} to node {end_id_shifted}, with properties {rel_props}')
                self.link_nodes_by_ids(start_id_shifted, end_id_shifted, rel_name, rel_props)
                num_rels_imported += 1

        return f"Successful import of {num_nodes_imported} node(s) and {num_rels_imported} relationship(s)"
############################################################################################
# #
# METHODS TO SUPPORT RDF PROCEDURES #
# #
############################################################################################
def rdf_generate_uri(self,
dct={},
include_label_in_uri=True,
prefix='neo4j://graph.schema#',
add_prefixes=[],
sep='/',
uri_prop='uri') -> None:
"""
A method that
- on the neo4j nodes with labels equal to keys of :dict dictionary
- sets additional label Resource (for handling in RDF)
- sets property with name :uri_prop with value that starts with prefix followed by a string
built by concatenating with separator :sep the list of :add_prefixes together with values of
properties on each node that are specified in the values of the :dict (different set for each Neo4j label)
Used for the purpose of being able to save and restore subgraphs using methods rdf_get_subgraph and
rdf_import_subgraph_inline.
:param dct: dictionary describing set of node properties that construct a primary key (and eventually uri) for that node
EXAMPLE1 (simple):
dct = {
'Vehicle': ['type', 'model'],
'Car': ['model', 'fuel']
}
generate_uri(dct)
will set property uri like 'neo4j://graph.schema#car/toyota' on nodes with labels Vehicle
(in case v.type == 'car' and v.model == 'toyota')
and set property uri like 'neo4j://graph.schema#toyota/petrol' on nodes with labels Car
(in case c.model == 'toyota' and v.fuel == 'petrol')
EXAMPLE2 (properties and neighbouring properties):
graph = CREATE (v:Vehicle{`producer`: 'Toyota'}),
(m:Model{`name`: 'Prius'}),
(v)-[:HAS_MODEL]->(m)
dct = {
"Vehicle": {"properties": "producer"},
"Model": {"properties": ["name"],
"neighbours": [
{"label": "Vehicle", "relationship": "HAS_MODEL", "property": producer"}
]
}
}
set URI on 'Vehicle' nodes using node's property "producer"
uri = 'neo4j://graph.schema#Vehicle/Toyota'
set URI on 'Model' nodes using node's property "name" and neighbouring node's property "producer"
uri = 'neo4j://graph.schema#Model/Toyota/Prius'
:param prefix: a prefix for uri
:param add_prefixes: list of prefixes to prepend uri with (after prefix), list joined with :sep separator
:param sep: separator for joining add_perfixes and the primary keys into uri
:return: None
"""
for label, config in dct.items():
assert isinstance(label, str)
assert any(isinstance(config, t) for t in [list, str, dict])
where = ""
neighbours = False
neighbours_query = ""
if isinstance(config, str):
properties_ext = [config]
elif isinstance(config, list):
properties_ext = config
elif isinstance(config, dict):
if 'properties' in config.keys():
if isinstance(config['properties'], str):
properties_ext = [config['properties']]
elif isinstance(config['properties'], list):
properties_ext = config['properties']
if 'neighbours' in config.keys():
assert isinstance(config['neighbours'], list), \
f"neighbours should be of type LIST [{{}}[, {{}}]] not {type(config['neighbours'])}"
for i, neighbour in enumerate(config['neighbours']):
if isinstance(neighbour, list): #if a list converting it to a dict as per req.
assert len(neighbour) == 3, \
f"each neighbour should be of length 3: [<label>, <relationship>, <property>] got: {neighbour}"
neighbour = {'label': neighbour[0], 'relationship': neighbour[1], 'property': neighbour[2]}
config['neighbours'][i] = neighbour
assert isinstance(neighbour, dict), \
f"each neighbour should be of type DICT not {type(neighbour)}"
for key in ['label', 'relationship', 'property']:
assert key in neighbour.keys(), f"{key} not found in {neighbour}"
neighbours = True
neighbours_query = """
WITH *
UNWIND apoc.coll.zip(range(0,size($neighbours)-1), $neighbours) as pair
WITH *, pair[0] as ind, pair[1] as neighbour
CALL apoc.path.expand(x, neighbour['relationship'], neighbour['label'], 1, 1)
YIELD path
WITH x, ind, nodes(path) as ind_neighbours
UNWIND ind_neighbours as nbr
WITH DISTINCT x, ind, nbr
WHERE x<>nbr
WITH *
ORDER BY x, ind, id(nbr)
WITH x, ind, collect(nbr) as coll
WITH x, ind, apoc.map.mergeList(coll) as nbr
WITH x, collect({index: ind, map: nbr}) as nbrs"""
if 'where' in config.keys():
where = config['where']
else:
properties_ext = []
cypher = f"""
MATCH (x:`{label}`)
{where}
{neighbours_query}
SET x:Resource
SET
x.
`{uri_prop}` = apoc.text.urlencode(
$prefix + apoc.text.join($add_prefixes + $opt_label +
{"[nbr in nbrs | nbr['map'][$neighbours[nbr['index']]['property']]] +" if neighbours else ""}
[prop in $properties | x[prop]], $sep)
)
"""
cypher_dict = {
'prefix': prefix,
'add_prefixes': add_prefixes,
'sep': sep,
'opt_label': ([label] if include_label_in_uri else []),
'properties': properties_ext
}
if neighbours:
cypher_dict.update({
'neighbours': config['neighbours']
})
if self.debug:
print(f"""
query: {cypher}
parameters: {cypher_dict}
""")
self.query(cypher, cypher_dict)
self._rdf_uri_cleanup()
def rdf_get_subgraph(self, cypher: str, cypher_dict={}, format="Turtle-star") -> str:
"""
A method that returns an RDF serialization of a subgraph specified by :cypher query
:param cypher: cypher query to return a subgraph
:param cypher_dict: parameters required for the cypher query
:param format: RDF format in which to serialize output
:return: str - RDF serialization of subgraph
"""
self._rdf_subgraph_cleanup()
url = self.rdf_host + "neo4j/cypher"
j = ({'cypher': cypher, 'format': format, 'cypherParams': cypher_dict})
response = requests.post(
url=url,
json=j,
auth=self.credentials)
# TODO: switch to detached HTTP endpoint when code from neo4j is available
# see https://community.neo4j.com/t/export-procedure-that-returns-serialized-rdf/38781/2
return response.text
def rdf_import_fetch(self, url: str, format="Turtle-star"):
cypher = "CALL n10s.rdf.import.fetch ($url, $format) YIELD terminationStatus, triplesLoaded, triplesParsed, " \
"namespaces, extraInfo, callParams"
cypher_dict = {'url': url, 'format': format}
if self.debug:
print(f"""
query: {cypher}
parameters: {cypher_dict}
""")
return self.query(cypher, cypher_dict)
def rdf_import_subgraph_inline(self, rdf: str, format="Turtle-star"):
"""
A method that creates/merges appropriate nodes in Neo4j as specified in the provided :rdf string
The nodes will be MERGEd by 'uri' property
:param rdf: RDF serialization of Neo4j nodes and relationships
:param format: RDF serialization format
:return: returns a dictionary with keys triplesParsed, triplesLoaded as a summary of the operation
"""
assert self.rdf, "rdf option is not enabled at init of NeoInterface class"
if not self.autoconnect:
self.rdf_setup_connection()
cypher = """
CALL n10s.rdf.import.inline($rdf, $format)
YIELD triplesParsed, triplesLoaded, extraInfo
RETURN *
"""
# cypher_dict = {'rdf':rdf.encode('utf-8').decode('utf-8'), 'format': format}
cypher_dict = {'rdf': rdf, 'format': format}
if self.debug:
print(f"""
query: {cypher}
parameters: {cypher_dict}
""")
res = self.query(cypher, cypher_dict)
self._rdf_subgraph_cleanup()
if len(res) > 0:
return res[0]
else:
return {'triplesParsed': 0, 'triplesLoaded': 0, 'extraInfo': ''}
def _rdf_subgraph_cleanup(self):
# in case labels with spaces where serialized new labels with spaces being replaced with %20 could have been created
# this helper function is supposed to revert the change
cypher = """
UNWIND $labels as label
CALL apoc.refactor.rename.label(label, apoc.text.regreplace(label, '%20', ' '))
YIELD batches, failedBatches, total, failedOperations
RETURN batches, failedBatches, total, failedOperations
"""
cypher_dict = {'labels': [label for label in self.get_labels() if "%20" in label]}
if self.debug:
print(f"""
query: {cypher}
parameters: {cypher_dict}
""")
self.query(cypher, cypher_dict)
# in case properties with spaces where serialized new properties with spaces being replaced with %20 could have been created
# this helper function is supposed to revert the change
cypher2 = """
CALL db.schema.nodeTypeProperties() YIELD nodeLabels, propertyName
WHERE propertyName contains "%20"
CALL apoc.cypher.doIt(
'MATCH (node:`' + apoc.text.join(nodeLabels, '`:`') + '`) ' +
'WHERE "' + propertyName + '" in keys(node)' +
'SET node.`' + apoc.text.replace(propertyName, '%20', ' ') + '` = node.`' + propertyName + '`' +
'REMOVE node.`' + propertyName + '`'
,
{}
) YIELD value
RETURN value['node']
"""
cypher_dict2 = {}
if self.debug:
print(f"""
query: {cypher2}
parameters: {cypher_dict2}
""")
self.query(cypher2, cypher_dict2)
self._rdf_uri_cleanup()
def _rdf_uri_cleanup(self):
# URIs - replace selected encoded values with their original characters (for readability)
cypher3 = """
MATCH (n)
WHERE n.uri is not null
SET n.uri = apoc.text.replace(n.uri, '%23', '#')
SET n.uri = apoc.text.replace(n.uri, '%2F', '/')
SET n.uri = apoc.text.replace(n.uri, '%3A', ':')
"""
cypher_dict3 = {}
if self.debug:
print(f"""
query: {cypher3}
parameters: {cypher_dict3}
""")
self.query(cypher3, cypher_dict3)
def rdf_get_graph_onto(self):
    """
    A method that returns an ontology autogenerated from existing nodes in
    Neo4j (provided by the n10s (neosemantics) library).

    :return: str - serialized ontology
    """
    assert self.rdf, "rdf option is not enabled at init of NeoInterface class"
    # n10s exposes the autogenerated ontology at <rdf_host>neo4j/onto
    resp = requests.get(
        self.rdf_host + "neo4j/onto",
        auth=self.credentials)
    return resp.text
|
en
| 0.690222
|
# The Neo4j python connectivity library "Neo4j Python Driver" # The version of the Neo4j driver being used # To check returned data types High level class to interact with neo4j from Python. It provides a higher-level wrapper around the Neo4j python connectivity library "Neo4j Python Driver", documented at: https://neo4j.com/docs/api/python-driver/current/api.html SECTIONS IN THIS CLASS: * INIT * METHODS TO RUN GENERIC QUERIES * METHODS TO RETRIEVE DATA * METHODS TO CREATE/MODIFY SCHEMA * METHODS TO CREATE/MODIFY DATA * METHODS TO CREATE NEW RELATIONSHIPS * METHODS TO READ IN DATA * UTILITY METHODS * METHODS TO SUPPORT DEBUGGING * METHODS TO SUPPORT JSON IMPORT/EXPORT * METHODS TO SUPPORT RDF PROCEDURES AUTHORS: <NAME> and <NAME>, GlaxoSmithKline Based in part on Neo4jLiaison library (MIT License: https://github.com/BrainAnnex/neo4j-liaison) If unable to create a Neo4j driver object, raise an Exception reminding the user to check whether the Neo4j database is running :param host: URL to connect to database with. 
DEFAULT: read from NEO4J_HOST environmental variable :param credentials: Pair of strings (tuple or list) containing, respectively, the database username and password DEFAULT: read from NEO4J_USER and NEO4J_PASSWORD environmental variables if None then no authentication is used :param apoc: Flag indicating whether apoc library is used on Neo4j database to connect to :param verbose: Flag indicating whether a verbose mode is to be used by all methods of this class :param debug: Flag indicating whether a debug mode is to be used by all methods of this class :param autoconnect Flag indicating whether the class should establish connection to database at initialization # TODO: add test for autoconnect == False # Attempt to create a driver object # Extra initializations if APOC custom procedures (note: APOC must also be enabled on the database) # if apoc: # self.setup_all_apoc_custom() # Extra initializations if RDF support required # This unpacking will work whether the credentials were passed as a tuple or list # Object to connect to Neo4j's Bolt driver for Python # Object to connect to Neo4j's Bolt driver for Python # Return the version of the Neo4j driver being used. EXAMPLE: "4.2.1" Terminate the database connection. Note: this method is automatically invoked after the last operation of a "with" statement :return: None ############################################################################################ # # # METHODS TO RUN GENERIC QUERIES # # # ############################################################################################ Run a general Cypher query and return a list of dictionaries. In cases of error, return an empty list. A new session to the database driver is started, and then immediately terminated after running the query. NOTE: if the Cypher query returns a node, and one wants to extract its internal Neo4j ID or labels (in addition to all the properties and their values) then use query_expanded() instead. 
:param q: A Cypher query :param params: An optional Cypher dictionary EXAMPLE, assuming that the cypher string contains the substrings "$node_id": {'node_id': 20} :return: A (possibly empty) list of dictionaries. Each dictionary in the list will depend on the nature of the Cypher query. EXAMPLES: Cypher returns nodes (after finding or creating them): RETURN n1, n2 -> list item such as {'n1': {'gender': 'M', 'patient_id': 123} 'n2': {'gender': 'F', 'patient_id': 444}} Cypher returns attribute values that get renamed: RETURN n.gender AS client_gender, n.pid AS client_id -> list items such as {'client_gender': 'M', 'client_id': 123} Cypher returns attribute values without renaming: RETURN n.gender, n.pid -> list items such as {'n.gender': 'M', 'n.pid': 123} Cypher returns a single computed value -> a single list item such as {"count(n)": 100} Cypher returns a single relationship, with or without attributes: MERGE (c)-[r:PAID_BY]->(p) -> a single list item such as [{ 'r': ({}, 'PAID_BY', {}) }] Cypher creates nodes (without returning them) -> empty list # Start a new session, use it, and then immediately close it # Note: result is a neo4j.Result object; # more specifically, an object of type neo4j.work.result.Result # See https://neo4j.com/docs/api/python-driver/current/api.html#neo4j.Result # Return the result as a list of dictionaries. # This must be done inside the "with" block, # while the session is still open Expanded version of query(), meant to extract additional info for queries that return Graph Data Types, i.e. nodes, relationships or paths, such as "MATCH (n) RETURN n", or "MATCH (n1)-[r]->(n2) RETURN r" For example, if nodes were returned, and their Neo4j internal IDs and/or labels are desired (in addition to all the properties and their values) Unless the flatten flag is True, individual records are kept as separate lists. For example, "MATCH (b:boat), (c:car) RETURN b, c" will return a structure such as [ [b1, c1] , [b2, c2] ] if flatten is False, vs. 
[b1, c1, b2, c2] if flatten is True. (Note: each b1, c1, etc, is a dictionary.) TODO: Scenario to test: if b1 == b2, would that still be [b1, c1, b1(b2), c2] or [b1, c1, c2] - i.e. would we remove the duplicates? Try running with flatten=True "MATCH (b:boat), (c:car) RETURN b, c" on data like "CREATE (b:boat), (c1:car1), (c2:car2)" :param q: A Cypher query :param params: An optional Cypher dictionary EXAMPLE, assuming that the cypher string contains the substring "$age": {'age': 20} :param flatten: Flag indicating whether the Graph Data Types need to remain clustered by record, or all placed in a single flattened list. :return: A (possibly empty) list of dictionaries, which will depend on which Graph Data Types were returned in the Cypher query. EXAMPLE - for a returned node {'gender': 'M', 'age': 20, 'neo4j_id': 123, 'neo4j_labels': ['patient']} EXAMPLE - for a returned relationship {'price': 7500, 'neo4j_id': 2, 'neo4j_start_node': <Node id=11 labels=frozenset() properties={}>, 'neo4j_end_node': <Node id=14 labels=frozenset() properties={}>, 'neo4j_type': 'bought_by'}] # Start a new session, use it, and then immediately close it # Note: result is a neo4j.Result iterable object; # more specifically, an object of type neo4j.work.result.Result # See https://neo4j.com/docs/api/python-driver/current/api.html#neo4j.Result # The following must be done inside the "with" block, while the session is still open # Note: record is a neo4j.Record object - an immutable ordered collection of key-value pairs. 
# (the keys are the dummy names used for the nodes, such as "n") # See https://neo4j.com/docs/api/python-driver/current/api.html#record # EXAMPLE of record (if node n was returned): # <Record n=<Node id=227 labels=frozenset({'person', 'client'}) properties={'gender': 'M', 'age': 99}>> # (it has one key, "n") # EXAMPLE of record (if node n and node c were returned): # <Record n=<Node id=227 labels=frozenset({'person', 'client'}) properties={'gender': 'M', 'age': 99}> # c=<Node id=66 labels=frozenset({'car'}) properties={'color': 'blue'}>> # (it has 2 keys, "n" and "c") # Note: item is a neo4j.graph.Node object # OR a neo4j.graph.Relationship object # OR a neo4j.graph.Path object # See https://neo4j.com/docs/api/python-driver/current/api.html#node # https://neo4j.com/docs/api/python-driver/current/api.html#relationship # https://neo4j.com/docs/api/python-driver/current/api.html#path # EXAMPLES of item: # <Node id=95 labels=frozenset({'car'}) properties={'color': 'white', 'make': 'Toyota'}> # <Relationship id=12 nodes=(<Node id=147 labels=frozenset() properties={}>, <Node id=150 labels=frozenset() properties={}>) type='bought_by' properties={'price': 7500}> # EXAMPLE: {'gender': 'M', 'age': 99} # Example: 227 # Example: ['person', 'client'] # Example: 227 # A neo4j.graph.Node object with "id", "labels" and "properties" # A neo4j.graph.Node object with "id", "labels" and "properties" # Example: <Node id=118 labels=frozenset({'car'}) properties={'color': 'white'}> # The name of the relationship # The sequence of Node objects in this path ################################################################################################## # # # METHODS TO RETRIEVE DATA # # # ################################################################################################## For situations where one is fetching just 1 field, and one desires a list of those values, rather than a dictionary of records. 
In other respects, similar to the more general get_nodes() EXAMPLES: fetch_single_field("car", "price", properties_condition={"car_make": "Toyota"}) will RETURN a list of prices of all the Toyota models fetch_single_field("car", "price", properties_condition={"car_make": "Toyota"}, clause="n.price < 50000") will RETURN a list of prices of all the Toyota models that cost less than 50000 :param field_name: A string with the name of the desired field (attribute) For more information on the other parameters, see get_nodes() :return: A list of the values of the field_name attribute in the nodes that match the specified conditions EXAMPLES: get_nodes("") # Get ALL nodes get_nodes("client") get_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"}) get_nodes("client", cypher_clause = "n.age > 40 OR n.income < 50000") get_nodes("client", cypher_clause = "n.age > $some_age", cypher_dict = {"$some_age": 40}) get_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"} , cypher_clause = "n.age > 40 OR n.income < 50000") RETURN a list of the records (as dictionaries of ALL the key/value node properties) corresponding to all the Neo4j nodes with the specified label, AND satisfying the given Cypher CLAUSE (if present), AND exactly matching ALL of the specified property key/values pairs (if present). I.e. an implicit AND operation. IMPORTANT: nodes referred to in the Cypher clause must be specified as "n." A dictionary of data binding (cypher_dict) for the Cypher clause may be optionally specified. In case of conflict (any key overlap) between the dictionaries cypher_dict and properties_condition, and Exception is raised. Optionally, the Neo4j internal node ID and label name(s) may also be obtained and returned. :param labels: A string (or list/tuple of strings) specifying one or more Neo4j labels; an empty string indicates that the match is to be carried out across all labels - NOT RECOMMENDED for large databases! 
(Note: blank spaces ARE allowed in the strings) :param cypher_dict: Dictionary of data binding for the Cypher string. EXAMPLE: {"gender": "M", "age": 40} :param cypher_clause: String with a clause to refine the search; any nodes it refers to, MUST be specified as "n." EXAMPLE with hardwired values: "n.age > 40 OR n.income < 50000" EXAMPLE with data-binding: "n.age > $age OR n.income < $income" (data-binding values are specified in cypher_dict) :param properties_condition: A (possibly-empty) dictionary of property key/values pairs. Example: {"gender": "M", age: 64} IMPORTANT: cypher_dict and properties_dict must have no overlapping keys, or an Exception will be raised :param return_nodeid: Flag indicating whether to also include the Neo4j internal node ID in the returned data (using "neo4j_id" as its key in the returned dictionary) :param return_labels: Flag indicating whether to also include the Neo4j label names in the returned data (using "neo4j_labels" as its key in the returned dictionary) :return: A list whose entries are dictionaries with each record's information (the node's attribute names are the keys) EXAMPLE: [ {"gender": "M", "age": 42, "condition_id": 3}, {"gender": "M", "age": 76, "location": "Berkeley"} ] Note that ALL the attributes of each node are returned - and that they may vary across records. 
If the flag return_nodeid is set to True, then an extra key/value pair is included in the dictionaries, of the form "neo4j_id": some integer with the Neo4j internal node ID If the flag return_labels is set to True, then an extra key/value pair is included in the dictionaries, of the form "neo4j_labels": [list of Neo4j label(s) attached to that node] EXAMPLE using both of the above flags: [ {"neo4j_id": 145, "neo4j_labels": ["person", "client"], "gender": "M", "age": 42, "condition_id": 3}, {"neo4j_id": 222, "neo4j_labels": ["person"], "gender": "M", "age": 76, "location": "Berkeley"} ] # TODO: provide an option to specify the desired fields In get_nodes(). query: {cypher} parameters: {cypher_dict} # If we want to return both 'neo4j_id' and 'neo4j_labels', we're done, because query_expanded() provides both # If we get thus far, it means that either the 'neo4j_id' or the 'neo4j_labels' attribute isn't wanted; # remove the unwanted one from all the dictionaries in the elements of result_list Same as get_nodes(), but the result is returned as a Pandas dataframe [See get_nodes() for information about the arguments] :param labels: :param properties_condition: :param cypher_clause: :param cypher_dict: :param return_nodeid: :param return_labels: :return: A Pandas dataframe Turn a set of specification into the MATCH part of the Cypher query, and its data-binding dictionary. 
EXAMPLES: _match_nodes("client") _match_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"}) _match_nodes("client", cypher_clause = "n.age > 40 OR n.income < 50000") _match_nodes("client", cypher_clause = "n.age > $age", cypher_dict = {"$age": 40}) _match_nodes("client", properties_condition = {"gender": "M", "ethnicity": "white"} , cypher_clause = "n.age > 40 OR n.income < 50000") RETURN the MATCH part of the Cypher query, and its data-binding dictionary, corresponding to all the Neo4j nodes with the given label or labels (if specified), AND satisfying the given Cypher CLAUSE (if specified, and optionally with the data-binding cypher_dict), AND exactly matching ALL of the specified property key/values pairs (if specified). I.e. an implicit AND operation. Note: cypher_dict should not contain keys of the form `par_n`, where n is an integer, or an Exception might results. :param labels: A string, or list/tuple of strings, of Neo4j labels (Note: blank spaces ARE allowed) :param properties_condition: A (possibly-empty) dictionary of property key/values pairs. Example: {"gender": "F", "age": 22} If None or empty, no restrictions are placed on the match :param cypher_clause: String with a clause to refine the search; any nodes it refers to, MUST be specified as "n." EXAMPLE with hardwired values: "n.age > 40 OR n.income < 50000" EXAMPLE with data-binding: "n.age > $age OR n.income < $income" (data-binding values are specified in cypher_dict) :param cypher_dict: Dictionary of data binding for the Cypher string. EXAMPLE: {"gender": "M", "age": 40} It should not contain any keys of the form `par_n`, where n is an integer (those names are reserved for internal use) :return: A pair consisting of the MATCH part of the Cypher query, and its data-binding dictionary # Transform the dictionary properties_condition into a string describing its corresponding Cypher clause, # plus a corresponding data-binding dictionary. 
# (assuming an implicit AND between equalities described by the dictionary terms), # # EXAMPLE: # properties_condition: {"gender": "F", "year first met": 2003} # will lead to: # clause_from_properties = "{`gender`: $par_1, `year first met`: $par_2}" # props_data_binding = {'par_1': "F", 'par_2': 2003} # The properties dictionary is to be used as the Cypher-binding dictionary # Merge the properties dictionary into the existing cypher_dict, PROVIDED that there's no conflict # Take the set intersection # If not equal to the empty set # Merge the properties dictionary into the existing cypher_dict # Zap any leading/trailing blanks # Turn labels (string or list/tuple of labels) into a string suitable for inclusion into Cypher # Construct the Cypher string Turn the given string, or list/tuple of strings - representing Neo4j labels - into a string suitable for inclusion in a Cypher query. Blanks ARE allowed in names. EXAMPLES: "client" gives rise to ":`client`" ["car", "car manufacturer"] gives rise to ":`car`:`car manufacturer`" :param labels: A string, or list/tuple of strings, representing Neo4j labels :return: A string suitable for inclusion in a Cypher query # Turn the label strings, or list/tuple of labels, into a string suitable for inclusion into Cypher # EXAMPLE: ":`label 1`:`label 2`" Fetch all the nodes connected to the given one by INbound relationships to it (its "parents"), as well as by OUTbound relationships to it (its "children") :param node_id: An integer with a Neo4j internal node ID :return: A dictionary with 2 keys: 'parent_list' and 'child_list' The values are lists of dictionaries with 3 keys: "id", "label", "rel" EXAMPLE of individual items in either parent_list or child_list: {'id': 163, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'} # Fetch the parents query: {cypher} # A new neo4j.Result object # EXAMPLE of parent_list: # [{'id': 163, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'}, # {'id': 150, 'labels': ['Subject'], 'rel': 'HAS_TREATMENT'}] # 
Fetch the children query: {cypher} # A new neo4j.Result object # EXAMPLE of child_list: # [{'id': 107, 'labels': ['Source Data Row'], 'rel': 'FROM_DATA'}, # {'id': 103, 'labels': ['Source Data Row'], 'rel': 'FROM_DATA'}] Extract and return a list of all the Neo4j labels present in the database. No particular order should be expected. TODO: test when there are nodes that have multiple labels :return: A list of strings Extract and return a list of all the Neo4j relationship types present in the database. No particular order should be expected. :return: A list of strings CALL db.schema.nodeTypeProperties() YIELD nodeLabels, propertyName WHERE $label in nodeLabels and propertyName IS NOT NULL RETURN DISTINCT propertyName ORDER BY propertyName ######################################################################################### # # # METHODS TO GET/CREATE/MODIFY SCHEMA # # # ######################################################################################### Return all the database indexes, and some of their attributes, as a Pandas dataframe. Optionally restrict the type (such as "BTREE") of indexes returned. 
EXAMPLE: labelsOrTypes name properties type uniqueness 0 [my_label] index_23b0962b [my_property] BTREE NONUNIQUE 1 [my_label] some_name [another_property] BTREE UNIQUE :param types: Optional list to of types to limit the result to :return: A (possibly-empty) Pandas dataframe # Define a restrictive clause call db.indexes() yield name, labelsOrTypes, properties, type, uniqueness {where} return * Return all the database constraints, and some of their attributes, as a Pandas dataframe with 3 columns: name EXAMPLE: "my_constraint" description EXAMPLE: "CONSTRAINT ON ( patient:patient ) ASSERT (patient.patient_id) IS UNIQUE" details EXAMPLE: "Constraint( id=3, name='my_constraint', type='UNIQUENESS', schema=(:patient {patient_id}), ownedIndex=12 )" :return: A (possibly-empty) Pandas dataframe call db.constraints() yield name, description, details return * Create a new database index, unless it already exists, to be applied to the specified label and key (property). The standard name given to the new index is of the form label.key EXAMPLE - to index nodes labeled "car" by their key "color": create_index("car", "color") This new index - if not already in existence - will be named "car.color" If an existing index entry contains a list of labels (or types) such as ["l1", "l2"] , and a list of properties such as ["p1", "p2"] , then the given pair (label, key) is checked against ("l1_l2", "p1_p2"), to decide whether it already exists. :param label: A string with the node label to which the index is to be applied :param key: A string with the key (property) name to which the index is to be applied :return: True if a new index was created, or False otherwise # A Pandas dataframe with info about indexes; # in particular 2 columns named "labelsOrTypes" and "properties" # Index is created if not already exists. 
# a standard name for the index is assigned: `{label}.{key}` # Proceed by row For example, if the Pandas dataframe existing_indexes contains the following columns: labelsOrTypes properties 0 [car] [color, make] 1 [person] [sex] then existing_standard_names will be: [('car', 'color_make'), ('person', 'sex')] query: {q} Create a uniqueness constraint for a node property in the graph, unless a constraint with the standard name of the form `{label}.{key}.{type}` is already present Note: it also creates an index, and cannot be applied if an index already exists. EXAMPLE: create_constraint("patient", "patient_id") :param label: A string with the node label to which the constraint is to be applied :param key: A string with the key (property) name to which the constraint is to be applied :param type: For now, the default "UNIQUE" is the only allowed option :param name: Optional name to give to the new constraint; if not provided, a standard name of the form `{label}.{key}.{type}` is used. EXAMPLE: "patient.patient_id.UNIQUE" :return: True if a new constraint was created, or False otherwise # TODO: consider other types of constraints # constraint is created if not already exists. # a standard name for a constraint is assigned: `{label}.{key}.{type}` if name was not provided query: {q} # Note: creation of a constraint will crash if another constraint, or index, already exists # for the specified label and key Eliminate the index with the specified name. :param name: Name of the index to eliminate :return: True if successful or False otherwise (for example, if the index doesn't exist) query: {q} # Note: it crashes if the index doesn't exist Eliminate all the indexes in the database and, optionally, also get rid of all constraints :param including_constraints: Flag indicating whether to also ditch all the constraints :return: None Eliminate the constraint with the specified name. 
:param name: Name of the constraint to eliminate :return: True if successful or False otherwise (for example, if the constraint doesn't exist) query: {q} # Note: it crashes if the constraint doesn't exist Eliminate all the constraints in the database :return: None ##################################################################################### # # # METHODS TO CREATE/MODIFY DATA # # # ##################################################################################### Create a new node with the given label and with the attributes/values specified in the items dictionary Return the Neo4j internal ID of the node just created. :param labels: A string, or list/tuple of strings, of Neo4j label (ok to include blank spaces) :param properties: An optional dictionary of properties to set for the new node. EXAMPLE: {'age': 22, 'gender': 'F'} :return: An integer with the Neo4j internal ID of the node just created # From the dictionary of attribute names/values, # create a part of a Cypher query, with its accompanying data dictionary # EXAMPLE: # attributes_str = '{`cost`: $par_1, `item description`: $par_2}' # data_dictionary = {'par_1': 65.99, 'par_2': 'the "red" button'} # Turn labels (string or list/tuple of labels) into a string suitable for inclusion into Cypher # Assemble the complete Cypher query In create_node_by_label_and_dict(). query: {cypher} parameters: {data_dictionary} # Return the Neo4j internal ID of the node just created Empty out (by default completely) the Neo4j database. Optionally, only delete nodes with the specified labels, or only keep nodes with the given labels. Note: the keep_labels list has higher priority; if a label occurs in both lists, it will be kept. IMPORTANT: it does NOT clear indexes; "ghost" labels may remain! 
To get rid of those, run drop_all_indexes() :param delete_labels: An optional string, or list of strings, indicating specific labels to DELETE :param keep_labels: An optional string or list of strings, indicating specific labels to KEEP (keep_labels has higher priority over delete_labels) :return: None # Delete ALL nodes AND ALL relationship from the database; for efficiency, do it all at once # In order to avoid memory errors, delete data in batches call apoc.periodic.iterate( 'MATCH (n) RETURN n', 'DETACH DELETE(n)', {{batchSize:{str(batch_size)}, parallel:false}}) YIELD total, batches, failedBatches RETURN total, batches, failedBatches query: {q} # If no specific labels to delete were given, # then consider all labels for possible deletion (unless marked as "keep", below) # If a string was passed, turn it into a list # Initialize list of labels to keep, if not provided # If a string was passed, turn it into a list # Delete all nodes with labels in the delete_labels list, # EXCEPT for any label in the keep_labels list query: {q} Use this to get rid of absolutely everything in the database. 
Optionally, keep nodes with a given label, or keep the indexes, or keep the constraints :param keep_labels: An optional list of strings, indicating specific labels to KEEP :param drop_indexes: Flag indicating whether to also ditch all indexes (by default, True) :param drop_constraints:Flag indicating whether to also ditch all constraints (by default, True) :return: None EXAMPLE - locate the "car" with vehicle id 123 and set its color to white and price to 7000 set_fields(labels = "car", set_dict = {"color": "white", "price": 7000}, properties_condition = {"vehicle id": 123}) LIMITATION: blanks are allowed in the keys of properties_condition, but not in those of set_dict :param labels: A string, or list/tuple of strings, representing Neo4j labels :param set_dict: A dictionary of field name/values to create/update the node's attributes (note: no blanks are allowed in the keys) :param properties_condition: :param cypher_clause: :param cypher_dict: :return: None # field_name, field_value are key/values in set_dict # Example: "n.`field1` = $field1" # Extend the Cypher data-binding dictionary # Example of data_binding at the end of the loop: {"field1": value1, "field2": value2} # Example: "SET n.field1 = $field1, n.field2 = $field2" # Example of cypher: # "MATCH (n:car {`vehicle id`: $par_1}) SET n.`color` = color, n.`price` = $field2" # Example of data binding: # {"par_1": 123, "color": "white", "price": 7000} :param mode:str; assert mode in ['merge', 'create'] :param label:str; label of the nodes to extract data from :param cypher: str; only of label not provided: cypher that returns id(node) of the nodes to extract data from EXAMPLE: cypher = ''' MATCH (f:`Source Data Table`{{_domain_:$domain}})-[:HAS_DATA]->(node:`Source Data Row`) RETURN id(node) ''' :param cypher_dict: None/dict parameters required for the cypher query EXAMPLE: cypher_dict={'domain':'ADSL'} :param target_label: label(s) of the newly created nodes with extracted data: list or str :param 
property_mapping: dict or list if dict: keys correspond to the property names of source data (e.g. Source Data Row) and values correspond to to the property names of the target class where the data is extracted to if list: properties of the extracted node (as per the list) will extracted and will be named same as in the source node :param relationship: type of the relationship (to/from the extraction node) to create :param direction: direction of the relationship to create (>: to the extraction node, <: from the extraction node) :return: None CALL apoc.cypher.run($cypher, $cypher_dict) YIELD value MATCH (data) WHERE id(data) = value['id(node)'] RETURN data call apoc.periodic.iterate( $q_match_part , ' WITH data, apoc.coll.intersection(keys($mapping), keys(data)) as common_keys {("" if mode == "create" else "WHERE size(common_keys) > 0")} WITH data, apoc.map.fromLists([key in common_keys | $mapping[key]], [key in common_keys | data[key]]) as submap call apoc.{mode}.node($target_label, submap) YIELD node MERGE (data){rel_left}-[:`{relationship}`]-{rel_right}(node) ', {{batchSize:10000, parallel:false, params: $inner_params}}) YIELD total, batches, failedBatches RETURN total, batches, failedBatches ######################################################################################### # # # METHODS TO CREATE NEW RELATIONSHIPS # # # ######################################################################################### Creates relationship of type {relationship} ... :param left_class: Name of the left class :param right_class: Name of the right class :param relationship: Name to give the relationship (if None: will use name of right_class (f'HAS_{right_class.upper())') :param cond_via_node: Name of central node from which relationships will be created :param cond_left_rel: Name and direction of relationship from right_class (e.g. FROM_DATA> or <FROM_DATA) :param cond_right_rel: Name and direction of relationship from left_class (e.g. 
FROM_DATA> or <FROM_DATA) :param cond_cypher: (optional) - if not None: cond_via_node, cond_left_rel, cond_right_rel will be ignored instead the cypher query will be run which return nodes 'left' and 'right' to be linked with relationship of type {relationship} :param cond_cypher_dict: parameters required for the cypher query # checking compliance of provided parameters CALL apoc.cypher.run($cypher, $cypher_dict) YIELD value RETURN value.`left` as left, value.`right` as right MATCH (left){cond_left_rel_mark1}-[:`{cond_left_rel_type}`*0..1]-{cond_left_rel_mark2}(sdr:`{cond_via_node}`), (sdr){cond_right_rel_mark1}-[:`{cond_right_rel_type}`*0..1]-{cond_right_rel_mark2}(right) WHERE left:`{left_class}` and right:`{right_class}` RETURN left, right call apoc.periodic.iterate( '{periodic_part1}', ' MERGE (left)-[:`{relationship}`]->(right) ', {{batchSize:10000, parallel:false, params: {{cypher: $cypher, cypher_dict: $cypher_dict}}}}) YIELD total, batches, failedBatches RETURN total, batches, failedBatches Locate any pair of Neo4j nodes where all of the following hold: 1) the first one has label1 2) the second one has label2 3) the two nodes agree in the value of property1 (if property2 is None), or in the values of property1 in the 1st node and property2 in the 2nd node For any such pair found, add a relationship - with the name specified in the rel argument - from the 1st to 2nd node, unless already present :param label1: A string against which the label of the 1st node must match :param label2: A string against which the label of the 2nd node must match :param property1: Name of property that must be present in the 1st node (and also in 2nd node, if property2 is None) :param property2: Name of property that must be present in the 2nd node (may be None) :param rel: Name to give to all relationships that get created :return: None MATCH (x:`{label1}`), (y:`{label2}`) WHERE x.`{property1}` = y.`{property2}` MERGE (x)-[:{rel}]->(y) query: {q} Locate any pair of Neo4j nodes 
where all of the following hold: 1) the first one has label1 2) the second one has label2 3) both nodes have a property with the specified name 4) the string values of the properties in (3) in the two nodes are both equal to the specified value For any such pair found, add a relationship - with the name specified in the rel argument - from the 1st to 2nd node, unless already present :param label1: A string against which the label of the 1st node must match :param label2: A string against which the label of the 2nd node must match :param prop_name: Name of property that must be present in both nodes :param prop_value: A STRING value that the above property must have in both nodes :param rel: Name to give to all relationships that get created :return: None MATCH (x:`{label1}`), (y:`{label2}`) WHERE x.`{prop_name}` = "{prop_value}" AND y.`{prop_name}` = "{prop_value}" MERGE (x)-[:{rel}]->(y) query: {q} Locate the pair of Neo4j nodes with the given Neo4j internal ID's. If they are found, add a relationship - with the name specified in the rel argument, and with the specified optional properties - from the 1st to 2nd node, unless already present TODO: maybe return the Neo4j ID of the relationship just created :param node_id1: An integer with the Neo4j internal ID to locate the 1st node :param node_id2: An integer with the Neo4j internal ID to locate the 2nd node :param rel: A string specifying a Neo4j relationship name :param rel_props: Optional dictionary with the relationship properties. 
EXAMPLE: {'since': 2003, 'code': 'xyz'} :return: None # Process the optional relationship properties # EXAMPLE of cypher_rel_props: '{cost: $par_1, code: $par_2}' (possibly blank) # EXAMPLE of cypher_dict: {'par_1': 65.99, 'par_2': 'xyz'} (possibly empty) MATCH (x), (y) WHERE id(x) = $node_id1 and id(y) = $node_id2 MERGE (x)-[:`{rel}` {cypher_rel_props}]->(y) # Extend the (possibly empty) Cypher data dictionary, to also include a value for the key "node_id1" and "node_id2" query: {q} parameters: {cypher_dict} ##################################################################################################### # # # METHODS TO READ IN DATA # # # ##################################################################################################### Load a Pandas data frame into Neo4j. Each line is loaded as a separate node. TODO: maybe save the Panda data frame's row number as an attribute of the Neo4j nodes, to ALWAYS have a primary key :param df: A Pandas data frame to import into Neo4j :param label: String with a Neo4j label to use on the newly-created nodes :param merge: If True, records are replaced, rather than added, if already present; if False, always added :param primary_key: Only applicable when merging. 
String with the name of the field that serves as a primary key; if a new record with that field is to be added, it'll replace the current one TODO: to allow for list of primary_keys :param merge_overwrite: If True then on merge the existing nodes will be overwritten with the new data, otherwise they will be updated with new information (keys that are not present in the df will not be deleted) :param rename: Optional dictionary to rename the Pandas dataframe's columns to EXAMPLE {"current_name": "name_we_want"} :param max_chunk_size: To limit the number of rows loaded at one time :return: List of node ids, created in the operation # Rename the columns in the Pandas data frame # sleep to give Neo4j time to populate the index # EXAMPLE of primary_key_s: "{patient_id:record['patient_id']}" # A MERGE or CREATE operation, as needed # Split the operation into batches WITH $data AS data UNWIND data AS record {op} (x:`{label}`{primary_key_s}) SET x{('' if merge_overwrite else '+')}=record RETURN id(x) as node_id query: {cypher} parameters: {cypher_dict} Loads python dict to Neo4j (auto-unpacking hierarchy) Children of type dict converted into related nodes with relationship {rel_prefix}_{key} Children of type list (of dict or other) converted into: - multiple related nodes for list items of type dict - properties of parent node of type list in case list items see example in tests.test_json.test_import_custom_json :param dct: python dict to load :param label: label to assign to the root node :param rel_prefix: prefix to add to relationship name from parent to child :param maxdepth: maximum possible depth(of children) of dict :return: None # initial load of the complete json as a node CALL apoc.merge.node(['JSON',$label],{value:$value}) YIELD node RETURN node # unpacking hierarchy (looping until no nodes with JSON label are left or maxdepth reached MATCH (j:JSON) WITH j, apoc.convert.fromJsonMap(j.value) as map WITH j, map, keys(map) as ks UNWIND ks as k call apoc.do.case([ 
apoc.meta.type(map[k]) = 'MAP' , ' CALL apoc.merge.node(["JSON", $k], {value: apoc.convert.toJson($map[$k])}) YIELD node CALL apoc.merge.relationship(j,$rel_prefix + k, {}, {}, node, {}) YIELD rel RETURN node, rel ' , apoc.meta.type(map[k]) = 'LIST' , ' //first setting LIST property on main node WITH j, map, k, [i in map[k] WHERE apoc.meta.type(i) <> "MAP"] as not_map_lst call apoc.do.when( size(not_map_lst) <> 0, "call apoc.create.setProperty([j], $k, $not_map_lst) YIELD node RETURN node", "RETURN j", {j:j, k:k, not_map_lst:not_map_lst} ) YIELD value WITH *, [i in map[k] WHERE NOT i IN not_map_lst] as map_lst UNWIND map_lst as item_map CALL apoc.merge.node(["JSON", $k], {value: apoc.convert.toJson(item_map)}) YIELD node CALL apoc.merge.relationship(j,$rel_prefix + k, {}, {}, node, {}) YIELD rel RETURN node, rel ' ] , ' call apoc.create.setProperty([j], $k, $map[$k]) YIELD node RETURN node ' , {k: k, map: map, j: j, rel_prefix: $rel_prefix} ) YIELD value WITH DISTINCT j REMOVE j:JSON REMOVE j.value Loads data created in prototyping tool https://arrows.app/ Uses MERGE statement separately on each node and each relationship using all properties as identifying properties Example of use: with open("arrows.json", 'r') as jsonfile: dct = json.load(jsonfile) neo = NeoInterface() neo.load_arrows_dict(dct) :param dct: python dict to load :param merge_on: None or dict with label as key and list of properties as value - the properties will be used as identProps in apoc.merge.node, the rest of the properties will be used as onCreateProps and onMatchProps :return: result of the corresponding Neo4j query # if merge_on: UNWIND $map['nodes'] as nd WITH *, apoc.coll.intersection(nd['labels'], keys($merge_on)) as hc_labels // list of relevant labels from the merge_on map WITH *, apoc.coll.toSet(apoc.coll.flatten(apoc.map.values($merge_on, hc_labels))) as hc_props // list of relevant properties WITH *, [prop in hc_props WHERE prop in keys(nd['properties'])] as hc_props // filter to 
keep only the existing ones WITH *, CASE WHEN size(nd['labels']) = 0 THEN ['No Label'] ELSE nd['labels'] END as labels, CASE WHEN size(hc_props) > 0 THEN { identProps: CASE WHEN size(apoc.coll.intersection(keys(nd['properties']), hc_props)) = 0 and nd['caption'] <> '' THEN {value: nd['caption']} ELSE apoc.map.submap(nd['properties'], hc_props) END , onMatchProps: apoc.map.submap(nd['properties'], [key in keys(nd['properties']) WHERE NOT key IN hc_props]) } ELSE { identProps: CASE WHEN size(keys(nd['properties'])) = 0 and nd['caption'] <> '' THEN {value: nd['caption']} ELSE nd['properties'] END , onMatchProps: {} } END as props WITH nd, labels, props['identProps'] as identProps, props['onMatchProps'] as onMatchProps, props['onMatchProps'] as onCreateProps //TODO: change if these need to differ in the future //dummy property if no properties are ident WITH *, CASE WHEN identProps = {} THEN {_dummy_prop_:1} ELSE identProps END as identProps WITH *, apoc.map.mergeList([onCreateProps, {_timestamp: timestamp()}]) as onCreateProps, apoc.map.mergeList([onMatchProps, {_timestamp: timestamp()}]) as onMatchProps CALL apoc.do.when( size(apoc.coll.intersection(labels, $always_create)) > 0, "CALL apoc.create.node($labels, apoc.map.mergeList([$identProps, $onMatchProps, $onCreateProps])) YIELD node RETURN node", "CALL apoc.merge.node($labels, $identProps, $onMatchProps, $onCreateProps) YIELD node RETURN node", {labels: labels, identProps:identProps, onMatchProps:onMatchProps, onCreateProps:onCreateProps} ) yield value as value2 WITH *, value2['node'] as node //eliminating dummy property CALL apoc.do.when( identProps = {_dummy_prop_: 1}, 'REMOVE node._dummy_prop_ RETURN node', 'RETURN node', {node: node} ) YIELD value WITH * WITH apoc.map.fromPairs(collect([nd['id'], node])) as node_map UNWIND $map['relationships'] as rel call apoc.merge.relationship( node_map[rel['fromId']], CASE WHEN rel['type'] = '' OR rel['type'] IS NULL THEN 'RELATED' ELSE rel['type'] END, rel['properties'], 
{}, node_map[rel['toId']], {} ) YIELD rel as relationship WITH node_map, apoc.map.fromPairs(collect([rel['id'], relationship])) as rel_map RETURN node_map, rel_map ############################################################################################ # # # UTILITY METHODS # # # ############################################################################################ Turn a Python dictionary (meant for specifying node or relationship attributes) into a string suitable for Cypher queries, plus its corresponding data-binding dictionary. EXAMPLE : {'cost': 65.99, 'item description': 'the "red" button'} will lead to ( '{`cost`: $par_1, `item description`: $par_2}', {'par_1': 65.99, 'par_2': 'the "red" button'} ) Note that backticks are used in the Cypher string to allow blanks in the key names. Consecutively-named dummy variables ($par_1, $par_2, etc) are used, instead of names based on the keys of the data dictionary (such as $cost), because the keys might contain blanks. :param data_dict: A Python dictionary :return: A pair consisting of a string suitable for Cypher queries, and a corresponding data-binding dictionary. If the passed dictionary is empty or None, the pair returned is ("", {}) # A list of strings # Sequential integers used in the data dictionary, such as "par_1", "par_2", etc. # EXAMPLE: "par_3" # Extend the list of Cypher property relationships and their corresponding data dictionary # The $ refers to the data binding ############################################################################################ # # # METHODS TO SUPPORT DEBUGGING # # # ############################################################################################ Given a Python dictionary, meant to represent value/key pairs, compose and return a string suitable for pasting into the Neo4j browser, for testing purposes. 
EXAMPLE: {'age': 22, 'gender': 'F'} will produce the string :param age=> 22; :param gender=> 'F'; :param params: query parameters in the form of python dict :param char_limit: limit number of characters to include in each line :return: string of parameters to paste into Neo4j browser for testing procedures in the browser # String suitable for pasting into the Neo4j browser ############################################################################################ # # # METHODS TO SUPPORT JSON IMPORT/EXPORT # # # ############################################################################################ Export the entire Neo4j database as a JSON string EXAMPLE: { 'nodes': 2, 'relationships': 1, 'properties': 6, 'data': '[{"type":"node","id":"3","labels":["User"],"properties":{"name":"Adam","age":32,"male":true}},\n {"type":"node","id":"4","labels":["User"],"properties":{"name":"Eve","age":18}},\n {"id":"1","type":"relationship","label":"KNOWS","properties":{"since":2003},"start":{"id":"3","labels":["User"]},"end":{"id":"4","labels":["User"]}}\n ]' } NOTE: the Neo4j Browser uses a slightly different format for NODES: { "identity": 4, "labels": [ "User" ], "properties": { "name": "Eve", "age": 18 } } and a substantially more different format for RELATIONSHIPS: { "identity": 1, "start": 3, "end": 4, "type": "KNOWS", "properties": { "since": 2003 } } :return: A dictionary specifying the number of nodes exported, the number of relationships, and the number of properties, as well as a "data" field with the actual export in JSON format CALL apoc.export.json.all(null,{useTypes:true, stream: true}) YIELD nodes, relationships, properties, data RETURN nodes, relationships, properties, data # It returns a list with a single element # print(export_dict) # Who knows why, the string returned by the APOC function isn't actual JSON! :o Some tweaking needed to produce valid JSON... 
# The newlines \n make the JSON much more human-readable # print(export_dict) Import nodes and/or relationships into the database, as directed by the given data dump in JSON form. Note: the id's of the nodes need to be shifted, because one cannot force the Neo4j internal id's to be any particular value... and, besides (if one is importing into an existing database), particular id's may already be taken. :param json_str: A JSON string with the format specified under export_dbase_json() :return: A status message with import details if successful, or an Exception if not # Turn the string (representing a JSON list) into a list # To map the Neo4j internal ID's specified in the JSON data dump # into the ID's of newly-created nodes # Do an initial pass for correctness, to try to avoid partial imports # We use item.get(key_name) to handle without error situation where the key is missing # First, process all the nodes, and in the process create the id_shifting map # TODO: Only the 1st label is used for now # Then process all the relationships, linking to the correct (newly-created) nodes by using the id_shifting map # Also works if no "properties" is present (relationships may lack it) # print(f' Creating relationship named `{rel_name}` from node {start_id_shifted} to node {end_id_shifted}, with properties {rel_props}') ############################################################################################ # # # METHODS TO SUPPORT RDF PROCEDURES # # # ############################################################################################ #', A method that - on the neo4j nodes with labels equal to keys of :dict dictionary - sets additional label Resource (for handling in RDF) - sets property with name :uri_prop with value that starts with prefix followed by a string built by concatenating with separator :sep the list of :add_prefixes together with values of properties on each node that are specified in the values of the :dict (different set for each Neo4j label) 
Used for the purpose of being able to save and restore subgraphs using methods rdf_get_subgraph and rdf_import_subgraph_inline. :param dct: dictionary describing set of node properties that construct a primary key (and eventually uri) for that node EXAMPLE1 (simple): dct = { 'Vehicle': ['type', 'model'], 'Car': ['model', 'fuel'] } generate_uri(dct) will set property uri like 'neo4j://graph.schema#car/toyota' on nodes with labels Vehicle (in case v.type == 'car' and v.model == 'toyota') and set property uri like 'neo4j://graph.schema#toyota/petrol' on nodes with labels Car (in case c.model == 'toyota' and v.fuel == 'petrol') EXAMPLE2 (properties and neighbouring properties): graph = CREATE (v:Vehicle{`producer`: 'Toyota'}), (m:Model{`name`: 'Prius'}), (v)-[:HAS_MODEL]->(m) dct = { "Vehicle": {"properties": "producer"}, "Model": {"properties": ["name"], "neighbours": [ {"label": "Vehicle", "relationship": "HAS_MODEL", "property": producer"} ] } } set URI on 'Vehicle' nodes using node's property "producer" uri = 'neo4j://graph.schema#Vehicle/Toyota' set URI on 'Model' nodes using node's property "name" and neighbouring node's property "producer" uri = 'neo4j://graph.schema#Model/Toyota/Prius' :param prefix: a prefix for uri :param add_prefixes: list of prefixes to prepend uri with (after prefix), list joined with :sep separator :param sep: separator for joining add_perfixes and the primary keys into uri :return: None #if a list converting it to a dict as per req. 
WITH * UNWIND apoc.coll.zip(range(0,size($neighbours)-1), $neighbours) as pair WITH *, pair[0] as ind, pair[1] as neighbour CALL apoc.path.expand(x, neighbour['relationship'], neighbour['label'], 1, 1) YIELD path WITH x, ind, nodes(path) as ind_neighbours UNWIND ind_neighbours as nbr WITH DISTINCT x, ind, nbr WHERE x<>nbr WITH * ORDER BY x, ind, id(nbr) WITH x, ind, collect(nbr) as coll WITH x, ind, apoc.map.mergeList(coll) as nbr WITH x, collect({index: ind, map: nbr}) as nbrs MATCH (x:`{label}`) {where} {neighbours_query} SET x:Resource SET x. `{uri_prop}` = apoc.text.urlencode( $prefix + apoc.text.join($add_prefixes + $opt_label + {"[nbr in nbrs | nbr['map'][$neighbours[nbr['index']]['property']]] +" if neighbours else ""} [prop in $properties | x[prop]], $sep) ) query: {cypher} parameters: {cypher_dict} A method that returns an RDF serialization of a subgraph specified by :cypher query :param cypher: cypher query to return a subgraph :param cypher_dict: parameters required for the cypher query :param format: RDF format in which to serialize output :return: str - RDF serialization of subgraph # TODO: switch to detached HTTP endpoint when code from neo4j is available # see https://community.neo4j.com/t/export-procedure-that-returns-serialized-rdf/38781/2 query: {cypher} parameters: {cypher_dict} A method that creates/merges appropriate nodes in Neo4j as specified in the provided :rdf string The nodes will be MERGEd by 'uri' property :param rdf: RDF serialization of Neo4j nodes and relationships :param format: RDF serialization format :return: returns a dictionary with keys triplesParsed, triplesLoaded as a summary of the operation CALL n10s.rdf.import.inline($rdf, $format) YIELD triplesParsed, triplesLoaded, extraInfo RETURN * # cypher_dict = {'rdf':rdf.encode('utf-8').decode('utf-8'), 'format': format} query: {cypher} parameters: {cypher_dict} # in case labels with spaces where serialized new labels with spaces being replaced with %20 could have been created # 
this helper function is supposed to revert the change UNWIND $labels as label CALL apoc.refactor.rename.label(label, apoc.text.regreplace(label, '%20', ' ')) YIELD batches, failedBatches, total, failedOperations RETURN batches, failedBatches, total, failedOperations query: {cypher} parameters: {cypher_dict} # in case properties with spaces where serialized new properties with spaces being replaced with %20 could have been created # this helper function is supposed to revert the change CALL db.schema.nodeTypeProperties() YIELD nodeLabels, propertyName WHERE propertyName contains "%20" CALL apoc.cypher.doIt( 'MATCH (node:`' + apoc.text.join(nodeLabels, '`:`') + '`) ' + 'WHERE "' + propertyName + '" in keys(node)' + 'SET node.`' + apoc.text.replace(propertyName, '%20', ' ') + '` = node.`' + propertyName + '`' + 'REMOVE node.`' + propertyName + '`' , {} ) YIELD value RETURN value['node'] query: {cypher2} parameters: {cypher_dict2} # URIs - replace selected encoded values with their original characters (for readability) MATCH (n) WHERE n.uri is not null SET n.uri = apoc.text.replace(n.uri, '%23', '#') SET n.uri = apoc.text.replace(n.uri, '%2F', '/') SET n.uri = apoc.text.replace(n.uri, '%3A', ':') query: {cypher3} parameters: {cypher_dict3} A method that returns an ontology autogenerated from existing nodes in Neo4j (provided by n10s(neosemantics) library :return: str - serialized ontology
| 2.818413
| 3
|
zun/db/sqlalchemy/alembic/versions/cf46a28f46bc_add_container_actions_table.py
|
wanghuiict/zun
| 83
|
6625432
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add container_actions table
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2017-12-01 10:47:00.192171
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``container_actions`` table.

    Stores one row per action performed on a container, linked to the owning
    container row through ``container_uuid``.
    """
    op.create_table(
        'container_actions',
        # Row creation/update timestamps.
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        # Name of the action performed on the container.
        sa.Column('action', sa.String(length=255), nullable=True),
        # UUID of the container the action applies to (FK below).
        sa.Column('container_uuid', sa.String(length=36), nullable=False),
        # Request/identity context that triggered the action.
        sa.Column('request_id', sa.String(length=255), nullable=True),
        sa.Column('user_id', sa.String(length=255), nullable=True),
        sa.Column('project_id', sa.String(length=255), nullable=True),
        sa.Column('start_time', sa.DateTime(), nullable=True),
        sa.Column('finish_time', sa.DateTime(), nullable=True),
        sa.Column('message', sa.String(length=255), nullable=True),
        # Indexed for lookups by container and by originating request.
        sa.Index('container_uuid_idx', 'container_uuid'),
        sa.Index('request_id_idx', 'request_id'),
        sa.ForeignKeyConstraint(['container_uuid'], ['container.uuid'], ),
        sa.PrimaryKeyConstraint('id')
    )
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add container_actions table
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2017-12-01 10:47:00.192171
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``container_actions`` table.

    One row per action performed on a container, keyed to the container
    through the ``container_uuid`` foreign key.
    """
    op.create_table(
        'container_actions',
        # Row creation/update timestamps.
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        # Name of the action performed on the container.
        sa.Column('action', sa.String(length=255), nullable=True),
        # UUID of the container the action applies to (FK below).
        sa.Column('container_uuid', sa.String(length=36), nullable=False),
        # Request/identity context that triggered the action.
        sa.Column('request_id', sa.String(length=255), nullable=True),
        sa.Column('user_id', sa.String(length=255), nullable=True),
        sa.Column('project_id', sa.String(length=255), nullable=True),
        sa.Column('start_time', sa.DateTime(), nullable=True),
        sa.Column('finish_time', sa.DateTime(), nullable=True),
        sa.Column('message', sa.String(length=255), nullable=True),
        # Indexed for lookups by container and by originating request.
        sa.Index('container_uuid_idx', 'container_uuid'),
        sa.Index('request_id_idx', 'request_id'),
        sa.ForeignKeyConstraint(['container_uuid'], ['container.uuid'], ),
        sa.PrimaryKeyConstraint('id')
    )
|
en
| 0.800344
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. add container_actions table Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2017-12-01 10:47:00.192171 # revision identifiers, used by Alembic.
| 1.854996
| 2
|
simpledecorators/Safe.py
|
shaddyx/simpleDecorators
| 1
|
6625433
|
from functools import wraps
def Safe(exception=Exception):
    """Decorator factory: suppress *exception* raised by the wrapped callable.

    If the call raises the given exception type (default: any ``Exception``),
    the error is discarded and ``None`` is returned instead.
    """
    def _decorate(func):
        @wraps(func)
        def _guarded(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exception:
                return None
        return _guarded
    return _decorate
|
from functools import wraps
def Safe(exception=Exception):
    """Return a decorator that silences *exception* from the wrapped function.

    When the wrapped call raises the configured exception type, ``None`` is
    returned in place of the result; other exceptions propagate normally.
    """
    def _decorator(func):
        @wraps(func)
        def _silenced(*args, **kwargs):
            outcome = None
            try:
                outcome = func(*args, **kwargs)
            except exception:
                pass
            return outcome
        return _silenced
    return _decorator
|
en
| 0.519177
|
safe function call (if function raises an exception) it will be ignored
| 3.671502
| 4
|
utilities/data_preprocessor/scripts/get_PCD.py
|
alanjclark/autoware.ai
| 2
|
6625434
|
#!/usr/bin/env python3
import sys
import os
import rospy
import numpy as np
import cv2
import pcl
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from cv_bridge import CvBridge
# Output directory for the generated .pcd files; assigned from argv (or a
# default) inside rosbag_data_extract_sample() before any message arrives.
save_path = None
def cloud_loader(msg):
    """ROS subscriber callback: persist one PointCloud2 message as a PCD file."""
    # Fold the integer seconds/nanoseconds of the header stamp into a single
    # floating-point timestamp, used to name the output file.
    stamp = msg.header.stamp
    seconds = stamp.secs + stamp.nsecs / 1000000000.0
    save_pcd(msg, seconds, save_path)
def save_pcd(cloud, timestamp, path):
    """Write *cloud* (a PointCloud2 message) to ``<path>/pcd_<timestamp>.pcd``.

    Only the first three fields of each point (x, y, z) are kept; any extra
    channels (e.g. intensity) are dropped. float32 is used as PCL expects.
    """
    xyz = np.array(list(pc2.read_points(cloud)), dtype=np.float32)[:, :3]
    point_cloud = pcl.PointCloud(xyz)
    point_cloud.to_file(path + '/pcd' + '_' + "{:.5f}".format(timestamp) + '.pcd')
def rosbag_data_extract_sample():
    """Subscribe to a PointCloud2 topic and save every message as a PCD file.

    Command-line usage: ``get_PCD.py <save_path> <topic>``. When either
    argument is missing, defaults are used instead of crashing.
    """
    global save_path
    try:
        save_path = sys.argv[1]
        topic = sys.argv[2]
    except IndexError:
        # Bug fix: the original fallback only set save_path, leaving `topic`
        # undefined and raising NameError below whenever the command-line
        # arguments were missing. Also narrowed `except Exception` to the
        # IndexError that argv indexing can actually raise.
        save_path = './sample'
        topic = '/points_raw'  # TODO(review): confirm the intended default topic
    # NOTE: the original computed an unused `node_name` string here; the
    # actual node name passed to init_node below is unchanged.
    rospy.init_node('rosbag_pcd_extract_unsync', anonymous=True)
    rospy.Subscriber(topic, PointCloud2, cloud_loader)
    rospy.spin()
# Run the extractor only when executed as a script, not when imported.
if __name__ == '__main__':
    rosbag_data_extract_sample()
|
#!/usr/bin/env python3
import sys
import os
import rospy
import numpy as np
import cv2
import pcl
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from cv_bridge import CvBridge
# Output directory for the generated .pcd files; assigned from argv (or a
# default) inside rosbag_data_extract_sample() before any message arrives.
save_path = None
def cloud_loader(msg):
    """Subscriber callback: save an incoming PointCloud2 message as a PCD file.

    :param msg: sensor_msgs.msg.PointCloud2 message from the subscribed topic.
    """
    # Merge the integer seconds/nanoseconds header stamp into one float timestamp.
    timestamp = msg.header.stamp.secs + ((msg.header.stamp.nsecs + 0.0) / 1000000000)
    save_pcd(msg, timestamp, save_path)
def save_pcd(cloud, timestamp, path):
    """Write *cloud* to ``<path>/pcd_<timestamp>.pcd``.

    Only the first three fields of each point (x, y, z) are kept; extra
    channels such as intensity are dropped. float32 is used as PCL expects.
    """
    p = pcl.PointCloud(np.array(list(pc2.read_points(cloud)), dtype=np.float32)[:, 0:3])
    p.to_file(path + '/pcd' + '_' + "{:.5f}".format(timestamp) + '.pcd')
def rosbag_data_extract_sample():
    """Subscribe to a PointCloud2 topic and save every message as a PCD file.

    Command-line usage: ``get_PCD.py <save_path> <topic>``. When either
    argument is missing, defaults are used instead of crashing.
    """
    global save_path
    try:
        save_path = sys.argv[1]
        topic = sys.argv[2]
    except IndexError:
        # Bug fix: the original fallback only set save_path, leaving `topic`
        # undefined and raising NameError below whenever the command-line
        # arguments were missing. Also narrowed `except Exception` to the
        # IndexError that argv indexing can actually raise.
        save_path = './sample'
        topic = '/points_raw'  # TODO(review): confirm the intended default topic
    # NOTE: the original computed an unused `node_name` string here; the
    # actual node name passed to init_node below is unchanged.
    rospy.init_node('rosbag_pcd_extract_unsync', anonymous=True)
    rospy.Subscriber(topic, PointCloud2, cloud_loader)
    rospy.spin()
# Run the extractor only when executed as a script, not when imported.
if __name__ == '__main__':
    rosbag_data_extract_sample()
|
fa
| 0.060346
|
#!/usr/bin/env python3 #sys.exit("Please specify the save path. Example: rosbag_data_extract_unsync.py /media/0/output/")
| 2.313628
| 2
|
examples/add_bond.py
|
oliel/python-ovirt-engine-sdk4
| 3
|
6625435
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
import ovirtsdk4 as sdk
import ovirtsdk4.types as types
# Log all SDK traffic (enabled by debug=True below) to a local file.
logging.basicConfig(level=logging.DEBUG, filename='example.log')
# This example configures the networking of a host, adding a bonded
# interface and attaching it to a network with a static IP address.
# Open an authenticated API connection to the oVirt engine:
connection = sdk.Connection(
    url='https://engine40.example.com/ovirt-engine/api',
    username='admin@internal',
    password='<PASSWORD>',
    ca_file='/etc/pki/ovirt-engine/ca.pem',
    debug=True,
    log=logging.getLogger(),
)
# Find the service that manages the collection of hosts:
hosts_service = connection.system_service().hosts_service()
# Find the host by name ([0] raises IndexError if no host matches 'myhost'):
host = hosts_service.list(search='name=myhost')[0]
# Find the service that manages that specific host:
host_service = hosts_service.host_service(host.id)
# Configure the network: create bond 'bond0' over eth1/eth2 and attach it
# to 'mynetwork' with a static IP address:
host_service.setup_networks(
    modified_bonds=[
        types.HostNic(
            name='bond0',
            bonding=types.Bonding(
                options=[
                    # Bonding mode '1' — presumably active-backup; confirm
                    # this is the intended mode for the deployment.
                    types.Option(
                        name='mode',
                        value='1',
                    ),
                    # Link-monitoring interval (miimon), in milliseconds.
                    types.Option(
                        name='miimon',
                        value='100',
                    ),
                ],
                # Physical NICs enslaved to the bond:
                slaves=[
                    types.HostNic(
                        name='eth1',
                    ),
                    types.HostNic(
                        name='eth2',
                    ),
                ],
            ),
        ),
    ],
    modified_network_attachments=[
        types.NetworkAttachment(
            network=types.Network(
                name='mynetwork',
            ),
            host_nic=types.HostNic(
                name='bond0',
            ),
            # Static IPv4 assignment for the attachment:
            ip_address_assignments=[
                types.IpAddressAssignment(
                    assignment_method=types.BootProtocol.STATIC,
                    ip=types.Ip(
                        address='192.168.122.100',
                        netmask='255.255.255.0',
                    ),
                ),
            ],
        ),
    ],
)
# After modifying the network configuration it is very important to make it
# persistent (otherwise it is not saved — verify persistence semantics in
# the SDK docs for your engine version):
host_service.commit_net_config()
# Close the connection to the server:
connection.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
import ovirtsdk4 as sdk
import ovirtsdk4.types as types
# Log all SDK traffic (enabled by debug=True below) to a local file.
logging.basicConfig(level=logging.DEBUG, filename='example.log')
# This example configures the networking of a host, adding a bonded
# interface and attaching it to a network with a static IP address.
# Open an authenticated API connection to the oVirt engine:
connection = sdk.Connection(
    url='https://engine40.example.com/ovirt-engine/api',
    username='admin@internal',
    password='<PASSWORD>',
    ca_file='/etc/pki/ovirt-engine/ca.pem',
    debug=True,
    log=logging.getLogger(),
)
# Find the service that manages the collection of hosts:
hosts_service = connection.system_service().hosts_service()
# Find the host by name ([0] raises IndexError if no host matches 'myhost'):
host = hosts_service.list(search='name=myhost')[0]
# Find the service that manages that specific host:
host_service = hosts_service.host_service(host.id)
# Configure the network: create bond 'bond0' over eth1/eth2 and attach it
# to 'mynetwork' with a static IP address:
host_service.setup_networks(
    modified_bonds=[
        types.HostNic(
            name='bond0',
            bonding=types.Bonding(
                options=[
                    # Bonding mode '1' — presumably active-backup; confirm
                    # this is the intended mode for the deployment.
                    types.Option(
                        name='mode',
                        value='1',
                    ),
                    # Link-monitoring interval (miimon), in milliseconds.
                    types.Option(
                        name='miimon',
                        value='100',
                    ),
                ],
                # Physical NICs enslaved to the bond:
                slaves=[
                    types.HostNic(
                        name='eth1',
                    ),
                    types.HostNic(
                        name='eth2',
                    ),
                ],
            ),
        ),
    ],
    modified_network_attachments=[
        types.NetworkAttachment(
            network=types.Network(
                name='mynetwork',
            ),
            host_nic=types.HostNic(
                name='bond0',
            ),
            # Static IPv4 assignment for the attachment:
            ip_address_assignments=[
                types.IpAddressAssignment(
                    assignment_method=types.BootProtocol.STATIC,
                    ip=types.Ip(
                        address='192.168.122.100',
                        netmask='255.255.255.0',
                    ),
                ),
            ],
        ),
    ],
)
# After modifying the network configuration it is very important to make it
# persistent (otherwise it is not saved — verify persistence semantics in
# the SDK docs for your engine version):
host_service.commit_net_config()
# Close the connection to the server:
connection.close()
|
en
| 0.866647
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This example configures the networking of a host, adding a bonded # interface and attaching it to a network with an static IP address. # Connect to the host: # Find the service that manages the collection of hosts: # Find the host: # Find the service that manages the host: # Configure the network adding a bond with two slaves and attaching it to a # network with an static IP address: # After modifying the network configuration it is very important to make it # persistent: # Close the connection to the server:
| 2.164955
| 2
|
src/model/game.py
|
cpatrasciuc/schnapsen-card-game
| 0
|
6625436
|
# Copyright (c) 2021 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from typing import List, Any
from model.game_state import GameState
from model.player_action import PlayerAction
from model.player_id import PlayerId
class Game:
  """Record of a (possibly unfinished) game of Schnapsen.

  Only the reproducible starting conditions (dealer and shuffle seed) and
  the ordered list of actions played are treated as primary data; the
  current GameState is derived from them. Supports pickling/unpickling.
  """

  def __init__(self, dealer: PlayerId, seed: Any):
    self._dealer = dealer
    self._seed = seed
    self._game_state: GameState = GameState.new(dealer=dealer, random_seed=seed)
    self._actions: List[PlayerAction] = []

  @property
  def game_state(self) -> GameState:
    return self._game_state

  @property
  def actions(self) -> List[PlayerAction]:
    """The player actions executed so far, in order."""
    return self._actions

  @property
  def seed(self) -> Any:
    """The random seed used to shuffle the deck when the game began."""
    return self._seed

  @property
  def dealer(self) -> PlayerId:
    """The player who dealt at the beginning of the game."""
    return self._dealer

  def play_action(self, action: PlayerAction) -> None:
    """
    Execute *action*, which must be a legal action in the current state of
    the game, and record it in the action history.
    """
    assert not self._game_state.is_game_over
    self._actions.append(action)
    self._game_state = action.execute(self._game_state)

  def __getstate__(self):
    """
    Pickle everything except the derived game state; it is reproducible
    from the dealer, the seed and the action list, so __setstate__ rebuilds
    it instead.
    """
    return {key: value for key, value in self.__dict__.items()
            if key != '_game_state'}

  def __setstate__(self, state):
    """
    Restore the pickled attributes, then rebuild the game state by replaying
    every recorded action from the initial state.
    """
    self.__dict__.update(state)
    self._game_state: GameState = GameState.new(dealer=self._dealer,
                                                random_seed=self._seed)
    for played in self._actions:
      self._game_state = played.execute(self._game_state)
|
# Copyright (c) 2021 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from typing import List, Any
from model.game_state import GameState
from model.player_action import PlayerAction
from model.player_id import PlayerId
class Game:
    """
    A class that stores a game of Schnapsen: it stores the initial state (i.e.,
    the dealer ID and the random seed used to shuffle the deck) and a list of
    player actions that were performed so far. The recorded game may still be
    in progress. It supports pickling/unpickling.
    """

    def __init__(self, dealer: PlayerId, seed: Any):
        # The dealer and the seed fully determine the initial GameState.
        self._dealer = dealer
        self._seed = seed
        self._game_state: GameState = GameState.new(dealer=dealer, random_seed=seed)
        # Actions performed since the deal, in order of execution.
        self._actions: List[PlayerAction] = []

    @property
    def game_state(self) -> GameState:
        """The current state, reached after all recorded actions."""
        return self._game_state

    @property
    def actions(self) -> List[PlayerAction]:
        """The list of player actions executed so far."""
        return self._actions

    @property
    def seed(self) -> Any:
        """The seed used to shuffle the deck at the beginning of the game."""
        return self._seed

    @property
    def dealer(self) -> PlayerId:
        """The player that was the dealer at the beginning of the game."""
        return self._dealer

    def play_action(self, action: PlayerAction) -> None:
        """
        Executes the given player action. The action must be a legal action in the
        current state of the game.
        """
        # Internal invariant: nobody may act on a finished game.
        assert not self._game_state.is_game_over
        self._actions.append(action)
        self._game_state = action.execute(self._game_state)

    def __getstate__(self):
        """
        Returns the object that should be saved during pickling.
        Doesn't export the current game state. It can be recreated by starting from
        the same initial state and performing all the actions when unpickling.
        """
        state = self.__dict__.copy()
        # _game_state is derived data; dropping it keeps the pickle small.
        del state['_game_state']
        return state

    def __setstate__(self, state):
        """
        Restore the instance variables and recreate the game state by executing all
        the player actions.
        """
        self.__dict__.update(state)
        self._game_state: GameState = GameState.new(dealer=self._dealer,
                                                    random_seed=self._seed)
        for action in self._actions:
            self._game_state = action.execute(self._game_state)
|
en
| 0.958702
|
# Copyright (c) 2021 <NAME>. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. A class that stores a game of Schnapsen: it stores the initial state (i.e., the dealer ID and the random seed used to shuffle the deck) and a list of player actions that were performed so far. The game could not be over yet. It supports pickling/unpickling. The list of player actions executed so far. The seed used to shuffle the deck at the beginning of the game. The player that was the dealer at the beginning of the game. Executes the given player action. The action must be a legal action in the current state of the game. Returns the object that should be saved during pickling. Doesn't export the current game state. It can be recreated by starting from the same initial state and performing all the actions when unpickling. Restore the instance variable and recreate the game state by executing all the player actions.
| 3.313564
| 3
|
helpers.py
|
Raymw1/birthdaysv2
| 3
|
6625437
|
from functools import wraps
from flask import redirect, render_template, session
def apology(page, message, code=400, data=None):
    """Render *page* with an error *message* and return it with HTTP *code*.

    *data* is forwarded to the template unchanged.
    """
    return render_template(page, error=message, data=data), code
def apologyBirth(page, message, birthdays, code=400, data=None):
    """Like ``apology`` but also forwards *birthdays* to the template.

    Used by views that must re-render the birthday list alongside the error.
    """
    return render_template(page, error=message, data=data, birthdays=birthdays), code
def login_required(f):
    """Decorator that redirects anonymous visitors to the index page.

    Pattern from
    https://flask.palletsprojects.com/en/1.1.x/patterns/viewdecorators/
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # An absent "user_id" in the session means nobody is logged in.
        if session.get("user_id") is None:
            return redirect("/")
        return f(*args, **kwargs)
    return wrapper
def is_logged(f):
    """Decorator for auth pages: send already-authenticated users to /birthdays."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if session.get("user_id") is None:
            # Not logged in: the wrapped view (login/register) may be shown.
            return f(*args, **kwargs)
        return redirect("/birthdays")
    return wrapper
|
from functools import wraps
from flask import redirect, render_template, session
def apology(page, message, code=400, data=None):
    """Render *page* with an error *message* and return it with HTTP *code*."""
    return render_template(page, error=message, data=data), code
def apologyBirth(page, message, birthdays, code=400, data=None):
    """Like ``apology`` but also forwards *birthdays* to the template."""
    return render_template(page, error=message, data=data, birthdays=birthdays), code
def login_required(f):
    """
    Decorate routes to require login.

    https://flask.palletsprojects.com/en/1.1.x/patterns/viewdecorators/
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # No "user_id" in the session means the visitor is not authenticated.
        if session.get("user_id") is None:
            return redirect("/")
        return f(*args, **kwargs)
    return decorated_function
def is_logged(f):
    """Decorate auth routes: redirect already-logged-in users to /birthdays."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session.get("user_id") is not None:
            return redirect("/birthdays")
        return f(*args, **kwargs)
    return decorated_function
|
en
| 0.542754
|
Decorate routes to require login. https://flask.palletsprojects.com/en/1.1.x/patterns/viewdecorators/
| 2.73749
| 3
|
nydus/db/backends/riak.py
|
ageron/nydus
| 2
|
6625438
|
<reponame>ageron/nydus<gh_stars>1-10
"""
nydus.db.backends.riak
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
import httplib
from riak import RiakClient, RiakError
from nydus.db.backends import BaseConnection
class Riak(BaseConnection):
    """Nydus connection backend for a single Riak node's HTTP interface."""

    # Network and protocol failures that a retry may recover from.
    retryable_exceptions = frozenset([socket.error, httplib.HTTPException, RiakError])
    supports_pipelines = False

    def __init__(self, host='127.0.0.1', port=8098, prefix='riak', mapred_prefix='mapred', client_id=None, **options):
        self.host = host
        self.port = port
        self.prefix = prefix
        self.mapred_prefix = mapred_prefix
        self.client_id = client_id
        super(Riak, self).__init__(**options)

    @property
    def identifier(self):
        """URL-style identity of this connection, e.g. http://127.0.0.1:8098/riak."""
        return "http://%(host)s:%(port)s/%(prefix)s" % vars(self)

    def connect(self):
        """Build and return a fresh RiakClient with the stored parameters."""
        return RiakClient(
            host=self.host,
            port=self.port,
            prefix=self.prefix,
            mapred_prefix=self.mapred_prefix,
            client_id=self.client_id,
        )

    def disconnect(self):
        """No persistent connection is held, so there is nothing to close."""
        pass
|
"""
nydus.db.backends.riak
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
import httplib
from riak import RiakClient, RiakError
from nydus.db.backends import BaseConnection
class Riak(BaseConnection):
    """Nydus database backend that talks to one Riak node over HTTP."""

    # Exceptions that can be retried by this backend
    retryable_exceptions = frozenset([socket.error, httplib.HTTPException, RiakError])
    # Riak's HTTP API offers no command pipelining.
    supports_pipelines = False

    def __init__(self, host='127.0.0.1', port=8098, prefix='riak', mapred_prefix='mapred', client_id=None, **options):
        # Location of the Riak HTTP endpoint.
        self.host = host
        self.port = port
        # URL prefixes for regular and map/reduce requests.
        self.prefix = prefix
        self.mapred_prefix = mapred_prefix
        self.client_id = client_id
        super(Riak, self).__init__(**options)

    @property
    def identifier(self):
        # Human-readable identity, e.g. "http://127.0.0.1:8098/riak".
        mapping = vars(self)
        return "http://%(host)s:%(port)s/%(prefix)s" % mapping

    def connect(self):
        # Build a fresh client with the stored connection parameters.
        return RiakClient(host=self.host, port=self.port, prefix=self.prefix,\
            mapred_prefix=self.mapred_prefix, client_id=self.client_id)

    def disconnect(self):
        # No persistent connection is held; nothing to tear down.
        pass
|
en
| 0.588107
|
nydus.db.backends.riak ~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2011 DISQUS. :license: Apache License 2.0, see LICENSE for more details. # Exceptions that can be retried by this backend
| 2.077894
| 2
|
div_p2p/test_thread.py
|
mliroz/diversity_p2p
| 0
|
6625439
|
import os
from threading import Thread
from execo.action import TaktukPut
from execo.log import style
from execo_engine import logger
from div_p2p.wrapper import DivP2PWrapper
class TestThread(Thread):
    """This class manages the consumption and execution of combinations.

    Each thread drives one remote host (through DivP2PWrapper): it pulls
    parameter combinations from the shared sweeper, deploys the dataset a
    combination requires, runs the experiment, and reports results to the
    stats manager.
    """

    def __init__(self, host, comb_manager, stats_manager):
        super(TestThread, self).__init__()
        # Wrapper around the remote host that runs the experiments.
        self.div_p2p = DivP2PWrapper(host)
        self.comb_manager = comb_manager
        self.stats_manager = stats_manager
        # Combination currently being processed and its identifiers.
        self.comb = None
        self.ds_id = -1
        self.comb_id = -1

    def _th_prefix(self):
        # Styled "[thread-name] " prefix for this thread's log lines.
        return style.user1("[" + self.name + "] ")

    def run(self):
        # Consume combinations until the shared sweeper is exhausted.
        while len(self.comb_manager.sweeper.get_remaining()) > 0:
            # Getting the next combination (which uses a new dataset)
            comb = self.comb_manager.sweeper.get_next()
            if comb:
                self.comb = comb
                self.comb_id = self.comb_manager.get_comb_id(comb)
                self.ds_id = self.comb_manager.get_ds_id(comb)
                ds_comb = self.prepare_dataset(comb)
                self.xp(comb, ds_comb)
                # subloop over the combinations that use the same dataset,
                # so the (expensive) dataset deployment is reused.
                while True:
                    # NOTE(review): on Python 3 `filter` returns an iterator;
                    # this code appears to target Python 2 era execo, where it
                    # returns a list — confirm before porting.
                    comb_in_ds = self.comb_manager.sweeper.get_next(
                        lambda r: filter(self._uses_same_ds, r))
                    if comb_in_ds:
                        # NOTE(review): `comb` here is still the outer-loop
                        # combination, not `comb_in_ds`, so `self.comb_id` is
                        # recorded for the wrong combination when xp() calls
                        # stats_manager.add_xp — looks like a bug; verify.
                        self.comb = comb
                        self.comb_id = self.comb_manager.get_comb_id(comb)
                        try:
                            self.xp(comb_in_ds, ds_comb)
                        # NOTE(review): bare except silently ends the subloop on
                        # any error (including KeyboardInterrupt/SystemExit);
                        # consider `except Exception` plus logging.
                        except:
                            break
                    else:
                        break

    def _uses_same_ds(self, candidate_comb):
        """Determine if the candidate combination uses the same dataset as the
        current one.

        Args:
          candidate_comb (dict): The combination candidate to be selected as the
            new combination.
        """
        return self.comb_manager.uses_same_ds(self.comb, candidate_comb)

    def prepare_dataset(self, comb):
        """Prepare the dataset to be used in the next set of experiments.

        Args:
          comb (dict): The combination containing the dataset's parameters.

        Returns:
          dict: The dataset parameters.
        """
        # Create ds_comb
        (ds_class_name, ds_params) = self.comb_manager.get_ds_class_params(comb)
        local_path = ds_params["local_path"]
        # Dataset lands in the wrapper's remote dir, keeping its base name.
        remote_path = os.path.join(self.div_p2p.remote_dir,
                                   os.path.basename(local_path))
        ds_comb = {"ds.class.path": remote_path, "ds.class": ds_class_name}
        # Copy dataset to host
        logger.info(self._th_prefix() + "Prepare dataset with combination " +
                    str(self.comb_manager.get_ds_parameters(comb)))
        copy_code = TaktukPut([self.div_p2p.host], [local_path], remote_path)
        copy_code.run()
        # Notify stats manager
        self.stats_manager.add_ds(self.ds_id, comb)
        return ds_comb

    def xp(self, comb, ds_comb):
        """Perform the experiment corresponding to the given combination.

        Args:
          comb (dict): The combination with the experiment's parameters.
          ds_comb (dict): The dataset parameters.
        """
        comb_ok = False
        try:
            logger.info(self._th_prefix() +
                        "Execute experiment with combination " +
                        str(self.comb_manager.get_xp_parameters(comb)))
            num_reps = self.comb_manager.get_num_repetitions()
            for nr in range(0, num_reps):
                if num_reps > 1:
                    logger.info(self._th_prefix() + "Repetition " + str(nr + 1))
                # Change configuration: merge experiment and dataset
                # parameters into one config dict (dataset keys win).
                params = {}
                for key in comb:
                    params[key] = comb[key]
                for key in ds_comb:
                    params[key] = ds_comb[key]
                self.div_p2p.change_conf(params)
                # Execute job
                stats_file = self.div_p2p.execute()
                # Notify stats manager
                self.stats_manager.add_xp(self.comb_id, comb, stats_file)
            comb_ok = True
        finally:
            # Mark done only if every repetition succeeded; otherwise return
            # the combination to the sweeper so it can be retried.
            if comb_ok:
                self.comb_manager.sweeper.done(comb)
            else:
                self.comb_manager.sweeper.cancel(comb)
            logger.info('%s Remaining',
                        len(self.comb_manager.sweeper.get_remaining()))
|
import os
from threading import Thread
from execo.action import TaktukPut
from execo.log import style
from execo_engine import logger
from div_p2p.wrapper import DivP2PWrapper
class TestThread(Thread):
    """This class manages the consumption and execution of combinations.

    Each thread drives one remote host (through DivP2PWrapper): it pulls
    parameter combinations from the shared sweeper, deploys the dataset a
    combination requires, runs the experiment, and reports results to the
    stats manager.
    """

    def __init__(self, host, comb_manager, stats_manager):
        super(TestThread, self).__init__()
        # Wrapper around the remote host that runs the experiments.
        self.div_p2p = DivP2PWrapper(host)
        self.comb_manager = comb_manager
        self.stats_manager = stats_manager
        # Combination currently being processed and its identifiers.
        self.comb = None
        self.ds_id = -1
        self.comb_id = -1

    def _th_prefix(self):
        # Styled "[thread-name] " prefix for this thread's log lines.
        return style.user1("[" + self.name + "] ")

    def run(self):
        # Consume combinations until the shared sweeper is exhausted.
        while len(self.comb_manager.sweeper.get_remaining()) > 0:
            # Getting the next combination (which uses a new dataset)
            comb = self.comb_manager.sweeper.get_next()
            if comb:
                self.comb = comb
                self.comb_id = self.comb_manager.get_comb_id(comb)
                self.ds_id = self.comb_manager.get_ds_id(comb)
                ds_comb = self.prepare_dataset(comb)
                self.xp(comb, ds_comb)
                # subloop over the combinations that use the same dataset,
                # so the (expensive) dataset deployment is reused.
                while True:
                    # NOTE(review): on Python 3 `filter` returns an iterator;
                    # this code appears to target Python 2 era execo, where it
                    # returns a list — confirm before porting.
                    comb_in_ds = self.comb_manager.sweeper.get_next(
                        lambda r: filter(self._uses_same_ds, r))
                    if comb_in_ds:
                        # NOTE(review): `comb` here is still the outer-loop
                        # combination, not `comb_in_ds`, so `self.comb_id` is
                        # recorded for the wrong combination when xp() calls
                        # stats_manager.add_xp — looks like a bug; verify.
                        self.comb = comb
                        self.comb_id = self.comb_manager.get_comb_id(comb)
                        try:
                            self.xp(comb_in_ds, ds_comb)
                        # NOTE(review): bare except silently ends the subloop on
                        # any error (including KeyboardInterrupt/SystemExit);
                        # consider `except Exception` plus logging.
                        except:
                            break
                    else:
                        break

    def _uses_same_ds(self, candidate_comb):
        """Determine if the candidate combination uses the same dataset as the
        current one.

        Args:
          candidate_comb (dict): The combination candidate to be selected as the
            new combination.
        """
        return self.comb_manager.uses_same_ds(self.comb, candidate_comb)

    def prepare_dataset(self, comb):
        """Prepare the dataset to be used in the next set of experiments.

        Args:
          comb (dict): The combination containing the dataset's parameters.

        Returns:
          dict: The dataset parameters.
        """
        # Create ds_comb
        (ds_class_name, ds_params) = self.comb_manager.get_ds_class_params(comb)
        local_path = ds_params["local_path"]
        # Dataset lands in the wrapper's remote dir, keeping its base name.
        remote_path = os.path.join(self.div_p2p.remote_dir,
                                   os.path.basename(local_path))
        ds_comb = {"ds.class.path": remote_path, "ds.class": ds_class_name}
        # Copy dataset to host
        logger.info(self._th_prefix() + "Prepare dataset with combination " +
                    str(self.comb_manager.get_ds_parameters(comb)))
        copy_code = TaktukPut([self.div_p2p.host], [local_path], remote_path)
        copy_code.run()
        # Notify stats manager
        self.stats_manager.add_ds(self.ds_id, comb)
        return ds_comb

    def xp(self, comb, ds_comb):
        """Perform the experiment corresponding to the given combination.

        Args:
          comb (dict): The combination with the experiment's parameters.
          ds_comb (dict): The dataset parameters.
        """
        comb_ok = False
        try:
            logger.info(self._th_prefix() +
                        "Execute experiment with combination " +
                        str(self.comb_manager.get_xp_parameters(comb)))
            num_reps = self.comb_manager.get_num_repetitions()
            for nr in range(0, num_reps):
                if num_reps > 1:
                    logger.info(self._th_prefix() + "Repetition " + str(nr + 1))
                # Change configuration: merge experiment and dataset
                # parameters into one config dict (dataset keys win).
                params = {}
                for key in comb:
                    params[key] = comb[key]
                for key in ds_comb:
                    params[key] = ds_comb[key]
                self.div_p2p.change_conf(params)
                # Execute job
                stats_file = self.div_p2p.execute()
                # Notify stats manager
                self.stats_manager.add_xp(self.comb_id, comb, stats_file)
            comb_ok = True
        finally:
            # Mark done only if every repetition succeeded; otherwise return
            # the combination to the sweeper so it can be retried.
            if comb_ok:
                self.comb_manager.sweeper.done(comb)
            else:
                self.comb_manager.sweeper.cancel(comb)
            logger.info('%s Remaining',
                        len(self.comb_manager.sweeper.get_remaining()))
|
en
| 0.735843
|
This class manages the consumption and execution of combinations. # Getting the next combination (which uses a new dataset) # subloop over the combinations that use the same dataset Determine if the candidate combination uses the same dataset as the current one. Args: candidate_comb (dict): The combination candidate to be selected as the new combination. Prepare the dataset to be used in the next set of experiments. Args: comb (dict): The combination containing the dataset's parameters. Returns: dict: The dataset parameters. # Create ds_comb # Copy dataset to host # Notify stats manager Perform the experiment corresponding to the given combination. Args: comb (dict): The combination with the experiment's parameters. ds_comb (dict): The dataset parameters. # Change configuration # Execute job # Notify stats manager
| 2.571314
| 3
|
py-polars/polars/lazy/frame.py
|
elsuizo/polars
| 0
|
6625440
|
"""
This module contains all expressions and classes needed for lazy computation/ query execution.
"""
import os
import shutil
import subprocess
import tempfile
import typing as tp
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union
import polars as pl
try:
from polars.polars import PyExpr, PyLazyFrame, PyLazyGroupBy
_DOCUMENTING = False
except ImportError:
_DOCUMENTING = True
from ..datatypes import DataType, pytype_to_polars_type
from ..utils import _process_null_values
from .expr import Expr, _selection_to_pyexpr_list, col, expr_to_lit_or_expr, lit
__all__ = [
"LazyFrame",
]
def wrap_ldf(ldf: "PyLazyFrame") -> "LazyFrame":
    """Wrap a native PyLazyFrame in the Python-level LazyFrame facade."""
    return LazyFrame._from_pyldf(ldf)
class LazyFrame:
"""
Representation of a Lazy computation graph/ query.
"""
def __init__(self) -> None:
self._ldf: PyLazyFrame
@staticmethod
def _from_pyldf(ldf: "PyLazyFrame") -> "LazyFrame":
self = LazyFrame.__new__(LazyFrame)
self._ldf = ldf
return self
@staticmethod
def scan_csv(
file: str,
has_headers: bool = True,
ignore_errors: bool = False,
sep: str = ",",
skip_rows: int = 0,
stop_after_n_rows: Optional[int] = None,
cache: bool = True,
dtype: Optional[Dict[str, Type[DataType]]] = None,
low_memory: bool = False,
comment_char: Optional[str] = None,
quote_char: Optional[str] = r'"',
null_values: Optional[Union[str, tp.List[str], Dict[str, str]]] = None,
) -> "LazyFrame":
"""
See Also: `pl.scan_csv`
"""
dtype_list: Optional[tp.List[Tuple[str, Type[DataType]]]] = None
if dtype is not None:
dtype_list = []
for k, v in dtype.items():
dtype_list.append((k, pytype_to_polars_type(v)))
processed_null_values = _process_null_values(null_values)
self = LazyFrame.__new__(LazyFrame)
self._ldf = PyLazyFrame.new_from_csv(
file,
sep,
has_headers,
ignore_errors,
skip_rows,
stop_after_n_rows,
cache,
dtype_list,
low_memory,
comment_char,
quote_char,
processed_null_values,
)
return self
@staticmethod
def scan_parquet(
file: str, stop_after_n_rows: Optional[int] = None, cache: bool = True
) -> "LazyFrame":
"""
See Also: `pl.scan_parquet`
"""
self = LazyFrame.__new__(LazyFrame)
self._ldf = PyLazyFrame.new_from_parquet(file, stop_after_n_rows, cache)
return self
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""
Apply a function on Self.
Parameters
----------
func
Callable.
args
Arguments.
kwargs
Keyword arguments.
"""
return func(self, *args, **kwargs)
def describe_plan(self) -> str:
"""
A string representation of the unoptimized query plan.
"""
return self._ldf.describe_plan()
def describe_optimized_plan(
self,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
) -> str:
"""
A string representation of the optimized query plan.
"""
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache=False,
)
return ldf.describe_optimized_plan()
def show_graph(
self,
optimized: bool = True,
show: bool = True,
output_path: Optional[str] = None,
raw_output: bool = False,
figsize: Tuple[float, float] = (16.0, 12.0),
) -> Optional[str]:
"""
Show a plot of the query plan. Note that you should have graphviz installed.
Parameters
----------
optimized
Optimize the query plan.
show
Show the figure.
output_path
Write the figure to disk.
raw_output
Return dot syntax.
figsize
Passed to matlotlib if `show` == True.
"""
try:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
"Graphviz dot binary should be on your PATH and matplotlib should be installed to show graph."
)
dot = self._ldf.to_dot(optimized)
if raw_output:
return dot
with tempfile.TemporaryDirectory() as tmpdir_name:
dot_path = os.path.join(tmpdir_name, "dot")
with open(dot_path, "w") as f:
f.write(dot)
subprocess.run(["dot", "-Nshape=box", "-Tpng", "-O", dot_path])
out_path = os.path.join(tmpdir_name, "dot.png")
if output_path is not None:
shutil.copy(out_path, output_path)
if show:
plt.figure(figsize=figsize)
img = mpimg.imread(out_path)
plt.imshow(img)
plt.show()
return None
def inspect(self, fmt: str = "{}") -> "pl.LazyFrame": # type: ignore
"""
Prints the value that this node in the computation graph evaluates to and passes on the value.
>>> (df.select(col("foo").cumsum().alias("bar"))
>>> .inspect() # print the node before the filter
>>> .filter(col("bar") == col("foo")))
"""
def inspect(s: "pl.DataFrame") -> "pl.DataFrame":
print(fmt.format(s)) # type: ignore
return s
return self.map(inspect, predicate_pushdown=True, projection_pushdown=True)
def sort(
self,
by: Union[str, "Expr", tp.List["Expr"]],
reverse: Union[bool, tp.List[bool]] = False,
) -> "LazyFrame":
"""
Sort the DataFrame by:
- A single column name
- An expression
- Multiple expressions
Parameters
----------
by
Column (expressions) to sort by.
reverse
Whether or not to sort in reverse order.
"""
if type(by) is str:
return wrap_ldf(self._ldf.sort(by, reverse))
if type(reverse) is bool:
reverse = [reverse]
by = expr_to_lit_or_expr(by, str_to_lit=False)
by = _selection_to_pyexpr_list(by)
return wrap_ldf(self._ldf.sort_by_exprs(by, reverse))
def collect(
self,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
string_cache: bool = False,
no_optimization: bool = False,
) -> "pl.DataFrame":
"""
Collect into a DataFrame.
Parameters
----------
type_coercion
Do type coercion optimization.
predicate_pushdown
Do predicate pushdown optimization.
projection_pushdown
Do projection pushdown optimization.
simplify_expression
Run simplify expressions optimization.
string_cache
Use a global string cache in this query.
This is needed if you want to join on categorical columns.
Caution!
If you already have set a global string cache, set this to `False` as this will reset the
global cache when the query is finished.
no_optimization
Turn off optimizations.
Returns
-------
DataFrame
"""
if no_optimization:
predicate_pushdown = False
projection_pushdown = False
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache,
)
return pl.eager.frame.wrap_df(ldf.collect())
def fetch(
self,
n_rows: int = 500,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
string_cache: bool = True,
no_optimization: bool = False,
) -> "pl.DataFrame":
"""
Fetch is like a collect operation, but it overwrites the number of rows read by every scan
operation. This is a utility that helps debug a query on a smaller number of rows.
Note that the fetch does not guarantee the final number of rows in the DataFrame.
Filter, join operations and a lower number of rows available in the scanned file influence
the final number of rows.
Parameters
----------
n_rows
Collect n_rows from the data sources.
type_coercion
Run type coercion optimization.
predicate_pushdown
Run predicate pushdown optimization.
projection_pushdown
Run projection pushdown optimization.
simplify_expression
Run simplify expressions optimization.
string_cache
Use a global string cache in this query.
This is needed if you want to join on categorical columns.
no_optimization
Turn off optimizations.
Returns
-------
DataFrame
"""
if no_optimization:
predicate_pushdown = False
projection_pushdown = False
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache,
)
return pl.eager.frame.wrap_df(ldf.fetch(n_rows))
@property
def columns(self) -> tp.List[str]:
"""
Get or set column names.
Examples
--------
>>> df = (pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> }).lazy()
>>> .select(["foo", "bar"]))
>>> df.columns
["foo", "bar"]
"""
return self._ldf.columns()
def cache(
self,
) -> "LazyFrame":
"""
Cache the result once the execution of the physical plan hits this node.
"""
return wrap_ldf(self._ldf.cache())
def filter(self, predicate: "Expr") -> "LazyFrame":
"""
Filter the rows in the DataFrame based on a predicate expression.
Parameters
----------
predicate
Expression that evaluates to a boolean Series.
"""
if isinstance(predicate, str):
predicate = col(predicate)
return wrap_ldf(self._ldf.filter(predicate._pyexpr))
def select(
self, exprs: Union[str, "Expr", Sequence[str], Sequence["Expr"]]
) -> "LazyFrame":
"""
Select columns from this DataFrame.
Parameters
----------
exprs
Column or columns to select.
"""
exprs = _selection_to_pyexpr_list(exprs)
return wrap_ldf(self._ldf.select(exprs))
def groupby(
self,
by: Union[str, tp.List[str], "Expr", tp.List["Expr"]],
maintain_order: bool = False,
) -> "LazyGroupBy":
"""
Start a groupby operation.
Parameters
----------
by
Column(s) to group by.
maintain_order
Make sure that the order of the groups remain consistent. This is more expensive than a default groupby.
"""
new_by: tp.List[PyExpr]
if isinstance(by, list):
new_by = []
for e in by:
if isinstance(e, str):
e = col(e)
new_by.append(e._pyexpr)
elif isinstance(by, str):
new_by = [col(by)._pyexpr]
elif isinstance(by, Expr):
new_by = [by._pyexpr]
lgb = self._ldf.groupby(new_by, maintain_order)
return LazyGroupBy(lgb)
def join(
self,
ldf: "LazyFrame",
left_on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
right_on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
how: str = "inner",
suffix: str = "_right",
allow_parallel: bool = True,
force_parallel: bool = False,
asof_by: Optional[Union[str, tp.List[str]]] = None,
asof_by_left: Optional[Union[str, tp.List[str]]] = None,
asof_by_right: Optional[Union[str, tp.List[str]]] = None,
) -> "LazyFrame":
"""
Add a join operation to the Logical Plan.
Parameters
----------
ldf
Lazy DataFrame to join with.
left_on
Join column of the left DataFrame.
right_on
Join column of the right DataFrame.
on
Join column of both DataFrames. If set, `left_on` and `right_on` should be None.
how
one of:
"inner"
"left"
"outer"
"asof",
"cross"
suffix
Suffix to append to columns with a duplicate name.
allow_parallel
Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel.
force_parallel
Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel.
# Asof joins
This is similar to a left-join except that we match on nearest key rather than equal keys.
The keys must be sorted to perform an asof join
"""
if how == "cross":
return wrap_ldf(
self._ldf.join(
ldf._ldf,
[],
[],
allow_parallel,
force_parallel,
how,
suffix,
[],
[],
)
)
left_on_: Union[tp.List[str], tp.List[Expr], None]
if isinstance(left_on, (str, Expr)):
left_on_ = [left_on] # type: ignore[assignment]
else:
left_on_ = left_on
right_on_: Union[tp.List[str], tp.List[Expr], None]
if isinstance(right_on, (str, Expr)):
right_on_ = [right_on] # type: ignore[assignment]
else:
right_on_ = right_on
if isinstance(on, str):
left_on_ = [on]
right_on_ = [on]
elif isinstance(on, list):
left_on_ = on
right_on_ = on
if left_on_ is None or right_on_ is None:
raise ValueError("You should pass the column to join on as an argument.")
new_left_on = []
for column in left_on_:
if isinstance(column, str):
column = col(column)
new_left_on.append(column._pyexpr)
new_right_on = []
for column in right_on_:
if isinstance(column, str):
column = col(column)
new_right_on.append(column._pyexpr)
# set asof_by
left_asof_by_: Union[tp.List[str], None]
if isinstance(asof_by_left, str):
left_asof_by_ = [asof_by_left] # type: ignore[assignment]
else:
left_asof_by_ = asof_by_left
right_asof_by_: Union[tp.List[str], None]
if isinstance(asof_by_right, (str, Expr)):
right_asof_by_ = [asof_by_right] # type: ignore[assignment]
else:
right_asof_by_ = asof_by_right
if isinstance(asof_by, str):
left_asof_by_ = [asof_by]
right_asof_by_ = [asof_by]
elif isinstance(on, list):
left_asof_by_ = asof_by
right_asof_by_ = asof_by
if left_asof_by_ is None:
left_asof_by_ = []
if right_asof_by_ is None:
right_asof_by_ = []
return wrap_ldf(
self._ldf.join(
ldf._ldf,
new_left_on,
new_right_on,
allow_parallel,
force_parallel,
how,
suffix,
left_asof_by_,
right_asof_by_,
)
)
def with_columns(self, exprs: Union[tp.List["Expr"], "Expr"]) -> "LazyFrame":
"""
Add or overwrite multiple columns in a DataFrame.
Parameters
----------
exprs
List of Expressions that evaluate to columns.
"""
if isinstance(exprs, Expr):
return self.with_column(exprs)
pyexprs = []
for e in exprs:
if isinstance(e, Expr):
pyexprs.append(e._pyexpr)
elif isinstance(e, pl.Series):
pyexprs.append(lit(e)._pyexpr)
return wrap_ldf(self._ldf.with_columns(pyexprs))
def with_column(self, expr: "Expr") -> "LazyFrame":
"""
Add or overwrite column in a DataFrame.
Parameters
----------
expr
Expression that evaluates to column.
"""
return self.with_columns([expr])
def drop(self, columns: Union[str, tp.List[str]]) -> "LazyFrame":
"""
Remove one or multiple columns from a DataFrame.
Parameters
----------
columns
- Name of the column that should be removed.
- List of column names.
"""
if isinstance(columns, str):
columns = [columns]
return wrap_ldf(self._ldf.drop_columns(columns))
def with_column_renamed(self, existing_name: str, new_name: str) -> "LazyFrame":
"""
Rename a column in the DataFrame
"""
return wrap_ldf(self._ldf.with_column_renamed(existing_name, new_name))
def rename(self, mapping: Dict[str, str]) -> "LazyFrame":
"""
Rename column names. This does not preserve column order.
Parameters
----------
mapping
Key value pairs that map from old name to new name.
"""
existing = list(mapping.keys())
new = list(mapping.values())
return wrap_ldf(self._ldf.rename(existing, new))
def reverse(self) -> "LazyFrame":
"""
Reverse the DataFrame.
"""
return wrap_ldf(self._ldf.reverse())
def shift(self, periods: int) -> "LazyFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with `Nones`.
Parameters
----------
periods
Number of places to shift (may be negative).
"""
return wrap_ldf(self._ldf.shift(periods))
def shift_and_fill(
self, periods: int, fill_value: Union["Expr", int, str, float]
) -> "LazyFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
fill None values with the result of this expression.
"""
if not isinstance(fill_value, Expr):
fill_value = lit(fill_value)
return wrap_ldf(self._ldf.shift_and_fill(periods, fill_value._pyexpr))
def slice(self, offset: int, length: int) -> "LazyFrame":
"""
Slice the DataFrame.
Parameters
----------
offset
Start index.
length
Length of the slice.
"""
return wrap_ldf(self._ldf.slice(offset, length))
def limit(self, n: int) -> "LazyFrame":
"""
Limit the DataFrame to the first `n` rows. Note if you don't want the rows to be scanned,
use the `fetch` operation.
Parameters
----------
n
Number of rows.
"""
return self.slice(0, n)
def head(self, n: int) -> "LazyFrame":
"""
Get the first `n` rows of the DataFrame
Note if you don't want the rows to be scanned,
use the `fetch` operation.
Parameters
----------
n
Number of rows.
"""
return self.limit(n)
def tail(self, n: int) -> "LazyFrame":
"""
Get the last `n` rows of the DataFrame.
Parameters
----------
n
Number of rows.
"""
return wrap_ldf(self._ldf.tail(n))
def last(self) -> "LazyFrame":
"""
Get the last row of the DataFrame.
"""
return self.tail(1)
def first(self) -> "LazyFrame":
"""
Get the first row of the DataFrame.
"""
return self.slice(0, 1)
def with_row_count(self, name: str = "row_nr") -> "LazyFrame":
"""
Add a column at index 0 that counts the rows.
Parameters
----------
name
Name of the column to add.
"""
return wrap_ldf(self._ldf.with_row_count(name))
def fill_null(self, fill_value: Union[int, str, "Expr"]) -> "LazyFrame":
"""
Fill missing values
Parameters
----------
fill_value
Value to fill the missing values with
"""
if not isinstance(fill_value, Expr):
fill_value = lit(fill_value)
return wrap_ldf(self._ldf.fill_null(fill_value._pyexpr))
def fill_nan(self, fill_value: Union[int, str, "Expr"]) -> "LazyFrame":
    """
    Fill floating point NaN values.

    .. warning::
        Floating point NaN (Not a Number) values are not missing values!
        To replace missing values, use `fill_null`.

    Parameters
    ----------
    fill_value
        Value to fill the NaN values with; non-expressions are wrapped in a literal.
    """
    if not isinstance(fill_value, Expr):
        fill_value = lit(fill_value)
    return wrap_ldf(self._ldf.fill_nan(fill_value._pyexpr))
def std(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their standard deviation value.
"""
return wrap_ldf(self._ldf.std())
def var(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their variance value.
"""
return wrap_ldf(self._ldf.var())
def max(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their maximum value.
"""
return wrap_ldf(self._ldf.max())
def min(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their minimum value.
"""
return wrap_ldf(self._ldf.min())
def sum(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their sum value.
"""
return wrap_ldf(self._ldf.sum())
def mean(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their mean value.
"""
return wrap_ldf(self._ldf.mean())
def median(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their median value.
"""
return wrap_ldf(self._ldf.median())
def quantile(self, quantile: float) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their quantile value.
"""
return wrap_ldf(self._ldf.quantile(quantile))
def explode(
self, columns: Union[str, tp.List[str], "Expr", tp.List["Expr"]]
) -> "LazyFrame":
"""
Explode lists to long format.
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]]
>>> })
>>> df
shape: (6, 2)
╭─────────┬────────────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ list [i64] │
╞═════════╪════════════╡
│ "c" ┆ [1, 2] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [1, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [4, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [5, 5, 5] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [6] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "b" ┆ [2, 1, 2] │
╰─────────┴────────────╯
>>> df.explode("nrs")
shape: (13, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ ... ┆ ... │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
╰─────────┴─────╯
"""
columns = _selection_to_pyexpr_list(columns)
return wrap_ldf(self._ldf.explode(columns))
def drop_duplicates(
self,
maintain_order: bool = False,
subset: Optional[Union[tp.List[str], str]] = None,
) -> "LazyFrame":
"""
Drop duplicate rows from this DataFrame.
Note that this fails if there is a column of type `List` in the DataFrame.
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return wrap_ldf(self._ldf.drop_duplicates(maintain_order, subset))
def drop_nulls(
self, subset: Optional[Union[tp.List[str], str]] = None
) -> "LazyFrame":
"""
Drop rows with null values from this DataFrame.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, None, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.lazy().drop_nulls().collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
└─────┴─────┴─────┘
This method only drops nulls row-wise if any single value of the row is null.
Below are some example snippets that show how you could drop null values based on other
conditions
>>> df = pl.DataFrame(
>>> {
>>> "a": [None, None, None, None],
>>> "b": [1, 2, None, 1],
>>> "c": [1, None, None, 1],
>>> }
>>> )
>>> df
shape: (4, 3)
┌──────┬──────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪══════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴──────┴──────┘
>>> # drop a row only if all values are null
>>> df.filter(~pl.fold(acc=True, f=lambda acc, s: acc & s.is_null(), exprs=pl.all()))
shape: (3, 3)
┌──────┬─────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪═════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴─────┴──────┘
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return wrap_ldf(self._ldf.drop_nulls(subset))
def melt(
self, id_vars: Union[str, tp.List[str]], value_vars: Union[str, tp.List[str]]
) -> "LazyFrame":
"""
Unpivot DataFrame to long format.
Parameters
----------
id_vars
Columns to use as identifier variables.
value_vars
Values to use as identifier variables.
"""
if isinstance(value_vars, str):
value_vars = [value_vars]
if isinstance(id_vars, str):
id_vars = [id_vars]
return wrap_ldf(self._ldf.melt(id_vars, value_vars))
def map(
    self,
    f: Callable[["pl.DataFrame"], "pl.DataFrame"],
    predicate_pushdown: bool = True,
    projection_pushdown: bool = True,
    no_optimizations: bool = False,
) -> "LazyFrame":
    """
    Apply a custom function. It is important that the function returns a Polars DataFrame.

    Parameters
    ----------
    f
        Lambda/function to apply; receives a materialized DataFrame and must return a DataFrame.
    predicate_pushdown
        Allow predicate pushdown optimization to pass this node.
    projection_pushdown
        Allow projection pushdown optimization to pass this node.
    no_optimizations
        Turn off all optimizations past this point.
    """
    # BUG FIX: the guard was inverted (`if not no_optimizations:`), which
    # disabled both pushdowns by default and left them enabled when the
    # caller explicitly requested `no_optimizations=True`. This now mirrors
    # the `no_optimization` handling in `collect`/`fetch`.
    if no_optimizations:
        predicate_pushdown = False
        projection_pushdown = False
    return wrap_ldf(self._ldf.map(f, predicate_pushdown, projection_pushdown))
def interpolate(self) -> "LazyFrame":
"""
Interpolate intermediate values. The interpolation method is linear.
"""
return self.select(pl.col("*").interpolate()) # type: ignore
class LazyGroupBy:
"""
Created by `df.lazy().groupby("foo)"`
"""
def __init__(self, lgb: "PyLazyGroupBy"):
self.lgb = lgb
def agg(self, aggs: Union[tp.List["Expr"], "Expr"]) -> "LazyFrame":
"""
Describe the aggregation that need to be done on a group.
Parameters
----------
aggs
Single/ Multiple aggregation expression(s).
Examples
--------
>>> (pl.scan_csv("data.csv")
.groupby("groups")
.agg([
pl.col("name").n_unique().alias("unique_names"),
pl.max("values")
])
)
"""
aggs = _selection_to_pyexpr_list(aggs)
return wrap_ldf(self.lgb.agg(aggs))
def head(self, n: int = 5) -> "LazyFrame":
"""
Return first n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [1, 2, 3, 4, 5, 6]
>>> })
>>> df
shape: (6, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
╰─────────┴─────╯
>>> (df.groupby("letters")
>>> .head(2)
>>> .sort("letters")
>>> )
shape: (5, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
╰─────────┴─────╯
"""
return wrap_ldf(self.lgb.head(n))
def tail(self, n: int = 5) -> "LazyFrame":
"""
Return last n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [1, 2, 3, 4, 5, 6]
>>> })
>>> df
shape: (6, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
╰─────────┴─────╯
>>> (df.groupby("letters")
>>> .tail(2)
>>> .sort("letters")
>>> )
shape: (5, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
╰─────────┴─────╯
"""
return wrap_ldf(self.lgb.tail(n))
def apply(self, f: Callable[["pl.DataFrame"], "pl.DataFrame"]) -> "LazyFrame":
    """
    Apply a function over the groups as a new `DataFrame`. It is not recommended that you use
    this as materializing the `DataFrame` is quite expensive.

    Parameters
    ----------
    f
        Function applied to each group `DataFrame`; must return a `DataFrame`.
    """
    return wrap_ldf(self.lgb.apply(f))
# --- NOTE: a second, duplicated copy of this module follows (file-concatenation artifact) ---
"""
This module contains all expressions and classes needed for lazy computation/ query execution.
"""
import os
import shutil
import subprocess
import tempfile
import typing as tp
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union
import polars as pl
try:
from polars.polars import PyExpr, PyLazyFrame, PyLazyGroupBy
_DOCUMENTING = False
except ImportError:
_DOCUMENTING = True
from ..datatypes import DataType, pytype_to_polars_type
from ..utils import _process_null_values
from .expr import Expr, _selection_to_pyexpr_list, col, expr_to_lit_or_expr, lit
__all__ = [
"LazyFrame",
]
def wrap_ldf(ldf: "PyLazyFrame") -> "LazyFrame":
    # Wrap the raw native PyLazyFrame in the user-facing LazyFrame class.
    return LazyFrame._from_pyldf(ldf)
class LazyFrame:
"""
Representation of a Lazy computation graph/ query.
"""
def __init__(self) -> None:
self._ldf: PyLazyFrame
@staticmethod
def _from_pyldf(ldf: "PyLazyFrame") -> "LazyFrame":
self = LazyFrame.__new__(LazyFrame)
self._ldf = ldf
return self
@staticmethod
def scan_csv(
    file: str,
    has_headers: bool = True,
    ignore_errors: bool = False,
    sep: str = ",",
    skip_rows: int = 0,
    stop_after_n_rows: Optional[int] = None,
    cache: bool = True,
    dtype: Optional[Dict[str, Type[DataType]]] = None,
    low_memory: bool = False,
    comment_char: Optional[str] = None,
    quote_char: Optional[str] = r'"',
    null_values: Optional[Union[str, tp.List[str], Dict[str, str]]] = None,
) -> "LazyFrame":
    """
    Lazily scan a CSV file.

    See Also: `pl.scan_csv`
    """
    # Translate the user-supplied dtype mapping into (name, polars-type) pairs.
    dtype_list: Optional[tp.List[Tuple[str, Type[DataType]]]] = None
    if dtype is not None:
        dtype_list = [
            (name, pytype_to_polars_type(py_type)) for name, py_type in dtype.items()
        ]
    processed_null_values = _process_null_values(null_values)
    # Bypass __init__; the instance is populated directly from the native scanner.
    self = LazyFrame.__new__(LazyFrame)
    self._ldf = PyLazyFrame.new_from_csv(
        file,
        sep,
        has_headers,
        ignore_errors,
        skip_rows,
        stop_after_n_rows,
        cache,
        dtype_list,
        low_memory,
        comment_char,
        quote_char,
        processed_null_values,
    )
    return self
@staticmethod
def scan_parquet(
file: str, stop_after_n_rows: Optional[int] = None, cache: bool = True
) -> "LazyFrame":
"""
See Also: `pl.scan_parquet`
"""
self = LazyFrame.__new__(LazyFrame)
self._ldf = PyLazyFrame.new_from_parquet(file, stop_after_n_rows, cache)
return self
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""
Apply a function on Self.
Parameters
----------
func
Callable.
args
Arguments.
kwargs
Keyword arguments.
"""
return func(self, *args, **kwargs)
def describe_plan(self) -> str:
"""
A string representation of the unoptimized query plan.
"""
return self._ldf.describe_plan()
def describe_optimized_plan(
self,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
) -> str:
"""
A string representation of the optimized query plan.
"""
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache=False,
)
return ldf.describe_optimized_plan()
def show_graph(
    self,
    optimized: bool = True,
    show: bool = True,
    output_path: Optional[str] = None,
    raw_output: bool = False,
    figsize: Tuple[float, float] = (16.0, 12.0),
) -> Optional[str]:
    """
    Show a plot of the query plan. Note that you should have graphviz installed.

    Parameters
    ----------
    optimized
        Optimize the query plan.
    show
        Show the figure.
    output_path
        Write the figure to disk.
    raw_output
        Return dot syntax instead of rendering.
    figsize
        Passed to matplotlib if `show` == True.
    """
    try:
        import matplotlib.image as mpimg
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError(
            "Graphviz dot binary should be on your PATH and matplotlib should be installed to show graph."
        )
    # Ask the native planner for the plan in Graphviz dot syntax.
    dot = self._ldf.to_dot(optimized)
    if raw_output:
        return dot
    with tempfile.TemporaryDirectory() as tmpdir_name:
        dot_path = os.path.join(tmpdir_name, "dot")
        with open(dot_path, "w") as f:
            f.write(dot)
        # `dot ... -O` writes the rendered PNG next to the input as "dot.png".
        subprocess.run(["dot", "-Nshape=box", "-Tpng", "-O", dot_path])
        out_path = os.path.join(tmpdir_name, "dot.png")
        if output_path is not None:
            # Copy out of the temp dir before it is deleted.
            shutil.copy(out_path, output_path)
        if show:
            plt.figure(figsize=figsize)
            img = mpimg.imread(out_path)
            plt.imshow(img)
            plt.show()
    return None
def inspect(self, fmt: str = "{}") -> "pl.LazyFrame": # type: ignore
"""
Prints the value that this node in the computation graph evaluates to and passes on the value.
>>> (df.select(col("foo").cumsum().alias("bar"))
>>> .inspect() # print the node before the filter
>>> .filter(col("bar") == col("foo")))
"""
def inspect(s: "pl.DataFrame") -> "pl.DataFrame":
print(fmt.format(s)) # type: ignore
return s
return self.map(inspect, predicate_pushdown=True, projection_pushdown=True)
def sort(
    self,
    by: Union[str, "Expr", tp.List["Expr"]],
    reverse: Union[bool, tp.List[bool]] = False,
) -> "LazyFrame":
    """
    Sort the DataFrame by:

    - A single column name
    - An expression
    - Multiple expressions

    Parameters
    ----------
    by
        Column (expressions) to sort by.
    reverse
        Whether or not to sort in reverse order; may be a list giving one
        flag per sort expression.
    """
    # Fast path: a plain column name sorts directly on the native frame.
    # NOTE(review): `type(...) is str` deliberately excludes str subclasses,
    # which fall through to the expression path — confirm this is intended.
    if type(by) is str:
        return wrap_ldf(self._ldf.sort(by, reverse))
    if type(reverse) is bool:
        reverse = [reverse]
    # Normalize the selection into a list of native expressions.
    by = expr_to_lit_or_expr(by, str_to_lit=False)
    by = _selection_to_pyexpr_list(by)
    return wrap_ldf(self._ldf.sort_by_exprs(by, reverse))
def collect(
self,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
string_cache: bool = False,
no_optimization: bool = False,
) -> "pl.DataFrame":
"""
Collect into a DataFrame.
Parameters
----------
type_coercion
Do type coercion optimization.
predicate_pushdown
Do predicate pushdown optimization.
projection_pushdown
Do projection pushdown optimization.
simplify_expression
Run simplify expressions optimization.
string_cache
Use a global string cache in this query.
This is needed if you want to join on categorical columns.
Caution!
If you already have set a global string cache, set this to `False` as this will reset the
global cache when the query is finished.
no_optimization
Turn off optimizations.
Returns
-------
DataFrame
"""
if no_optimization:
predicate_pushdown = False
projection_pushdown = False
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache,
)
return pl.eager.frame.wrap_df(ldf.collect())
def fetch(
self,
n_rows: int = 500,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
string_cache: bool = True,
no_optimization: bool = False,
) -> "pl.DataFrame":
"""
Fetch is like a collect operation, but it overwrites the number of rows read by every scan
operation. This is a utility that helps debug a query on a smaller number of rows.
Note that the fetch does not guarantee the final number of rows in the DataFrame.
Filter, join operations and a lower number of rows available in the scanned file influence
the final number of rows.
Parameters
----------
n_rows
Collect n_rows from the data sources.
type_coercion
Run type coercion optimization.
predicate_pushdown
Run predicate pushdown optimization.
projection_pushdown
Run projection pushdown optimization.
simplify_expression
Run simplify expressions optimization.
string_cache
Use a global string cache in this query.
This is needed if you want to join on categorical columns.
no_optimization
Turn off optimizations.
Returns
-------
DataFrame
"""
if no_optimization:
predicate_pushdown = False
projection_pushdown = False
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache,
)
return pl.eager.frame.wrap_df(ldf.fetch(n_rows))
@property
def columns(self) -> tp.List[str]:
"""
Get or set column names.
Examples
--------
>>> df = (pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> }).lazy()
>>> .select(["foo", "bar"]))
>>> df.columns
["foo", "bar"]
"""
return self._ldf.columns()
def cache(self) -> "LazyFrame":
    """
    Cache the result once the execution of the physical plan hits this node.
    """
    return wrap_ldf(self._ldf.cache())
def filter(self, predicate: Union["Expr", str]) -> "LazyFrame":
    """
    Filter the rows in the DataFrame based on a predicate expression.

    Parameters
    ----------
    predicate
        Expression that evaluates to a boolean Series, or a column name
        (a bare string is interpreted as that column).
    """
    # Accept a bare column name for convenience.
    if isinstance(predicate, str):
        predicate = col(predicate)
    return wrap_ldf(self._ldf.filter(predicate._pyexpr))
def select(
self, exprs: Union[str, "Expr", Sequence[str], Sequence["Expr"]]
) -> "LazyFrame":
"""
Select columns from this DataFrame.
Parameters
----------
exprs
Column or columns to select.
"""
exprs = _selection_to_pyexpr_list(exprs)
return wrap_ldf(self._ldf.select(exprs))
def groupby(
    self,
    by: Union[str, tp.List[str], "Expr", tp.List["Expr"]],
    maintain_order: bool = False,
) -> "LazyGroupBy":
    """
    Start a groupby operation.

    Parameters
    ----------
    by
        Column name, expression, or a list of either, to group by.
    maintain_order
        Make sure that the order of the groups remain consistent. This is more expensive than a default groupby.

    Raises
    ------
    TypeError
        If `by` is not a str, Expr, or list thereof.
    """
    new_by: tp.List[PyExpr]
    if isinstance(by, list):
        # Normalize each element to a native expression; bare strings
        # become column references.
        new_by = []
        for e in by:
            if isinstance(e, str):
                e = col(e)
            new_by.append(e._pyexpr)
    elif isinstance(by, str):
        new_by = [col(by)._pyexpr]
    elif isinstance(by, Expr):
        new_by = [by._pyexpr]
    else:
        # ROBUSTNESS FIX: previously an unsupported type fell through and
        # raised an opaque UnboundLocalError on `new_by`.
        raise TypeError(
            f"expected a str, Expr, or list thereof for `by`, got {type(by)}"
        )
    lgb = self._ldf.groupby(new_by, maintain_order)
    return LazyGroupBy(lgb)
def join(
    self,
    ldf: "LazyFrame",
    left_on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
    right_on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
    on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
    how: str = "inner",
    suffix: str = "_right",
    allow_parallel: bool = True,
    force_parallel: bool = False,
    asof_by: Optional[Union[str, tp.List[str]]] = None,
    asof_by_left: Optional[Union[str, tp.List[str]]] = None,
    asof_by_right: Optional[Union[str, tp.List[str]]] = None,
) -> "LazyFrame":
    """
    Add a join operation to the Logical Plan.

    Parameters
    ----------
    ldf
        Lazy DataFrame to join with.
    left_on
        Join column of the left DataFrame.
    right_on
        Join column of the right DataFrame.
    on
        Join column of both DataFrames. If set, `left_on` and `right_on` should be None.
    how
        one of:
            "inner"
            "left"
            "outer"
            "asof"
            "cross"
    suffix
        Suffix to append to columns with a duplicate name.
    allow_parallel
        Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel.
    force_parallel
        Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel.
    asof_by, asof_by_left, asof_by_right
        Group columns for an asof join; `asof_by` sets both sides at once.

    # Asof joins
    This is similar to a left-join except that we match on nearest key rather than equal keys.
    The keys must be sorted to perform an asof join.
    """
    # A cross join needs no key columns.
    if how == "cross":
        return wrap_ldf(
            self._ldf.join(
                ldf._ldf, [], [], allow_parallel, force_parallel, how, suffix, [], []
            )
        )
    # Normalize left/right keys to lists.
    left_on_: Union[tp.List[str], tp.List[Expr], None]
    if isinstance(left_on, (str, Expr)):
        left_on_ = [left_on]  # type: ignore[assignment]
    else:
        left_on_ = left_on
    right_on_: Union[tp.List[str], tp.List[Expr], None]
    if isinstance(right_on, (str, Expr)):
        right_on_ = [right_on]  # type: ignore[assignment]
    else:
        right_on_ = right_on
    # `on` overrides left_on/right_on for both sides.
    if isinstance(on, str):
        left_on_ = [on]
        right_on_ = [on]
    elif isinstance(on, list):
        left_on_ = on
        right_on_ = on
    if left_on_ is None or right_on_ is None:
        raise ValueError("You should pass the column to join on as an argument.")
    new_left_on = []
    for column in left_on_:
        if isinstance(column, str):
            column = col(column)
        new_left_on.append(column._pyexpr)
    new_right_on = []
    for column in right_on_:
        if isinstance(column, str):
            column = col(column)
        new_right_on.append(column._pyexpr)
    # set asof_by; per the signature these are plain column names
    # (the old code inconsistently also accepted Expr on the right side only).
    left_asof_by_: Union[tp.List[str], None]
    if isinstance(asof_by_left, str):
        left_asof_by_ = [asof_by_left]
    else:
        left_asof_by_ = asof_by_left
    right_asof_by_: Union[tp.List[str], None]
    if isinstance(asof_by_right, str):
        right_asof_by_ = [asof_by_right]
    else:
        right_asof_by_ = asof_by_right
    # `asof_by` overrides asof_by_left/asof_by_right for both sides.
    if isinstance(asof_by, str):
        left_asof_by_ = [asof_by]
        right_asof_by_ = [asof_by]
    elif isinstance(asof_by, list):
        # BUG FIX: this branch previously tested `isinstance(on, list)` (a
        # copy-paste from the `on` handling above), so a list passed as
        # `asof_by` was silently ignored unless `on` also happened to be a list.
        left_asof_by_ = asof_by
        right_asof_by_ = asof_by
    if left_asof_by_ is None:
        left_asof_by_ = []
    if right_asof_by_ is None:
        right_asof_by_ = []
    return wrap_ldf(
        self._ldf.join(
            ldf._ldf,
            new_left_on,
            new_right_on,
            allow_parallel,
            force_parallel,
            how,
            suffix,
            left_asof_by_,
            right_asof_by_,
        )
    )
def with_columns(self, exprs: Union[tp.List["Expr"], "Expr"]) -> "LazyFrame":
    """
    Add or overwrite multiple columns in a DataFrame.

    Parameters
    ----------
    exprs
        List of Expressions that evaluate to columns; a single Expr is also
        accepted. `pl.Series` items are converted to literal expressions.
    """
    if isinstance(exprs, Expr):
        return self.with_column(exprs)
    pyexprs = []
    for e in exprs:
        if isinstance(e, Expr):
            pyexprs.append(e._pyexpr)
        elif isinstance(e, pl.Series):
            pyexprs.append(lit(e)._pyexpr)
        # NOTE(review): items that are neither Expr nor Series are silently
        # dropped — confirm whether this should raise instead.
    return wrap_ldf(self._ldf.with_columns(pyexprs))
def with_column(self, expr: "Expr") -> "LazyFrame":
"""
Add or overwrite column in a DataFrame.
Parameters
----------
expr
Expression that evaluates to column.
"""
return self.with_columns([expr])
def drop(self, columns: Union[str, tp.List[str]]) -> "LazyFrame":
    """
    Remove one or multiple columns from a DataFrame.

    Parameters
    ----------
    columns
        Name of the column to remove, or a list of column names.
    """
    to_drop = [columns] if isinstance(columns, str) else columns
    return wrap_ldf(self._ldf.drop_columns(to_drop))
def with_column_renamed(self, existing_name: str, new_name: str) -> "LazyFrame":
"""
Rename a column in the DataFrame
"""
return wrap_ldf(self._ldf.with_column_renamed(existing_name, new_name))
def rename(self, mapping: Dict[str, str]) -> "LazyFrame":
    """
    Rename column names. This does not preserve column order.

    Parameters
    ----------
    mapping
        Key value pairs that map from old name to new name.
    """
    old_names, new_names = list(mapping.keys()), list(mapping.values())
    return wrap_ldf(self._ldf.rename(old_names, new_names))
def reverse(self) -> "LazyFrame":
"""
Reverse the DataFrame.
"""
return wrap_ldf(self._ldf.reverse())
def shift(self, periods: int) -> "LazyFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with `Nones`.
Parameters
----------
periods
Number of places to shift (may be negative).
"""
return wrap_ldf(self._ldf.shift(periods))
def shift_and_fill(
self, periods: int, fill_value: Union["Expr", int, str, float]
) -> "LazyFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
fill None values with the result of this expression.
"""
if not isinstance(fill_value, Expr):
fill_value = lit(fill_value)
return wrap_ldf(self._ldf.shift_and_fill(periods, fill_value._pyexpr))
def slice(self, offset: int, length: int) -> "LazyFrame":
"""
Slice the DataFrame.
Parameters
----------
offset
Start index.
length
Length of the slice.
"""
return wrap_ldf(self._ldf.slice(offset, length))
def limit(self, n: int) -> "LazyFrame":
"""
Limit the DataFrame to the first `n` rows. Note if you don't want the rows to be scanned,
use the `fetch` operation.
Parameters
----------
n
Number of rows.
"""
return self.slice(0, n)
def head(self, n: int) -> "LazyFrame":
"""
Get the first `n` rows of the DataFrame
Note if you don't want the rows to be scanned,
use the `fetch` operation.
Parameters
----------
n
Number of rows.
"""
return self.limit(n)
def tail(self, n: int) -> "LazyFrame":
"""
Get the last `n` rows of the DataFrame.
Parameters
----------
n
Number of rows.
"""
return wrap_ldf(self._ldf.tail(n))
def last(self) -> "LazyFrame":
"""
Get the last row of the DataFrame.
"""
return self.tail(1)
def first(self) -> "LazyFrame":
"""
Get the first row of the DataFrame.
"""
return self.slice(0, 1)
def with_row_count(self, name: str = "row_nr") -> "LazyFrame":
"""
Add a column at index 0 that counts the rows.
Parameters
----------
name
Name of the column to add.
"""
return wrap_ldf(self._ldf.with_row_count(name))
def fill_null(self, fill_value: Union[int, str, "Expr"]) -> "LazyFrame":
    """
    Fill missing (null) values.

    Parameters
    ----------
    fill_value
        Value to fill the missing values with; non-expressions are wrapped
        in a literal expression.
    """
    expr = fill_value if isinstance(fill_value, Expr) else lit(fill_value)
    return wrap_ldf(self._ldf.fill_null(expr._pyexpr))
def fill_nan(self, fill_value: Union[int, str, "Expr"]) -> "LazyFrame":
    """
    Fill floating point NaN values.

    .. warning::
        Floating point NaN (Not a Number) values are not missing values!
        To replace missing values, use `fill_null`.

    Parameters
    ----------
    fill_value
        Value to fill the NaN values with; non-expressions are wrapped in a literal.
    """
    if not isinstance(fill_value, Expr):
        fill_value = lit(fill_value)
    return wrap_ldf(self._ldf.fill_nan(fill_value._pyexpr))
def std(self) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their standard deviation value.
    """
    return wrap_ldf(self._ldf.std())

def var(self) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their variance value.
    """
    return wrap_ldf(self._ldf.var())

def max(self) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their maximum value.
    """
    return wrap_ldf(self._ldf.max())

def min(self) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their minimum value.
    """
    return wrap_ldf(self._ldf.min())

def sum(self) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their sum value.
    """
    return wrap_ldf(self._ldf.sum())

def mean(self) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their mean value.
    """
    return wrap_ldf(self._ldf.mean())

def median(self) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their median value.
    """
    return wrap_ldf(self._ldf.median())

def quantile(self, quantile: float) -> "LazyFrame":
    """
    Aggregate the columns in the DataFrame to their quantile value.

    Parameters
    ----------
    quantile
        Quantile to compute — presumably in the range [0, 1]; confirm
        against the native implementation.
    """
    return wrap_ldf(self._ldf.quantile(quantile))
def explode(
self, columns: Union[str, tp.List[str], "Expr", tp.List["Expr"]]
) -> "LazyFrame":
"""
Explode lists to long format.
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]]
>>> })
>>> df
shape: (6, 2)
╭─────────┬────────────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ list [i64] │
╞═════════╪════════════╡
│ "c" ┆ [1, 2] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [1, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [4, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [5, 5, 5] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [6] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "b" ┆ [2, 1, 2] │
╰─────────┴────────────╯
>>> df.explode("nrs")
shape: (13, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ ... ┆ ... │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
╰─────────┴─────╯
"""
columns = _selection_to_pyexpr_list(columns)
return wrap_ldf(self._ldf.explode(columns))
def drop_duplicates(
    self,
    maintain_order: bool = False,
    subset: Optional[Union[tp.List[str], str]] = None,
) -> "LazyFrame":
    """
    Drop duplicate rows from this DataFrame.
    Note that this fails if there is a column of type `List` in the DataFrame.

    Parameters
    ----------
    maintain_order
        Keep the rows in their original order.
    subset
        Column name or list of column names to consider when identifying
        duplicates; all columns when None.
    """
    subset_cols = subset
    if subset_cols is not None and not isinstance(subset_cols, list):
        subset_cols = [subset_cols]
    return wrap_ldf(self._ldf.drop_duplicates(maintain_order, subset_cols))
def drop_nulls(
self, subset: Optional[Union[tp.List[str], str]] = None
) -> "LazyFrame":
"""
Drop rows with null values from this DataFrame.
Examples
--------
>>> df = pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, None, 8],
>>> "ham": ['a', 'b', 'c']
>>> })
>>> df.lazy().drop_nulls().collect()
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ ham │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ str │
╞═════╪═════╪═════╡
│ 1 ┆ 6 ┆ "a" │
├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤
│ 3 ┆ 8 ┆ "c" │
└─────┴─────┴─────┘
This method only drops nulls row-wise if any single value of the row is null.
Below are some example snippets that show how you could drop null values based on other
conditions
>>> df = pl.DataFrame(
>>> {
>>> "a": [None, None, None, None],
>>> "b": [1, 2, None, 1],
>>> "c": [1, None, None, 1],
>>> }
>>> )
>>> df
shape: (4, 3)
┌──────┬──────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪══════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ null ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴──────┴──────┘
>>> # drop a row only if all values are null
>>> df.filter(~pl.fold(acc=True, f=lambda acc, s: acc & s.is_null(), exprs=pl.all()))
shape: (3, 3)
┌──────┬─────┬──────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞══════╪═════╪══════╡
│ null ┆ 1 ┆ 1 │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 2 ┆ null │
├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤
│ null ┆ 1 ┆ 1 │
└──────┴─────┴──────┘
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return wrap_ldf(self._ldf.drop_nulls(subset))
def melt(
    self, id_vars: Union[str, tp.List[str]], value_vars: Union[str, tp.List[str]]
) -> "LazyFrame":
    """
    Unpivot DataFrame to long format.

    Parameters
    ----------
    id_vars
        Columns to use as identifier variables.
    value_vars
        Values to use as identifier variables.
    """
    # Accept a single column name for either argument.
    ids = [id_vars] if isinstance(id_vars, str) else id_vars
    values = [value_vars] if isinstance(value_vars, str) else value_vars
    return wrap_ldf(self._ldf.melt(ids, values))
def map(
    self,
    f: Callable[["pl.DataFrame"], "pl.DataFrame"],
    predicate_pushdown: bool = True,
    projection_pushdown: bool = True,
    no_optimizations: bool = False,
) -> "LazyFrame":
    """
    Apply a custom function. It is important that the function returns a Polars DataFrame.

    Parameters
    ----------
    f
        Lambda/function to apply; receives a materialized DataFrame and must return a DataFrame.
    predicate_pushdown
        Allow predicate pushdown optimization to pass this node.
    projection_pushdown
        Allow projection pushdown optimization to pass this node.
    no_optimizations
        Turn off all optimizations past this point.
    """
    # BUG FIX: the guard was inverted (`if not no_optimizations:`), which
    # disabled both pushdowns by default and left them enabled when the
    # caller explicitly requested `no_optimizations=True`. This now mirrors
    # the `no_optimization` handling in `collect`/`fetch`.
    if no_optimizations:
        predicate_pushdown = False
        projection_pushdown = False
    return wrap_ldf(self._ldf.map(f, predicate_pushdown, projection_pushdown))
def interpolate(self) -> "LazyFrame":
    """
    Interpolate intermediate values. The interpolation method is linear.
    """
    # Applies the linear-interpolation expression to every column ("*").
    return self.select(pl.col("*").interpolate())  # type: ignore
class LazyGroupBy:
    """
    Utility class created by `df.lazy().groupby("foo")`; it holds the
    pending grouping until an aggregation is requested.
    """

    def __init__(self, lgb: "PyLazyGroupBy"):
        self.lgb = lgb

    def agg(self, aggs: Union[tp.List["Expr"], "Expr"]) -> "LazyFrame":
        """
        Describe the aggregation(s) that need to be done on a group.

        Parameters
        ----------
        aggs
            A single aggregation expression, or a list of them.

        Examples
        --------
        >>> (pl.scan_csv("data.csv")
            .groupby("groups")
            .agg([
                pl.col("name").n_unique().alias("unique_names"),
                pl.max("values")
                ])
            )
        """
        pyexprs = _selection_to_pyexpr_list(aggs)
        return wrap_ldf(self.lgb.agg(pyexprs))

    def head(self, n: int = 5) -> "LazyFrame":
        """
        Return the first `n` rows of each group.

        Parameters
        ----------
        n
            Number of rows of the group to select.
        """
        return wrap_ldf(self.lgb.head(n))

    def tail(self, n: int = 5) -> "LazyFrame":
        """
        Return the last `n` rows of each group.

        Parameters
        ----------
        n
            Number of rows of the group to select.
        """
        return wrap_ldf(self.lgb.tail(n))

    def apply(self, f: Callable[["pl.DataFrame"], "pl.DataFrame"]) -> "LazyFrame":
        """
        Apply a function over each group, materialized as a new `DataFrame`.

        Materializing every group is quite expensive, so prefer
        expression-based aggregations where possible.

        Parameters
        ----------
        f
            Function mapping a group `DataFrame` to a new `DataFrame`.
        """
        return wrap_ldf(self.lgb.apply(f))
|
en
| 0.533849
|
This module contains all expressions and classes needed for lazy computation/ query execution. Representation of a Lazy computation graph/ query. See Also: `pl.scan_csv` See Also: `pl.scan_parquet` Apply a function on Self. Parameters ---------- func Callable. args Arguments. kwargs Keyword arguments. A string representation of the unoptimized query plan. A string representation of the optimized query plan. Show a plot of the query plan. Note that you should have graphviz installed. Parameters ---------- optimized Optimize the query plan. show Show the figure. output_path Write the figure to disk. raw_output Return dot syntax. figsize Passed to matlotlib if `show` == True. # type: ignore Prints the value that this node in the computation graph evaluates to and passes on the value. >>> (df.select(col("foo").cumsum().alias("bar")) >>> .inspect() # print the node before the filter >>> .filter(col("bar") == col("foo"))) # type: ignore Sort the DataFrame by: - A single column name - An expression - Multiple expressions Parameters ---------- by Column (expressions) to sort by. reverse Whether or not to sort in reverse order. Collect into a DataFrame. Parameters ---------- type_coercion Do type coercion optimization. predicate_pushdown Do predicate pushdown optimization. projection_pushdown Do projection pushdown optimization. simplify_expression Run simplify expressions optimization. string_cache Use a global string cache in this query. This is needed if you want to join on categorical columns. Caution! If you already have set a global string cache, set this to `False` as this will reset the global cache when the query is finished. no_optimization Turn off optimizations. Returns ------- DataFrame Fetch is like a collect operation, but it overwrites the number of rows read by every scan operation. This is a utility that helps debug a query on a smaller number of rows. Note that the fetch does not guarantee the final number of rows in the DataFrame. 
Filter, join operations and a lower number of rows available in the scanned file influence the final number of rows. Parameters ---------- n_rows Collect n_rows from the data sources. type_coercion Run type coercion optimization. predicate_pushdown Run predicate pushdown optimization. projection_pushdown Run projection pushdown optimization. simplify_expression Run simplify expressions optimization. string_cache Use a global string cache in this query. This is needed if you want to join on categorical columns. no_optimization Turn off optimizations. Returns ------- DataFrame Get or set column names. Examples -------- >>> df = (pl.DataFrame({ >>> "foo": [1, 2, 3], >>> "bar": [6, 7, 8], >>> "ham": ['a', 'b', 'c'] >>> }).lazy() >>> .select(["foo", "bar"])) >>> df.columns ["foo", "bar"] Cache the result once the execution of the physical plan hits this node. Filter the rows in the DataFrame based on a predicate expression. Parameters ---------- predicate Expression that evaluates to a boolean Series. Select columns from this DataFrame. Parameters ---------- exprs Column or columns to select. Start a groupby operation. Parameters ---------- by Column(s) to group by. maintain_order Make sure that the order of the groups remain consistent. This is more expensive than a default groupby. Add a join operation to the Logical Plan. Parameters ---------- ldf Lazy DataFrame to join with. left_on Join column of the left DataFrame. right_on Join column of the right DataFrame. on Join column of both DataFrames. If set, `left_on` and `right_on` should be None. how one of: "inner" "left" "outer" "asof", "cross" suffix Suffix to append to columns with a duplicate name. allow_parallel Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel. force_parallel Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel. 
# Asof joins This is similar to a left-join except that we match on nearest key rather than equal keys. The keys must be sorted to perform an asof join # type: ignore[assignment] # type: ignore[assignment] # set asof_by # type: ignore[assignment] # type: ignore[assignment] Add or overwrite multiple columns in a DataFrame. Parameters ---------- exprs List of Expressions that evaluate to columns. Add or overwrite column in a DataFrame. Parameters ---------- expr Expression that evaluates to column. Remove one or multiple columns from a DataFrame. Parameters ---------- columns - Name of the column that should be removed. - List of column names. Rename a column in the DataFrame Rename column names. This does not preserve column order. Parameters ---------- mapping Key value pairs that map from old name to new name. Reverse the DataFrame. Shift the values by a given period and fill the parts that will be empty due to this operation with `Nones`. Parameters ---------- periods Number of places to shift (may be negative). Shift the values by a given period and fill the parts that will be empty due to this operation with the result of the `fill_value` expression. Parameters ---------- periods Number of places to shift (may be negative). fill_value fill None values with the result of this expression. Slice the DataFrame. Parameters ---------- offset Start index. length Length of the slice. Limit the DataFrame to the first `n` rows. Note if you don't want the rows to be scanned, use the `fetch` operation. Parameters ---------- n Number of rows. Get the first `n` rows of the DataFrame Note if you don't want the rows to be scanned, use the `fetch` operation. Parameters ---------- n Number of rows. Get the last `n` rows of the DataFrame. Parameters ---------- n Number of rows. Get the last row of the DataFrame. Get the first row of the DataFrame. Add a column at index 0 that counts the rows. Parameters ---------- name Name of the column to add. 
Fill missing values Parameters ---------- fill_value Value to fill the missing values with Fill floating point NaN values. ..warning:: NOTE that floating point NaN (No a Number) are not missing values! to replace missing values, use `fill_null`. Parameters ---------- fill_value Value to fill the NaN values with Aggregate the columns in the DataFrame to their standard deviation value. Aggregate the columns in the DataFrame to their variance value. Aggregate the columns in the DataFrame to their maximum value. Aggregate the columns in the DataFrame to their minimum value. Aggregate the columns in the DataFrame to their sum value. Aggregate the columns in the DataFrame to their mean value. Aggregate the columns in the DataFrame to their median value. Aggregate the columns in the DataFrame to their quantile value. Explode lists to long format. Examples -------- >>> df = pl.DataFrame({ >>> "letters": ["c", "c", "a", "c", "a", "b"], >>> "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]] >>> }) >>> df shape: (6, 2) ╭─────────┬────────────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ list [i64] │ ╞═════════╪════════════╡ │ "c" ┆ [1, 2] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "c" ┆ [1, 3] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "a" ┆ [4, 3] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "c" ┆ [5, 5, 5] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "a" ┆ [6] │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤ │ "b" ┆ [2, 1, 2] │ ╰─────────┴────────────╯ >>> df.explode("nrs") shape: (13, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ ... ┆ ... │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 6 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 2 │ ╰─────────┴─────╯ Drop duplicate rows from this DataFrame. Note that this fails if there is a column of type `List` in the DataFrame. Drop rows with null values from this DataFrame. 
Examples -------- >>> df = pl.DataFrame({ >>> "foo": [1, 2, 3], >>> "bar": [6, None, 8], >>> "ham": ['a', 'b', 'c'] >>> }) >>> df.lazy().drop_nulls().collect() shape: (2, 3) ┌─────┬─────┬─────┐ │ foo ┆ bar ┆ ham │ │ --- ┆ --- ┆ --- │ │ i64 ┆ i64 ┆ str │ ╞═════╪═════╪═════╡ │ 1 ┆ 6 ┆ "a" │ ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤ │ 3 ┆ 8 ┆ "c" │ └─────┴─────┴─────┘ This method only drops nulls row-wise if any single value of the row is null. Below are some example snippets that show how you could drop null values based on other conditions >>> df = pl.DataFrame( >>> { >>> "a": [None, None, None, None], >>> "b": [1, 2, None, 1], >>> "c": [1, None, None, 1], >>> } >>> ) >>> df shape: (4, 3) ┌──────┬──────┬──────┐ │ a ┆ b ┆ c │ │ --- ┆ --- ┆ --- │ │ f64 ┆ i64 ┆ i64 │ ╞══════╪══════╪══════╡ │ null ┆ 1 ┆ 1 │ ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤ │ null ┆ 2 ┆ null │ ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤ │ null ┆ null ┆ null │ ├╌╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤ │ null ┆ 1 ┆ 1 │ └──────┴──────┴──────┘ >>> # drop a row only if all values are null >>> df.filter(~pl.fold(acc=True, f=lambda acc, s: acc & s.is_null(), exprs=pl.all())) shape: (3, 3) ┌──────┬─────┬──────┐ │ a ┆ b ┆ c │ │ --- ┆ --- ┆ --- │ │ f64 ┆ i64 ┆ i64 │ ╞══════╪═════╪══════╡ │ null ┆ 1 ┆ 1 │ ├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤ │ null ┆ 2 ┆ null │ ├╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌┤ │ null ┆ 1 ┆ 1 │ └──────┴─────┴──────┘ Unpivot DataFrame to long format. Parameters ---------- id_vars Columns to use as identifier variables. value_vars Values to use as identifier variables. Apply a custom function. It is important that the function returns a Polars DataFrame. Parameters ---------- f Lambda/ function to apply. predicate_pushdown Allow predicate pushdown optimization to pass this node. projection_pushdown Allow projection pushdown optimization to pass this node. no_optimizations Turn off all optimizations past this point. Interpolate intermediate values. The interpolation method is linear. # type: ignore Created by `df.lazy().groupby("foo)"` Describe the aggregation that need to be done on a group. 
Parameters ---------- aggs Single/ Multiple aggregation expression(s). Examples -------- >>> (pl.scan_csv("data.csv") .groupby("groups") .agg([ pl.col("name").n_unique().alias("unique_names"), pl.max("values") ]) ) Return first n rows of each group. Parameters ---------- n Number of values of the group to select Examples -------- >>> df = pl.DataFrame({ >>> "letters": ["c", "c", "a", "c", "a", "b"], >>> "nrs": [1, 2, 3, 4, 5, 6] >>> }) >>> df shape: (6, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 4 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ╰─────────┴─────╯ >>> (df.groupby("letters") >>> .head(2) >>> .sort("letters") >>> ) shape: (5, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ╰─────────┴─────╯ Return last n rows of each group. Parameters ---------- n Number of values of the group to select Examples -------- >>> df = pl.DataFrame({ >>> "letters": ["c", "c", "a", "c", "a", "b"], >>> "nrs": [1, 2, 3, 4, 5, 6] >>> }) >>> df shape: (6, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "c" ┆ 1 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 4 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ╰─────────┴─────╯ >>> (df.groupby("letters") >>> .tail(2) >>> .sort("letters") >>> ) shape: (5, 2) ╭─────────┬─────╮ │ letters ┆ nrs │ │ --- ┆ --- │ │ str ┆ i64 │ ╞═════════╪═════╡ │ "a" ┆ 3 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "a" ┆ 5 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "b" ┆ 6 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 2 │ ├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤ │ "c" ┆ 4 │ ╰─────────┴─────╯ Apply a function over the groups as a new `DataFrame`. 
It is not recommended that you use this as materializing the `DataFrame` is quite expensive. Parameters ---------- f Function to apply over the `DataFrame`.
| 2.530981
| 3
|
predict.py
|
sdc17/NaivePinYin
| 0
|
6625441
|
<reponame>sdc17/NaivePinYin
import os
import ast
import math
import pickle
import argparse
from itertools import product
import numpy as np
def predict_two_char(ipath, opath, alpha=1e-10, st=25):
    """Convert pinyin lines to Chinese text with a character-bigram model.

    Runs a Viterbi search over the candidate characters of each syllable,
    scoring transitions with 2-gram counts smoothed by unigram frequencies
    (weight `alpha`), and appends one decoded line per input line to `opath`.

    Parameters
    ----------
    ipath : str
        Input file; each line holds space-separated pinyin syllables.
    opath : str
        Output file; decoded lines are appended.
    alpha : float
        Smoothing weight of the unigram back-off term.
    st : float
        Scaling factor applied to the sentence start/end transitions.
    """
    # Load the n-gram counts and the syllable -> candidate-characters table.
    gram1 = {}
    gram2 = {}
    pinyin = {}
    with open('./data/1gram.pkl', 'rb') as f:
        gram1 = pickle.load(f)
    with open('./data/2gram.pkl', 'rb') as f:
        gram2 = pickle.load(f)
    with open('./data/汉字拼音表.txt', encoding='gbk') as f:
        lines = f.readlines()
        for line in lines:
            content = line.strip().split(' ')
            pinyin[content[0]] = content[1:]
    if not gram1 or not gram2 or not pinyin:
        print("Load gram error!")
        return
    # Decode every input line independently.
    with open(ipath, 'r') as f:
        lines = f.readlines()
        for line in lines:
            probs = []   # probs[i][c]: best path probability ending in char c
            traces = []  # traces[i][c]: predecessor char on that best path
            words = line.strip().split(' ')
            # First char: unigram prior mixed with the sentence-start bigram.
            if words:
                first_cnt = {x: gram1[x] for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * gram2.get('s' + key, 0) / gram1['s'] + alpha * first_prob[key])
                probs.append(prob)
                trace = {x: 's' for x in pinyin[words[0].lower()]}
                traces.append(trace)
            # Subsequent chars: standard Viterbi transition with bigram scores.
            for i in range(1, len(words)):
                cnts = {x: gram1[x] for x in pinyin[words[i].lower()]}
                tot = sum(cnts.values())
                prob_one = {x: y / tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                # BUG FIX: normalize the syllable with .lower() here too, as
                # every other lookup does; an upper-case syllable previously
                # raised KeyError at this line.
                for j in pinyin[words[i].lower()]:
                    dp = []
                    for k in pinyin[words[i - 1].lower()]:
                        dp.append(probs[i - 1][k] * ((1 - alpha) * gram2.get(k + j, 0) / (gram1[k] + 1e-30) + alpha * prob_one[j]))
                    prob[j] = max(dp)
                    trace[j] = pinyin[words[i - 1].lower()][np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # Last char: fold in the sentence-end transition.
            for key in probs[-1].keys():
                probs[-1][key] *= (st * (1 - alpha) * gram2.get(key + 't', 0) / (gram1[key] + 1e-30) + alpha * 1.0)
            # Backtrace from the best final char to recover the sentence.
            last = max(probs[-1], key=lambda x: probs[-1][x])
            out = []
            out.append(last)
            for x in range(len(traces) - 1, 0, -1):
                out.append(traces[x][out[-1]])
            output = ''.join(out[::-1])
            with open(opath, 'a') as f:
                f.write(output + '\n')
def predict_three_char(ipath, opath, alpha=1e-10, st=40, full_model=False):
    """Convert pinyin lines to Chinese text with a character-trigram model.

    Runs a Viterbi search over the candidate characters of each syllable,
    scoring transitions with 3-gram counts backed off to 2-grams and
    unigrams (weight `alpha`), and appends one decoded line per input line
    to `opath`.

    Parameters
    ----------
    ipath : str
        Input file; each line holds space-separated pinyin syllables.
    opath : str
        Output file; decoded lines are appended.
    alpha : float
        Smoothing weight of the unigram back-off term.
    st : float
        Scaling factor applied to the sentence start/end transitions.
    full_model : bool
        Load the full 3-gram table instead of the pruned one.
    """
    # Load the n-gram counts and the syllable -> candidate-characters table.
    gram1 = {}
    gram2 = {}
    gram3 = {}
    pinyin = {}
    with open('./data/1gram.pkl', 'rb') as f:
        gram1 = pickle.load(f)
    with open('./data/2gram.pkl', 'rb') as f:
        gram2 = pickle.load(f)
    if full_model:
        with open('./data/3gram_whole.pkl', 'rb') as f:
            gram3 = pickle.load(f)
    else:
        with open('./data/3gram.pkl', 'rb') as f:
            gram3 = pickle.load(f)
    with open('./data/汉字拼音表.txt', encoding='gbk') as f:
        lines = f.readlines()
        for line in lines:
            content = line.strip().split(' ')
            pinyin[content[0]] = content[1:]
    if not gram1 or not gram2 or not gram3 or not pinyin:
        print("Load gram error!")
        return
    # Decode every input line independently.
    with open(ipath, 'r') as f:
        lines = f.readlines()
        for line in lines:
            probs = []   # probs[i][c]: best path probability ending in char c
            traces = []  # traces[i][c]: predecessor char on that best path
            words = line.strip().split(' ')
            # First char: unigram prior mixed with the sentence-start bigram.
            if len(words) >= 1:
                first_cnt = {x: gram1[x] for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * gram2.get('s' + key, 0) / gram1['s'] + alpha * first_prob[key])
                probs.append(prob)
                trace = {x: 's' for x in pinyin[words[0].lower()]}
                traces.append(trace)
            # Second char: sentence-start trigram 's' + first + second.
            if len(words) >= 2:
                first_cnt = {x: gram1[x] for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                second_cnt = {x: gram1[x] for x in pinyin[words[1].lower()]}
                tot = sum(second_cnt.values())
                second_prob = {x: y / tot for x, y in second_cnt.items()}
                prob = {}
                trace = {}
                for key in second_prob.keys():
                    dp = [(st * (1 - alpha) * gram3.get('s' + x + key, 0) / (gram2.get('s' + x, 0) + 1e-30)) + alpha * second_prob[key] for x in first_prob.keys()]
                    prob[key] = max(dp)
                    trace[key] = pinyin[words[0].lower()][np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # Subsequent chars: trigram transitions, maximized over the
            # grandparent character's candidates.
            for i in range(2, len(words)):
                cnts = {x: gram1[x] for x in pinyin[words[i].lower()]}
                tot = sum(cnts.values())
                prob_one = {x: y / tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                # BUG FIX: normalize the syllable with .lower() here too, as
                # every other lookup does; an upper-case syllable previously
                # raised KeyError at this line.
                for j in pinyin[words[i].lower()]:
                    dp = []
                    for k in pinyin[words[i - 1].lower()]:
                        dp.append(max([probs[i - 1][k] * ((1 - alpha) * gram3.get(l + k + j, 0) / (gram2.get(l + k, 0) + 1e-30) + alpha * prob_one[j]) for l in pinyin[words[i - 2].lower()]]))
                    prob[j] = max(dp)
                    trace[j] = pinyin[words[i - 1].lower()][np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # Last char: fold in the sentence-end transition.
            if len(words) >= 2:
                for key in probs[-1].keys():
                    probs[-1][key] *= max([(st * (1 - alpha) * gram3.get(x + key + 't', 0) / (gram2.get(x + key, 0) + 1e-30) + alpha * 1.0) for x in probs[-2].keys()])
            else:
                for key in probs[-1].keys():
                    probs[-1][key] *= (st * (1 - alpha) * gram2.get(key + 't', 0) / (gram1[key] + 1e-30) + alpha * 1.0)
            # Backtrace from the best final char to recover the sentence.
            last = max(probs[-1], key=lambda x: probs[-1][x])
            out = []
            out.append(last)
            for x in range(len(traces) - 1, 0, -1):
                out.append(traces[x][out[-1]])
            output = ''.join(out[::-1])
            with open(opath, 'a') as f:
                f.write(output + '\n')
def predict_two_word(ipath, opath, alpha=1e-10, st=1, full_model=False):
    """Convert pinyin lines to Chinese text with a word (2-char) bigram model.

    Groups the syllables of each line into two-character words, runs a
    Viterbi search over candidate words with word-bigram transitions, and
    appends one decoded line per input line to `opath`. When no known word
    matches a syllable pair, it backs off to the most frequent single
    character for each syllable.

    Parameters
    ----------
    ipath : str
        Input file; each line holds space-separated pinyin syllables.
    opath : str
        Output file; decoded lines are appended.
    alpha : float
        Smoothing weight of the unigram back-off term.
    st : float
        Scaling factor applied to the sentence-start transition.
    full_model : bool
        Load the full 2-word table instead of the pruned one.
    """
    # Load word counts and the syllable -> candidate-characters table.
    word1 = {}
    word2 = {}
    pinyin = {}
    with open('./data/1word.pkl', 'rb') as f:
        word1 = pickle.load(f)
    if full_model:
        with open('./data/2word_whole.pkl', 'rb') as f:
            word2 = pickle.load(f)
    else:
        with open('./data/2word.pkl', 'rb') as f:
            word2 = pickle.load(f)
    with open('./data/汉字拼音表.txt', encoding='gbk') as f:
        lines = f.readlines()
        for line in lines:
            content = line.strip().split(' ')
            pinyin[content[0]] = content[1:]
    if not word1 or not word2 or not pinyin:
        print("Load gram error!")
        return
    # Decode every input line independently.
    with open(ipath, 'r') as f:
        lines = f.readlines()
        for line in lines:
            probs = []   # probs[i][w]: best path probability ending in word w
            traces = []  # traces[i][w]: predecessor word on that best path
            words = line.strip().split(' ')
            # First step: a lone syllable, or the first two-syllable word.
            if len(words) == 1:
                first_cnt = {x: word1.get(x, 0) for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * word2.get('s_' + key, 0) / word1['s'] + alpha * first_prob[key])
                probs.append(prob)
                trace = {x: 's' for x in pinyin[words[0].lower()]}
                traces.append(trace)
            elif len(words) >= 2:
                first_cnt = {}
                for x in pinyin[words[0].lower()]:
                    for y in pinyin[words[1].lower()]:
                        if word1.get(x + y):
                            first_cnt[x + y] = word1[x + y]
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * word2.get('s_' + key, 0) / word1['s'] + alpha * first_prob[key])
                if not prob:
                    # No known word covers this syllable pair: back off to
                    # the most frequent character for each syllable.
                    fix_x = {}
                    fix_y = {}
                    for x in pinyin[words[0].lower()]:
                        if word1.get(x):
                            fix_x[x] = word1[x]
                    for x in pinyin[words[1].lower()]:
                        if word1.get(x):
                            fix_y[x] = word1[x]
                    fix = max(fix_x, key=lambda x: fix_x[x]) + max(fix_y, key=lambda x: fix_y[x])
                    # BUG FIX: `trace` was never initialized on this path, so
                    # the original either raised NameError (first line) or
                    # silently mutated the previous line's trace dict. Build
                    # a fresh dict instead.
                    trace = {fix: 's'}
                    prob[fix] = 1.0
                else:
                    trace = {x: 's' for x in first_prob.keys()}
                probs.append(prob)
                traces.append(trace)
            # An odd trailing syllable is handled separately after the loop.
            if len(words) % 2 == 0:
                end = len(words)
            else:
                end = len(words) - 1
            # Subsequent two-syllable words.
            for i in range(2, end, 2):
                cnts = {}
                for x in pinyin[words[i].lower()]:
                    for y in pinyin[words[i + 1].lower()]:
                        if word1.get(x + y):
                            cnts[x + y] = word1[x + y]
                tot = sum(cnts.values())
                prob_one = {x: y / tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                for j in prob_one.keys():
                    dp = []
                    for m in probs[-1].keys():
                        dp.append(probs[i // 2 - 1][m] * ((1 - alpha) * word2.get(m + '_' + j, 0) / (word1.get(m, 0) + 1e-60) + alpha * prob_one[j]))
                    prob[j] = max(dp)
                    trace[j] = list(probs[-1].keys())[np.argmax(dp)]
                if not prob:
                    # Same character-level back-off when no candidate word exists.
                    fix_x = {}
                    fix_y = {}
                    for x in pinyin[words[i].lower()]:
                        if word1.get(x):
                            fix_x[x] = word1[x]
                    for x in pinyin[words[i + 1].lower()]:
                        if word1.get(x):
                            fix_y[x] = word1[x]
                    fix = max(fix_x, key=lambda x: fix_x[x]) + max(fix_y, key=lambda x: fix_y[x])
                    trace[fix] = max(probs[-1], key=lambda x: probs[-1][x])
                    prob[fix] = 1.0
                probs.append(prob)
                traces.append(trace)
            # Odd syllable count: the final single character is its own step.
            lenw = len(words)
            if lenw % 2 == 1 and lenw >= 3:
                cnts = {}
                for x in pinyin[words[lenw - 1].lower()]:
                    if word1.get(x):
                        cnts[x] = word1[x]
                tot = sum(cnts.values())
                prob_one = {x: y / tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                for i in prob_one.keys():
                    dp = []
                    for m in probs[-1].keys():
                        dp.append(probs[(lenw // 2) - 1][m] * ((1 - alpha) * word2.get(m + '_' + i, 0) / (word1.get(m, 0) + 1e-60) + alpha * prob_one[i]))
                    prob[i] = max(dp)
                    trace[i] = list(probs[-1].keys())[np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # Backtrace from the best final word to recover the sentence.
            last = max(probs[-1], key=lambda x: probs[-1][x])
            out = []
            out.append(last)
            for x in range(len(traces) - 1, 0, -1):
                out.append(traces[x][out[-1]])
            output = ''.join(out[::-1])
            with open(opath, 'a') as f:
                f.write(output + '\n')
if __name__ == '__main__':
    # Command-line entry point: choose a model and decode the input file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", default='./data/input.txt', type=str, help="input file")
    parser.add_argument("-o", default='./data/output.txt', type=str, help="output file")
    parser.add_argument("--model_type", default='3c', type=str, choices=['2c', '3c', '2w'], help="Available models")
    parser.add_argument("--full_model", default=False, type=ast.literal_eval, choices=[True, False], help="Use full model or not")
    args = parser.parse_args()
    if not os.path.exists(args.i):
        print("Input file is invalid!")
    else:
        # Start from a clean output file on every run.
        if os.path.exists(args.o):
            os.remove(args.o)
        if args.model_type == '2c':
            predict_two_char(args.i, args.o, alpha=1e-10)
        elif args.model_type == '3c':
            predict_three_char(args.i, args.o, alpha=1e-10, full_model=args.full_model)
        elif args.model_type == '2w':
            predict_two_word(args.i, args.o, alpha=1e-10, full_model=args.full_model)
|
import os
import ast
import math
import pickle
import argparse
from itertools import product
import numpy as np
def predict_two_char(ipath, opath, alpha=1e-10, st=25):
    """Convert pinyin lines to Chinese text with a character-bigram model.

    Runs a Viterbi search over the candidate characters of each syllable,
    scoring transitions with 2-gram counts smoothed by unigram frequencies
    (weight `alpha`), and appends one decoded line per input line to `opath`.

    Parameters
    ----------
    ipath : str
        Input file; each line holds space-separated pinyin syllables.
    opath : str
        Output file; decoded lines are appended.
    alpha : float
        Smoothing weight of the unigram back-off term.
    st : float
        Scaling factor applied to the sentence start/end transitions.
    """
    # Load the n-gram counts and the syllable -> candidate-characters table.
    gram1 = {}
    gram2 = {}
    pinyin = {}
    with open('./data/1gram.pkl', 'rb') as f:
        gram1 = pickle.load(f)
    with open('./data/2gram.pkl', 'rb') as f:
        gram2 = pickle.load(f)
    with open('./data/汉字拼音表.txt', encoding='gbk') as f:
        lines = f.readlines()
        for line in lines:
            content = line.strip().split(' ')
            pinyin[content[0]] = content[1:]
    if not gram1 or not gram2 or not pinyin:
        print("Load gram error!")
        return
    # Decode every input line independently.
    with open(ipath, 'r') as f:
        lines = f.readlines()
        for line in lines:
            probs = []   # probs[i][c]: best path probability ending in char c
            traces = []  # traces[i][c]: predecessor char on that best path
            words = line.strip().split(' ')
            # First char: unigram prior mixed with the sentence-start bigram.
            if words:
                first_cnt = {x: gram1[x] for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * gram2.get('s' + key, 0) / gram1['s'] + alpha * first_prob[key])
                probs.append(prob)
                trace = {x: 's' for x in pinyin[words[0].lower()]}
                traces.append(trace)
            # Subsequent chars: standard Viterbi transition with bigram scores.
            for i in range(1, len(words)):
                cnts = {x: gram1[x] for x in pinyin[words[i].lower()]}
                tot = sum(cnts.values())
                prob_one = {x: y / tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                # BUG FIX: normalize the syllable with .lower() here too, as
                # every other lookup does; an upper-case syllable previously
                # raised KeyError at this line.
                for j in pinyin[words[i].lower()]:
                    dp = []
                    for k in pinyin[words[i - 1].lower()]:
                        dp.append(probs[i - 1][k] * ((1 - alpha) * gram2.get(k + j, 0) / (gram1[k] + 1e-30) + alpha * prob_one[j]))
                    prob[j] = max(dp)
                    trace[j] = pinyin[words[i - 1].lower()][np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # Last char: fold in the sentence-end transition.
            for key in probs[-1].keys():
                probs[-1][key] *= (st * (1 - alpha) * gram2.get(key + 't', 0) / (gram1[key] + 1e-30) + alpha * 1.0)
            # Backtrace from the best final char to recover the sentence.
            last = max(probs[-1], key=lambda x: probs[-1][x])
            out = []
            out.append(last)
            for x in range(len(traces) - 1, 0, -1):
                out.append(traces[x][out[-1]])
            output = ''.join(out[::-1])
            with open(opath, 'a') as f:
                f.write(output + '\n')
def predict_three_char(ipath, opath, alpha=1e-10, st=40, full_model=False):
    """Convert pinyin lines to Chinese text with a character-trigram model.

    Runs a Viterbi search over the candidate characters of each syllable,
    scoring transitions with 3-gram counts backed off to 2-grams and
    unigrams (weight `alpha`), and appends one decoded line per input line
    to `opath`.

    Parameters
    ----------
    ipath : str
        Input file; each line holds space-separated pinyin syllables.
    opath : str
        Output file; decoded lines are appended.
    alpha : float
        Smoothing weight of the unigram back-off term.
    st : float
        Scaling factor applied to the sentence start/end transitions.
    full_model : bool
        Load the full 3-gram table instead of the pruned one.
    """
    # Load the n-gram counts and the syllable -> candidate-characters table.
    gram1 = {}
    gram2 = {}
    gram3 = {}
    pinyin = {}
    with open('./data/1gram.pkl', 'rb') as f:
        gram1 = pickle.load(f)
    with open('./data/2gram.pkl', 'rb') as f:
        gram2 = pickle.load(f)
    if full_model:
        with open('./data/3gram_whole.pkl', 'rb') as f:
            gram3 = pickle.load(f)
    else:
        with open('./data/3gram.pkl', 'rb') as f:
            gram3 = pickle.load(f)
    with open('./data/汉字拼音表.txt', encoding='gbk') as f:
        lines = f.readlines()
        for line in lines:
            content = line.strip().split(' ')
            pinyin[content[0]] = content[1:]
    if not gram1 or not gram2 or not gram3 or not pinyin:
        print("Load gram error!")
        return
    # Decode every input line independently.
    with open(ipath, 'r') as f:
        lines = f.readlines()
        for line in lines:
            probs = []   # probs[i][c]: best path probability ending in char c
            traces = []  # traces[i][c]: predecessor char on that best path
            words = line.strip().split(' ')
            # First char: unigram prior mixed with the sentence-start bigram.
            if len(words) >= 1:
                first_cnt = {x: gram1[x] for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * gram2.get('s' + key, 0) / gram1['s'] + alpha * first_prob[key])
                probs.append(prob)
                trace = {x: 's' for x in pinyin[words[0].lower()]}
                traces.append(trace)
            # Second char: sentence-start trigram 's' + first + second.
            if len(words) >= 2:
                first_cnt = {x: gram1[x] for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y / tot for x, y in first_cnt.items()}
                second_cnt = {x: gram1[x] for x in pinyin[words[1].lower()]}
                tot = sum(second_cnt.values())
                second_prob = {x: y / tot for x, y in second_cnt.items()}
                prob = {}
                trace = {}
                for key in second_prob.keys():
                    dp = [(st * (1 - alpha) * gram3.get('s' + x + key, 0) / (gram2.get('s' + x, 0) + 1e-30)) + alpha * second_prob[key] for x in first_prob.keys()]
                    prob[key] = max(dp)
                    trace[key] = pinyin[words[0].lower()][np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # Subsequent chars: trigram transitions, maximized over the
            # grandparent character's candidates.
            for i in range(2, len(words)):
                cnts = {x: gram1[x] for x in pinyin[words[i].lower()]}
                tot = sum(cnts.values())
                prob_one = {x: y / tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                # BUG FIX: normalize the syllable with .lower() here too, as
                # every other lookup does; an upper-case syllable previously
                # raised KeyError at this line.
                for j in pinyin[words[i].lower()]:
                    dp = []
                    for k in pinyin[words[i - 1].lower()]:
                        dp.append(max([probs[i - 1][k] * ((1 - alpha) * gram3.get(l + k + j, 0) / (gram2.get(l + k, 0) + 1e-30) + alpha * prob_one[j]) for l in pinyin[words[i - 2].lower()]]))
                    prob[j] = max(dp)
                    trace[j] = pinyin[words[i - 1].lower()][np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # Last char: fold in the sentence-end transition.
            if len(words) >= 2:
                for key in probs[-1].keys():
                    probs[-1][key] *= max([(st * (1 - alpha) * gram3.get(x + key + 't', 0) / (gram2.get(x + key, 0) + 1e-30) + alpha * 1.0) for x in probs[-2].keys()])
            else:
                for key in probs[-1].keys():
                    probs[-1][key] *= (st * (1 - alpha) * gram2.get(key + 't', 0) / (gram1[key] + 1e-30) + alpha * 1.0)
            # Backtrace from the best final char to recover the sentence.
            last = max(probs[-1], key=lambda x: probs[-1][x])
            out = []
            out.append(last)
            for x in range(len(traces) - 1, 0, -1):
                out.append(traces[x][out[-1]])
            output = ''.join(out[::-1])
            with open(opath, 'a') as f:
                f.write(output + '\n')
def predict_two_word(ipath, opath, alpha=1e-10, st=1, full_model=False):
    """Decode pinyin syllables into Chinese characters with a word-pair model.

    Reads one sentence per line from ``ipath`` (syllables separated by spaces)
    and appends each decoded character string to ``opath``.  Characters are
    decoded two at a time as two-character "words" via a Viterbi search over
    word-pair counts, mixing in a unigram fallback weighted by ``alpha``.

    Args:
        ipath: path to the input file of space-separated pinyin syllables.
        opath: output path; opened in append mode, one decoded line per input line.
        alpha: smoothing weight for the unigram fallback term.
        st: extra weight on the sentence-start ('s') transition term.
        full_model: if True load './data/2word_whole.pkl', else './data/2word.pkl'.
    """
    # load word tables:
    #   word1: counts for single chars and two-char words
    #   word2: counts for adjacent word pairs keyed as 'left_right'
    #   pinyin: syllable -> list of candidate characters
    word1 = {}
    word2 = {}
    pinyin = {}
    with open('./data/1word.pkl', 'rb') as f:
        word1 = pickle.load(f)
    if full_model:
        with open('./data/2word_whole.pkl', 'rb') as f:
            word2 = pickle.load(f)
    else:
        with open('./data/2word.pkl', 'rb') as f:
            word2 = pickle.load(f)
    # The char->pinyin table is GBK-encoded: "<char> <syllable> [<syllable>...]".
    with open('./data/汉字拼音表.txt', encoding='gbk') as f:
        lines = f.readlines()
        for line in lines:
            content = line.strip().split(' ')
            pinyin[content[0]] = content[1:]
    if not word1 or not word2 or not pinyin:
        print("Load gram error!")
        return
    # predict
    with open(ipath, 'r') as f:
        lines = f.readlines()
        for line in lines:
            probs = []   # probs[t]: candidate word -> best path score at step t
            traces = []  # traces[t]: candidate word -> best predecessor at step t-1
            words = line.strip().split(' ')
            # first word (single-syllable sentence: decode one char from 's')
            if len(words) == 1:
                first_cnt = {x : word1.get(x, 0) for x in pinyin[words[0].lower()]}
                tot = sum(first_cnt.values())
                first_prob = {x: y/tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * word2.get('s_' + key, 0) / word1['s'] + alpha * first_prob[key])
                probs.append(prob)
                trace = {x:'s' for x in pinyin[words[0].lower()]}
                traces.append(trace)
            elif len(words) >= 2:
                # First two syllables form the initial two-char word; only
                # character pairs that exist in word1 are kept as candidates.
                first_cnt = {}
                for x in pinyin[words[0].lower()]:
                    for y in pinyin[words[1].lower()]:
                        if word1.get(x + y):
                            first_cnt[x + y] = word1[x + y]
                            # first_cnt[x + y] = word1.get(x + y, 0)
                tot = sum(first_cnt.values())
                first_prob = {x: y/tot for x, y in first_cnt.items()}
                prob = {}
                for key, value in first_prob.items():
                    prob[key] = (st * (1 - alpha) * word2.get('s_' + key, 0) / word1['s'] + alpha * first_prob[key])
                # fix zero prob: no known two-char word fits — fall back to the
                # most frequent single character for each syllable independently.
                if not prob:
                    fix_x = {}
                    fix_y = {}
                    for x in pinyin[words[0].lower()]:
                        if word1.get(x):
                            fix_x[x] = word1[x]
                    for x in pinyin[words[1].lower()]:
                        if word1.get(x):
                            fix_y[x] = word1[x]
                    fix = max(fix_x, key=lambda x:fix_x[x]) + max(fix_y, key=lambda x:fix_y[x])
                    # NOTE(review): 'trace' is not initialized in this branch;
                    # if 'prob' is empty on the very first input line this raises
                    # NameError (on later lines it mutates the previous line's
                    # leftover trace). Likely needs 'trace = {}' before this.
                    trace[fix] = 's'
                    prob[fix] = 1.0
                else:
                    trace = {x : 's' for x in first_prob.keys()}
                probs.append(prob)
                traces.append(trace)
            # Consume syllables in pairs; a trailing odd syllable is handled below.
            if len(words) % 2 == 0:
                end = len(words)
            else:
                end = len(words) - 1
            # subsequent word steps (syllables i, i+1 form one candidate word)
            for i in range(2, end, 2):
                cnts = {}
                for x in pinyin[words[i].lower()]:
                    for y in pinyin[words[i + 1].lower()]:
                        if word1.get(x + y):
                            cnts[x + y] = word1[x + y]
                            # cnts[x + y] = word1.get(x + y, 0)
                tot = sum(cnts.values())
                prob_one = {x: y/tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                for j in prob_one.keys():
                    dp = []
                    for m in probs[-1].keys():
                        # Viterbi step: previous path score times the smoothed
                        # word-pair transition m -> j (1e-60 avoids div-by-zero).
                        dp.append(probs[i//2 - 1][m] * ((1 - alpha) * word2.get(m + '_' + j, 0) / (word1.get(m, 0) + 1e-60) + alpha * prob_one[j]))
                    prob[j] = max(dp)
                    trace[j] = list(probs[-1].keys())[np.argmax(dp)]
                # fix zero prob: same single-char fallback as for the first word.
                if not prob:
                    fix_x = {}
                    fix_y = {}
                    for x in pinyin[words[i].lower()]:
                        if word1.get(x):
                            fix_x[x] = word1[x]
                    for x in pinyin[words[i + 1].lower()]:
                        if word1.get(x):
                            fix_y[x] = word1[x]
                    fix = max(fix_x, key=lambda x:fix_x[x]) + max(fix_y, key=lambda x:fix_y[x])
                    trace[fix] = max(probs[-1], key=lambda x: probs[-1][x])
                    prob[fix] = 1.0
                probs.append(prob)
                traces.append(trace)
            # Odd syllable count: decode the final syllable as a single char.
            lenw = len(words)
            if lenw % 2 == 1 and lenw >= 3:
                cnts = {}
                for x in pinyin[words[lenw - 1].lower()]:
                    if word1.get(x):
                        cnts[x] = word1[x]
                        # cnts[x] = word1.get(x, 0)
                tot = sum(cnts.values())
                prob_one = {x: y/tot for x, y in cnts.items()}
                prob = {}
                trace = {}
                for i in prob_one.keys():
                    dp = []
                    for m in probs[-1].keys():
                        dp.append(probs[(lenw // 2) - 1][m] * ((1 - alpha) * word2.get(m + '_' + i, 0) / (word1.get(m, 0) + 1e-60) + alpha * prob_one[i]))
                    prob[i] = max(dp)
                    trace[i] = list(probs[-1].keys())[np.argmax(dp)]
                probs.append(prob)
                traces.append(trace)
            # for key in probs[-1].keys():
            #     probs[-1][key] *= (st * (1 - alpha) * word2.get(key + '_t', 0) / (word1.get(key, 0) + 1e-60) + alpha * 1.0)
            # backtrace: start from the best final candidate, then follow the
            # stored predecessors backwards and reverse the path.
            last = max(probs[-1], key=lambda x:probs[-1][x])
            out = []
            out.append(last)
            for x in range(len(traces) - 1, 0, -1):
                out.append(traces[x][out[-1]])
            output = ''.join(out[::-1])
            with open(opath, 'a') as f:
                f.write(output + '\n')
if __name__ == '__main__':
    # CLI entry point: select one of the three decoders and run it over -i/-o.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", default='./data/input.txt', type=str, help="input file")
    parser.add_argument("-o", default='./data/output.txt', type=str, help="output file")
    parser.add_argument("--model_type", default='3c', type=str, choices=['2c', '3c', '2w'], help="Available models")
    # ast.literal_eval parses the literal strings "True"/"False" into booleans.
    parser.add_argument("--full_model", default=False, type=ast.literal_eval, choices=[True, False], help="Use full model or not")
    args = parser.parse_args()
    if os.path.exists(args.i):
        # The predictors open the output in append mode, so clear any old run.
        if os.path.exists(args.o):
            os.remove(args.o)
        if args.model_type == '2c':
            predict_two_char(args.i, args.o, alpha=1e-10)
        elif args.model_type == '3c':
            predict_three_char(args.i, args.o, alpha=1e-10, full_model=args.full_model)
        elif args.model_type == '2w':
            predict_two_word(args.i, args.o, alpha=1e-10, full_model=args.full_model)
    else:
        print("Input file is invalid!")
|
en
| 0.336969
|
# load gram # predict # fisrt char # first_prob = {x: math.log(y/tot + 1e-30) for x, y in first_cnt.items()} # probs.append(first_prob) # subsequent chars # dp.append(probs[i - 1][k] + math.log((1 - alpha) * gram2.get(k + j, 0) / (gram1[k] + 1e-30) + alpha * prob_one[j] + 1e-30)) # last char # backtrace # load gram # predict # fisrt char # first_prob = {x: math.log(y/tot + 1e-30) for x, y in first_cnt.items()} # probs.append(first_prob) # second char # subsequent chars # last char # backtrace # load word # predict # fisrt word # first_cnt[x + y] = word1.get(x + y, 0) # fix zero prob # subsequent chars # cnts[x + y] = word1.get(x + y, 0) # fix zero prob # cnts[x] = word1.get(x, 0) # for key in probs[-1].keys(): # probs[-1][key] *= (st * (1 - alpha) * word2.get(key + '_t', 0) / (word1.get(key, 0) + 1e-60) + alpha * 1.0) # backtrace
| 2.643984
| 3
|
scraper.py
|
elolb/Dota2-Pick-Helper
| 0
|
6625442
|
#! python3
# scraper.py
# Scrapes the counter lists from gamepedia. Collects the necessary data for the pick helper.
# Data
heroList=["Abaddon","Alchemist","Ancient Apparition","Anti-Mage","Arc Warden","Axe","Bane","Batrider","Beastmaster","Bloodseeker","Bounty Hunter",\
"Brewmaster","Bristleback","Broodmother","Centaur Warrunner","Chaos Knight","Chen","Clinkz","Clockwerk","Crystal Maiden","Dark Seer","Dark Willow",\
"Dawnbreaker","Dazzle","Death Prophet","Disruptor","Doom","Dragon Knight","Drow Ranger","Earth Spirit","Earthshaker","Elder Titan","Ember Spirit","Enchantress",\
"Enigma","Faceless Void","Grimstroke","Gyrocopter","Hoodwink","Huskar","Invoker","Io","Jakiro","Juggernaut","Keeper of the Light","Kunkka","Legion Commander","Leshrac",\
"Lich","Lifestealer","Lina","Lion","Lone Druid","Luna","Lycan","Mars","Magnus","Medusa","Meepo","Mirana","Monkey King","Morphling","Naga Siren",\
"Nature\'s Prophet","Necrophos","Night Stalker","Nyx Assassin","Ogre Magi","Omniknight","Oracle","Outworld Devourer","Pangolier","Phantom Assassin",\
"Phantom Lancer","Phoenix","Puck","Pudge","Pugna","Queen of Pain","Razor","Riki","Rubick","Sand King","Shadow Demon","Shadow Fiend","Shadow Shaman",\
"Silencer","Skywrath Mage","Slardar","Slark","Snapfire","Sniper","Spectre","Spirit Breaker","Storm Spirit","Sven","Techies","Templar Assassin","Terrorblade",\
"Tidehunter","Timbersaw","Tinker","Tiny","Treant Protector","Troll Warlord","Tusk","Underlord","Undying","Ursa","Vengeful Spirit","Venomancer",\
"Viper","Visage","Void Spirit","Warlock","Weaver","Windranger","Winter Wyvern","Witch Doctor","Wraith King","Zeus"]
# Each hero's counters page is at counterLinkPre + <hero name> + counterLinkPost.
counterLinkPre='https://dota2.gamepedia.com/'
counterLinkPost='/Counters'
# Scraping
import requests,bs4,pickle
# NOTE(review): this handle is opened but never written to or closed, and the
# name 'file' is rebound to the pickle outputs below — looks like leftover code.
file=open('List.txt', "w")
count=0
badAgainstDict={}
badAgainstOthersDict={}
goodAgainstDict={}
goodAgainstOthersDict={}
worksWellDict={}
worksWellOthersDict={}
for hero in heroList:
    site=requests.get(counterLinkPre+hero+counterLinkPost)
    site.raise_for_status()
    soup=bs4.BeautifulSoup(site.text,'html.parser')
    #print(soup.find('span',id="Bad_against..."))
    #select bold (only hero names are in bold on the website)
    counters=soup.select('b')
    #for each hero create 6 lists
    badAgainst={}
    badAgainstOthers={}
    goodAgainst={}
    goodAgainstOthers={}
    worksWell={}
    worksWellOthers={}
    # for x in some_list: sets x to each element of some_list, not each index.
    # for each hero name
    # NOTE(review): section membership is inferred from the NEXT <h2> heading
    # after each bold name, so names in the section BEFORE "Good against..."
    # land in badAgainst, etc. Verify the intended section->dict mapping.
    for i in counters:
        if (i.findNext('h2').getText()=='Good against...[edit]' ):
            # if(i.findNext('h3'.find('span')['id']=='Others'):
            detail=i.findNext('ul').getText()
            badAgainst[i.getText()]=detail
    #print(badAgainst)
    for i in counters:
        if (i.findNext('h2').getText()=='Works well with...[edit]' ):
            detail=i.findNext('ul').getText()
            goodAgainst[i.getText()]=detail
    #print(goodAgainst)
    for i in counters:
        # 'Navigation menu' is the heading following the page's last content
        # section, so this catches bold names in the final section — fragile.
        if (i.findNext('h2').getText()=='Navigation menu' ):
            detail=i.findNext('ul').getText()
            worksWell[i.getText()]=detail
    #print(worksWellW)
    # if len(badAgainst
    badAgainstDict[hero]=badAgainst
    goodAgainstDict[hero]=goodAgainst
    worksWellDict[hero]=worksWell
    print(hero+" list extracted from "+ counterLinkPre+hero+counterLinkPost+"...")
    count=count+1
#with open(file, "w", encoding="utf-8") as f:
# f.write(str(badAgainstDict))
# f.write(str(goodAgainstDict))
# f.write(str(worksWellDict))
#file.write(str(badAgainstDict))
#file.write(str(goodAgainstDict))
#file.write(str(worksWellDict))
# Persist the per-hero dictionaries for the pick helper to load.
file=open("badAgainstDict.pkl","wb")
pickle.dump(badAgainstDict, file, pickle.HIGHEST_PROTOCOL)
file.close()
file2=open("goodAgainstDict.pkl","wb")
pickle.dump(goodAgainstDict, file2, pickle.HIGHEST_PROTOCOL)
file2.close()
file3=open("worksWellDict.pkl","wb")
pickle.dump(worksWellDict, file3, pickle.HIGHEST_PROTOCOL)
file3.close()
# NOTE(review): badAgainst/goodAgainst/worksWell are loop variables holding
# only the LAST hero's data at this point — pickling them looks unintended.
file=open("badAgainst.pkl","wb")
pickle.dump(badAgainst, file, pickle.HIGHEST_PROTOCOL)
file.close()
file2=open("goodAgainst.pkl","wb")
pickle.dump(goodAgainst, file2, pickle.HIGHEST_PROTOCOL)
file2.close()
file3=open("worksWell.pkl","wb")
pickle.dump(worksWell, file3, pickle.HIGHEST_PROTOCOL)
file3.close()
print("DONE. "+str(count)+" heroes counters are extracted.")
|
#! python3
# scraper.py
# Scrapes the counter lists from gamepedia. Collects the necessary data for the pick helper.
# Data
heroList=["Abaddon","Alchemist","Ancient Apparition","Anti-Mage","Arc Warden","Axe","Bane","Batrider","Beastmaster","Bloodseeker","Bounty Hunter",\
"Brewmaster","Bristleback","Broodmother","Centaur Warrunner","Chaos Knight","Chen","Clinkz","Clockwerk","Crystal Maiden","Dark Seer","Dark Willow",\
"Dawnbreaker","Dazzle","Death Prophet","Disruptor","Doom","Dragon Knight","Drow Ranger","Earth Spirit","Earthshaker","Elder Titan","Ember Spirit","Enchantress",\
"Enigma","Faceless Void","Grimstroke","Gyrocopter","Hoodwink","Huskar","Invoker","Io","Jakiro","Juggernaut","Keeper of the Light","Kunkka","Legion Commander","Leshrac",\
"Lich","Lifestealer","Lina","Lion","Lone Druid","Luna","Lycan","Mars","Magnus","Medusa","Meepo","Mirana","Monkey King","Morphling","Naga Siren",\
"Nature\'s Prophet","Necrophos","Night Stalker","Nyx Assassin","Ogre Magi","Omniknight","Oracle","Outworld Devourer","Pangolier","Phantom Assassin",\
"Phantom Lancer","Phoenix","Puck","Pudge","Pugna","Queen of Pain","Razor","Riki","Rubick","Sand King","Shadow Demon","Shadow Fiend","Shadow Shaman",\
"Silencer","Skywrath Mage","Slardar","Slark","Snapfire","Sniper","Spectre","Spirit Breaker","Storm Spirit","Sven","Techies","Templar Assassin","Terrorblade",\
"Tidehunter","Timbersaw","Tinker","Tiny","Treant Protector","Troll Warlord","Tusk","Underlord","Undying","Ursa","Vengeful Spirit","Venomancer",\
"Viper","Visage","Void Spirit","Warlock","Weaver","Windranger","Winter Wyvern","Witch Doctor","Wraith King","Zeus"]
counterLinkPre='https://dota2.gamepedia.com/'
counterLinkPost='/Counters'
# Scraping
import requests,bs4,pickle
file=open('List.txt', "w")
count=0
badAgainstDict={}
badAgainstOthersDict={}
goodAgainstDict={}
goodAgainstOthersDict={}
worksWellDict={}
worksWellOthersDict={}
for hero in heroList:
site=requests.get(counterLinkPre+hero+counterLinkPost)
site.raise_for_status()
soup=bs4.BeautifulSoup(site.text,'html.parser')
#print(soup.find('span',id="Bad_against..."))
#select bold (only hero names are in bold on the website)
counters=soup.select('b')
#for each hero create 6 lists
badAgainst={}
badAgainstOthers={}
goodAgainst={}
goodAgainstOthers={}
worksWell={}
worksWellOthers={}
# for x in some_list: sets x to each element of some_list, not each index.
# for each hero name
for i in counters:
if (i.findNext('h2').getText()=='Good against...[edit]' ):
# if(i.findNext('h3'.find('span')['id']=='Others'):
detail=i.findNext('ul').getText()
badAgainst[i.getText()]=detail
#print(badAgainst)
for i in counters:
if (i.findNext('h2').getText()=='Works well with...[edit]' ):
detail=i.findNext('ul').getText()
goodAgainst[i.getText()]=detail
#print(goodAgainst)
for i in counters:
if (i.findNext('h2').getText()=='Navigation menu' ):
detail=i.findNext('ul').getText()
worksWell[i.getText()]=detail
#print(worksWellW)
# if len(badAgainst
badAgainstDict[hero]=badAgainst
goodAgainstDict[hero]=goodAgainst
worksWellDict[hero]=worksWell
print(hero+" list extracted from "+ counterLinkPre+hero+counterLinkPost+"...")
count=count+1
#with open(file, "w", encoding="utf-8") as f:
# f.write(str(badAgainstDict))
# f.write(str(goodAgainstDict))
# f.write(str(worksWellDict))
#file.write(str(badAgainstDict))
#file.write(str(goodAgainstDict))
#file.write(str(worksWellDict))
file=open("badAgainstDict.pkl","wb")
pickle.dump(badAgainstDict, file, pickle.HIGHEST_PROTOCOL)
file.close()
file2=open("goodAgainstDict.pkl","wb")
pickle.dump(goodAgainstDict, file2, pickle.HIGHEST_PROTOCOL)
file2.close()
file3=open("worksWellDict.pkl","wb")
pickle.dump(worksWellDict, file3, pickle.HIGHEST_PROTOCOL)
file3.close()
file=open("badAgainst.pkl","wb")
pickle.dump(badAgainst, file, pickle.HIGHEST_PROTOCOL)
file.close()
file2=open("goodAgainst.pkl","wb")
pickle.dump(goodAgainst, file2, pickle.HIGHEST_PROTOCOL)
file2.close()
file3=open("worksWell.pkl","wb")
pickle.dump(worksWell, file3, pickle.HIGHEST_PROTOCOL)
file3.close()
print("DONE. "+str(count)+" heroes counters are extracted.")
|
en
| 0.418932
|
#! python3 # scraper.py # Scrapes the counter lists from gamepedia. Collects the necessary data for the pick helper. # Data # Scraping #print(soup.find('span',id="Bad_against...")) #select bold (only hero names are in bold on the website) #for each hero create 6 lists # for x in some_list: sets x to each element of some_list, not each index. # for each hero name # if(i.findNext('h3'.find('span')['id']=='Others'): #print(badAgainst) #print(goodAgainst) #print(worksWellW) # if len(badAgainst #with open(file, "w", encoding="utf-8") as f: # f.write(str(badAgainstDict)) # f.write(str(goodAgainstDict)) # f.write(str(worksWellDict)) #file.write(str(badAgainstDict)) #file.write(str(goodAgainstDict)) #file.write(str(worksWellDict))
| 2.061381
| 2
|
cubes_lite/sql/conditions.py
|
notexistence/cubes_lite
| 0
|
6625443
|
<reponame>notexistence/cubes_lite
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import collections
import sqlalchemy.sql as sql
from cubes_lite.model.utils import cached_property
from cubes_lite.query.conditions import ConditionBase as ConditionBase_
from cubes_lite.sql.mapping import Mapper
__all__ = (
'PointCondition',
'MatchCondition',
'RangeCondition',
'OptionalCondition',
)
class ConditionBase(ConditionBase_):
    """SQL-evaluating layer on top of the query-level condition base.

    Subclasses implement ``_evaluate(column)`` to turn an already-resolved
    SQLAlchemy column into a boolean expression; ``evaluate`` handles the
    column lookup and the optional ``invert`` (logical NOT) wrapper.
    """
    def evaluate(self, mapper):
        # Resolve the column, build the subclass expression, apply NOT if asked.
        assert self.is_bound(), 'Should be bound to model'
        column = self._get_column(mapper)
        condition = self._evaluate(column)
        if self.invert:
            condition = sql.expression.not_(condition)
        return condition
    def _get_column(self, mapper):
        # ``mapper`` may be a Mapper, a dict keyed by attribute name, or an
        # already-resolved column (any other type), which is passed through.
        if not isinstance(mapper, (Mapper, dict)):
            return mapper
        if not self.attribute:
            return None
        if isinstance(mapper, dict):
            column = mapper[str(self.attribute)]
        else:
            column = mapper.get_column_by_attribute(self.attribute)
        return column
    def _evaluate(self, column):
        # Subclass responsibility: render ``column`` into a boolean expression.
        raise NotImplementedError()
class PointCondition(ConditionBase):
    """Object describing way of slicing a cube through point in a dimension"""
    def __init__(self, dimension, value, invert=False, **options):
        # Normalize ``value`` to a tuple of one or more point values.
        # NOTE: ``basestring`` and bare ``collections.Iterable`` mark this as
        # Python 2 code (both are gone in modern Python 3).
        if isinstance(value, basestring):
            value = (value,)
        if isinstance(value, collections.Iterable):
            value = tuple(value)
        if not isinstance(value, (list, tuple)):
            value = (value,)
        super(PointCondition, self).__init__(dimension, value, invert, **options)
    def _evaluate(self, column):
        value = self.get_value()
        if not value:
            # in some cases the value is an empty tuple.
            # using empty or_() expression as a workaround
            return sql.expression.or_()
        if len(value) == 1:
            # Single point: plain equality instead of IN (x).
            return column == value[0]
        return column.in_(value)
class MatchCondition(ConditionBase):
    """Condition comparing the resolved column against a SQL LIKE pattern."""
    def _evaluate(self, column):
        # Render as ``column LIKE <pattern>`` using the condition's value.
        return column.like(self.get_value())
class RangeCondition(ConditionBase):
    """Object describing way of slicing a cube between two points of a
    dimension that has ordered points. For dimensions with unordered points
    behaviour is unknown."""
    # NOTE: the tuple parameter ``(from_, to_)`` is Python 2-only syntax
    # (removed in Python 3); keep as-is while the codebase targets Python 2.
    def __init__(self, dimension, (from_, to_), invert=False, strong=False, **options):
        super(RangeCondition, self).__init__(dimension, (from_, to_), invert, **options)
        # strong=True makes both bounds exclusive (strict </>).
        self.strong = strong
    @cached_property
    def from_(self):
        # Lower bound of the range; None means an open lower end.
        return self.value[0]
    @cached_property
    def to_(self):
        # Upper bound of the range; None means an open upper end.
        return self.value[1]
    def _evaluate(self, column):
        # NOTE(review): the operator names read inverted — ``upper_operator``
        # (> / >=) is applied against the LOWER bound and vice versa — but the
        # resulting comparisons (column >= from_, column <= to_) are correct.
        upper_operator = sql.operators.gt if self.strong else sql.operators.ge
        lower_operator = sql.operators.lt if self.strong else sql.operators.le
        from_, to_ = self.get_value()
        conditions = []
        if self.from_ is not None:
            conditions.append(upper_operator(column, from_))
        if self.to_ is not None:
            conditions.append(lower_operator(column, to_))
        return sql.expression.and_(*conditions)
class OptionalCondition(ConditionBase):
    """Disjunction (SQL OR) over a list of child conditions."""
    def __init__(self, values, invert=False, **options):
        assert isinstance(values, list), 'Should be a list of Conditions'
        # No attribute of its own: each child carries its own attribute.
        super(OptionalCondition, self).__init__(None, values, invert, **options)
    def __repr__(self):
        return '<{}({})>'.format(
            self.__class__.__name__,
            self.value,
        )
    def bind(self, model):
        # Bind self and recursively bind every child condition to the model.
        self.model = model
        for child in self.value:
            child.bind(model)
    def _all_attributes(self):
        # Union (with duplicates) of attributes referenced by all children.
        result = []
        for condition in self.value:
            attrs = condition._all_attributes()
            result.extend(attrs)
        return result
    def _evaluate(self, mapper):
        # OR together the fully-evaluated child expressions.
        conditions = [v.evaluate(mapper) for v in self.value]
        return sql.expression.or_(*conditions)
    def _get_column(self, mapper):
        # Children resolve their own columns; hand the mapper through unchanged.
        return mapper
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import collections
import sqlalchemy.sql as sql
from cubes_lite.model.utils import cached_property
from cubes_lite.query.conditions import ConditionBase as ConditionBase_
from cubes_lite.sql.mapping import Mapper
__all__ = (
'PointCondition',
'MatchCondition',
'RangeCondition',
'OptionalCondition',
)
class ConditionBase(ConditionBase_):
def evaluate(self, mapper):
assert self.is_bound(), 'Should be bound to model'
column = self._get_column(mapper)
condition = self._evaluate(column)
if self.invert:
condition = sql.expression.not_(condition)
return condition
def _get_column(self, mapper):
if not isinstance(mapper, (Mapper, dict)):
return mapper
if not self.attribute:
return None
if isinstance(mapper, dict):
column = mapper[str(self.attribute)]
else:
column = mapper.get_column_by_attribute(self.attribute)
return column
def _evaluate(self, column):
raise NotImplementedError()
class PointCondition(ConditionBase):
"""Object describing way of slicing a cube through point in a dimension"""
def __init__(self, dimension, value, invert=False, **options):
if isinstance(value, basestring):
value = (value,)
if isinstance(value, collections.Iterable):
value = tuple(value)
if not isinstance(value, (list, tuple)):
value = (value,)
super(PointCondition, self).__init__(dimension, value, invert, **options)
def _evaluate(self, column):
value = self.get_value()
if not value:
# in some cases the value is an empty tuple.
# using empty or_() expression as a workaround
return sql.expression.or_()
if len(value) == 1:
return column == value[0]
return column.in_(value)
class MatchCondition(ConditionBase):
def _evaluate(self, column):
value = self.get_value()
return column.like(value)
class RangeCondition(ConditionBase):
"""Object describing way of slicing a cube between two points of a
dimension that has ordered points. For dimensions with unordered points
behaviour is unknown."""
def __init__(self, dimension, (from_, to_), invert=False, strong=False, **options):
super(RangeCondition, self).__init__(dimension, (from_, to_), invert, **options)
self.strong = strong
@cached_property
def from_(self):
return self.value[0]
@cached_property
def to_(self):
return self.value[1]
def _evaluate(self, column):
upper_operator = sql.operators.gt if self.strong else sql.operators.ge
lower_operator = sql.operators.lt if self.strong else sql.operators.le
from_, to_ = self.get_value()
conditions = []
if self.from_ is not None:
conditions.append(upper_operator(column, from_))
if self.to_ is not None:
conditions.append(lower_operator(column, to_))
return sql.expression.and_(*conditions)
class OptionalCondition(ConditionBase):
def __init__(self, values, invert=False, **options):
assert isinstance(values, list), 'Should be a list of Conditions'
super(OptionalCondition, self).__init__(None, values, invert, **options)
def __repr__(self):
return '<{}({})>'.format(
self.__class__.__name__,
self.value,
)
def bind(self, model):
self.model = model
for child in self.value:
child.bind(model)
def _all_attributes(self):
result = []
for condition in self.value:
attrs = condition._all_attributes()
result.extend(attrs)
return result
def _evaluate(self, mapper):
conditions = [v.evaluate(mapper) for v in self.value]
return sql.expression.or_(*conditions)
def _get_column(self, mapper):
return mapper
|
en
| 0.897339
|
# -*- coding: utf-8 -*- Object describing way of slicing a cube through point in a dimension # in some cases the value is an empty tuple. # using empty or_() expression as a workaround Object describing way of slicing a cube between two points of a dimension that has ordered points. For dimensions with unordered points behaviour is unknown.
| 2.036719
| 2
|
models/tinydb/rediscommand.py
|
pythononwheels/redmonty
| 3
|
6625444
|
#
# TinyDB Model: Rediscommand
#
from redmonty.models.tinydb.tinymodel import TinyModel
class Rediscommand(TinyModel):
    """TinyDB-backed model describing one Redis command entry."""
    #
    # Use the cerberus schema style
    # which offer you immediate validation with cerberus
    # http://docs.python-cerberus.org/en/stable/validation-rules.html
    # types: http://docs.python-cerberus.org/en/stable/validation-rules.html#type
    #
    schema = {
        'name' : { 'type' : 'string', 'maxlength' : 35 },
        'category' : { 'type' : 'string' },
        'summary' : { 'type' : 'string' },
        "args" : { "type" : "list", "default" : []},
        'help_link' : { 'type' : 'string' },
        "help_text" : { "type" : "string" }
    }
    # define class attributes/variables here that should be included in to_dict()
    # conversion and also handed to the encoders but that are NOT part of the schema.
    include_attributes=[]
    #
    # init
    #
    def __init__(self, **kwargs):
        # presumably populates fields from kwargs via the TinyModel base —
        # see TinyModel.init_on_load for the exact contract.
        self.init_on_load(**kwargs)
    #
    # your model's methods down here
    #
|
#
# TinyDB Model: Rediscommand
#
from redmonty.models.tinydb.tinymodel import TinyModel
class Rediscommand(TinyModel):
#
# Use the cerberus schema style
# which offer you immediate validation with cerberus
# http://docs.python-cerberus.org/en/stable/validation-rules.html
# types: http://docs.python-cerberus.org/en/stable/validation-rules.html#type
#
schema = {
'name' : { 'type' : 'string', 'maxlength' : 35 },
'category' : { 'type' : 'string' },
'summary' : { 'type' : 'string' },
"args" : { "type" : "list", "default" : []},
'help_link' : { 'type' : 'string' },
"help_text" : { "type" : "string" }
}
# define class attributes/variables here that should be included in to_dict()
# conversion and also handed to the encoders but that are NOT part of the schema.
include_attributes=[]
#
# init
#
def __init__(self, **kwargs):
self.init_on_load(**kwargs)
#
# your model's methods down here
#
|
en
| 0.772644
|
# # TinyDB Model: Rediscommand # # # Use the cerberus schema style # which offer you immediate validation with cerberus # http://docs.python-cerberus.org/en/stable/validation-rules.html # types: http://docs.python-cerberus.org/en/stable/validation-rules.html#type # # define class attributes/variables here that should be included in to_dict() # conversion and also handed to the encoders but that are NOT part of the schema. # # init # # # your model's methods down here #
| 2.369339
| 2
|
run_server.py
|
tartley/restful-categories-exercise
| 0
|
6625445
|
<reponame>tartley/restful-categories-exercise
from restful.server import main
# Launch the RESTful API server. NOTE: this runs on import as well — there is
# deliberately no __main__ guard, matching the script's run_server.py role.
main()
|
from restful.server import main
main()
|
none
| 1
| 1.137229
| 1
|
|
wagtail/snippets/widgets.py
|
Immensa/wagtail
| 2
|
6625446
|
import json
from django import forms
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from wagtail.admin.staticfiles import versioned_static
from wagtail.admin.widgets import AdminChooser
class AdminSnippetChooser(AdminChooser):
    """Wagtail admin form widget for choosing a snippet of ``model``."""
    def __init__(self, model, **kwargs):
        # Derive all user-facing labels from the model's verbose name.
        self.target_model = model
        name = self.target_model._meta.verbose_name
        self.choose_one_text = _('Choose %s') % name
        self.choose_another_text = _('Choose another %s') % name
        self.link_to_chosen_text = _('Edit this %s') % name
        super().__init__(**kwargs)
    def render_html(self, name, value, attrs):
        # Resolve the raw form value into (instance, pk) before rendering the
        # chooser template around the base widget's HTML.
        instance, value = self.get_instance_and_id(self.target_model, value)
        original_field_html = super().render_html(name, value, attrs)
        return render_to_string("wagtailsnippets/widgets/snippet_chooser.html", {
            'widget': self,
            'model_opts': self.target_model._meta,
            'original_field_html': original_field_html,
            'attrs': attrs,
            'value': value,
            'item': instance,
        })
    def render_js_init(self, id_, name, value):
        # The client-side chooser is keyed by "app_label/model_name".
        model = self.target_model
        return "createSnippetChooser({id}, {model});".format(
            id=json.dumps(id_),
            model=json.dumps('{app}/{model}'.format(
                app=model._meta.app_label,
                model=model._meta.model_name)))
    @property
    def media(self):
        # JS assets required by the chooser, with cache-busting version hashes.
        return forms.Media(js=[
            versioned_static('wagtailsnippets/js/snippet-chooser-modal.js'),
            versioned_static('wagtailsnippets/js/snippet-chooser.js'),
        ])
|
import json
from django import forms
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from wagtail.admin.staticfiles import versioned_static
from wagtail.admin.widgets import AdminChooser
class AdminSnippetChooser(AdminChooser):
def __init__(self, model, **kwargs):
self.target_model = model
name = self.target_model._meta.verbose_name
self.choose_one_text = _('Choose %s') % name
self.choose_another_text = _('Choose another %s') % name
self.link_to_chosen_text = _('Edit this %s') % name
super().__init__(**kwargs)
def render_html(self, name, value, attrs):
instance, value = self.get_instance_and_id(self.target_model, value)
original_field_html = super().render_html(name, value, attrs)
return render_to_string("wagtailsnippets/widgets/snippet_chooser.html", {
'widget': self,
'model_opts': self.target_model._meta,
'original_field_html': original_field_html,
'attrs': attrs,
'value': value,
'item': instance,
})
def render_js_init(self, id_, name, value):
model = self.target_model
return "createSnippetChooser({id}, {model});".format(
id=json.dumps(id_),
model=json.dumps('{app}/{model}'.format(
app=model._meta.app_label,
model=model._meta.model_name)))
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailsnippets/js/snippet-chooser-modal.js'),
versioned_static('wagtailsnippets/js/snippet-chooser.js'),
])
|
none
| 1
| 2.111125
| 2
|
|
portal/migrations/versions/2e1421ac841a_.py
|
uwcirg/true_nth_usa_portal
| 3
|
6625447
|
"""flush qb_timeline cache for all suspended workers
Revision ID: 2e1421ac841a
Revises: <PASSWORD>
Create Date: 2019-04-16 17:08:12.928760
"""
import sqlalchemy as sa
from portal.database import db
from portal.models.qb_timeline import QBT
from portal.models.role import ROLE, Role
from portal.models.user import User, UserRoles
from portal.models.user_consent import UserConsent
def purge_suspended_patients_qbs():
    """Delete cached questionnaire-bank timeline (QBT) rows for every patient
    who has a user consent in 'suspended' status, forcing those timelines to
    be recomputed on next access."""
    suspended = User.query.join(UserRoles).join(Role).join(
        UserConsent).filter(sa.and_(
        Role.id == UserRoles.role_id, UserRoles.user_id == User.id,
        Role.name == ROLE.PATIENT.value)).filter(
        UserConsent.status == 'suspended').with_entities(User.id)
    qbts = QBT.query.filter(QBT.user_id.in_(suspended))
    # Bulk delete without reconciling the session; no loaded QBT objects
    # are reused afterwards so skipping synchronization is safe here.
    qbts.delete(synchronize_session=False)
    db.session.commit()
# revision identifiers, used by Alembic.
revision = '2e1421ac841a'
down_revision = '<PASSWORD>'
def upgrade():
    # Flush stale timeline-cache entries for suspended patients.
    purge_suspended_patients_qbs()
def downgrade():
    # A cache flush has no inverse; flushing again is the safe equivalent,
    # so downgrade intentionally performs the same purge.
    purge_suspended_patients_qbs()
|
"""flush qb_timeline cache for all suspended workers
Revision ID: 2e1421ac841a
Revises: <PASSWORD>
Create Date: 2019-04-16 17:08:12.928760
"""
import sqlalchemy as sa
from portal.database import db
from portal.models.qb_timeline import QBT
from portal.models.role import ROLE, Role
from portal.models.user import User, UserRoles
from portal.models.user_consent import UserConsent
def purge_suspended_patients_qbs():
suspended = User.query.join(UserRoles).join(Role).join(
UserConsent).filter(sa.and_(
Role.id == UserRoles.role_id, UserRoles.user_id == User.id,
Role.name == ROLE.PATIENT.value)).filter(
UserConsent.status == 'suspended').with_entities(User.id)
qbts = QBT.query.filter(QBT.user_id.in_(suspended))
qbts.delete(synchronize_session=False)
db.session.commit()
# revision identifiers, used by Alembic.
revision = '2e1421ac841a'
down_revision = '<PASSWORD>'
def upgrade():
purge_suspended_patients_qbs()
def downgrade():
purge_suspended_patients_qbs()
|
en
| 0.556571
|
flush qb_timeline cache for all suspended workers Revision ID: 2e1421ac841a Revises: <PASSWORD> Create Date: 2019-04-16 17:08:12.928760 # revision identifiers, used by Alembic.
| 1.859296
| 2
|
src/demos/demo_2.py
|
jduncan1980/CSPT15_DS_ALGO_LinkedLists_GP
| 0
|
6625448
|
"""
Given a reference to the head node of a singly-linked list, write a function
that reverses the linked list in place. The function should return the new head
of the reversed list.
In order to do this in O(1) space (in-place), you cannot make a new list, you
need to use the existing nodes.
In order to do this in O(n) time, you should only have to traverse the list
once.
*Note: If you get stuck, try drawing a picture of a small linked list and
running your function by hand. Does it actually work? Also, don't forget to
consider edge cases (like a list with only 1 or 0 elements).*
"""
class LinkedListNode():
    """A node in a singly-linked list."""
    def __init__(self, value):
        self.value = value  # payload stored at this node
        self.next = None    # reference to the following node, or None at the tail
def reverse(head_of_list):
    """Reverse a singly-linked list in place.

    Walks the list exactly once (O(n) time), re-pointing each node's ``next``
    at its predecessor, using O(1) extra space.

    Args:
        head_of_list: head node of the list, or None for an empty list.

    Returns:
        The new head (the original tail), or None if the list was empty.
    """
    prev = None
    current_node = head_of_list
    while current_node:
        # Save the remainder of the list before re-pointing this node backwards.
        # (Renamed from 'next' to avoid shadowing the builtin.)
        next_node = current_node.next
        current_node.next = prev
        prev = current_node
        current_node = next_node
    return prev
# Build a small demo list X -> Y -> Z, reverse it, and print the node values.
x = LinkedListNode('X')
y = LinkedListNode('Y')
z = LinkedListNode('Z')
x.next = y
y.next = z
reverse(x)
# Values are unchanged; only the links were reversed (list is now Z -> Y -> X).
print(x.value, y.value, z.value)
|
"""
Given a reference to the head node of a singly-linked list, write a function
that reverses the linked list in place. The function should return the new head
of the reversed list.
In order to do this in O(1) space (in-place), you cannot make a new list, you
need to use the existing nodes.
In order to do this in O(n) time, you should only have to traverse the list
once.
*Note: If you get stuck, try drawing a picture of a small linked list and
running your function by hand. Does it actually work? Also, don't forget to
consider edge cases (like a list with only 1 or 0 elements).*
"""
class LinkedListNode():
def __init__(self, value):
self.value = value
self.next = None
def reverse(head_of_list):
current_node = head_of_list
prev = None
next = None
while current_node:
next = current_node.next
current_node.next = prev
prev = current_node
current_node = next
return prev
x = LinkedListNode('X')
y = LinkedListNode('Y')
z = LinkedListNode('Z')
x.next = y
y.next = z
reverse(x)
print(x.value, y.value, z.value)
|
en
| 0.879814
|
Given a reference to the head node of a singly-linked list, write a function that reverses the linked list in place. The function should return the new head of the reversed list. In order to do this in O(1) space (in-place), you cannot make a new list, you need to use the existing nodes. In order to do this in O(n) time, you should only have to traverse the list once. *Note: If you get stuck, try drawing a picture of a small linked list and running your function by hand. Does it actually work? Also, don't forget to consider edge cases (like a list with only 1 or 0 elements).*
| 4.220814
| 4
|
readData.py
|
nmchgx/netflix-recommendation
| 0
|
6625449
|
<filename>readData.py
#! python2
# coding: utf-8
import numpy as np
import pandas as pd
from tqdm import tqdm
def saveUserKey():
    """Build and persist the user-id -> row-index mapping.

    Reads the raw user ids from data/users.txt and writes an (N, 2) array
    of (user_id, row_index) pairs to output/user_key.npy.
    """
    user_ids = np.loadtxt('data/users.txt', dtype='int')
    pairs = np.array([user_ids, range(0, len(user_ids))]).T
    np.save('output/user_key.npy', pairs)


def loadUserKey():
    """Load the saved (user_id, row_index) pairs as a dict.

    NOTE(review): loads from data/ while saveUserKey writes to output/ --
    presumably files are moved between pipeline steps; confirm the workflow.
    """
    pairs = np.load('data/user_key.npy')
    return dict(pairs)
def initMatrix(data, name):
    """Build a dense user x movie rating matrix and save it to *name*.

    Parameters
    ----------
    data : (N, 3) int array of (user_id, movie_id, score) rows.
    name : path of the .npy file to write.

    Movie ids are used as 1-based column indices; user ids are mapped to
    row indices via the saved user key.
    """
    user_key = np.load('output/user_key.npy')
    movie_table = pd.read_table('data/movie_titles.txt', header=None,
                                delim_whitespace=True,
                                names=['movie_id', 'year', 'title'])
    row_of_user = dict(user_key)
    matrix = np.zeros((len(user_key), len(movie_table)), dtype='float32')
    for user_id, movie_id, score in tqdm(data):
        matrix[row_of_user[user_id]][movie_id - 1] = score
    np.save(name, matrix)
def saveTrainMatrix():
    """Convert the raw training ratings into a dense matrix on disk."""
    frame = pd.read_table('data/netflix_train.txt', header=None,
                          delim_whitespace=True,
                          names=['user_id', 'movie_id', 'score', 'time'])
    ratings = np.array(frame[['user_id', 'movie_id', 'score']].values,
                       dtype='int')
    initMatrix(ratings, 'output/matrix_train.npy')


def loadTrainMatrix():
    """Load the dense training matrix (expected under data/)."""
    return np.load('data/matrix_train.npy')
def saveTestMatrix():
    """Convert the raw test ratings into a dense matrix on disk."""
    frame = pd.read_table('data/netflix_test.txt', header=None,
                          delim_whitespace=True,
                          names=['user_id', 'movie_id', 'score', 'time'])
    ratings = np.array(frame[['user_id', 'movie_id', 'score']].values,
                       dtype='int')
    initMatrix(ratings, 'output/matrix_test.npy')


def loadTestMatrix():
    """Load the dense test matrix (expected under data/)."""
    return np.load('data/matrix_test.npy')
def saveSimMatrix():
    """Compute and save the user-user cosine-similarity matrix.

    Loads the dense training matrix, computes pairwise cosine similarity
    (row dot products normalised by the row norms) and writes it to
    output/matrix_sim_train.npy.

    Fixes: the original computed the full O(n^2 * m) dot product twice
    (once into an unused local, `data2`) and built the norm denominator
    with an extra matrix multiply; the dead work is removed and the
    denominator is the equivalent outer product of the row norms.
    """
    data = np.load('output/matrix_train.npy')
    norms = np.linalg.norm(data, axis=1)
    # np.outer(norms, norms) == dot(norms.reshape(n, 1), norms.reshape(1, n))
    matrix = np.dot(data, data.T) / np.outer(norms, norms)
    np.save('output/matrix_sim_train.npy', matrix)


def loadSimMatrix():
    """Load the saved similarity matrix (expected under data/)."""
    return np.load('data/matrix_sim_train.npy')
def getTestLen():
    """Return the number of rating rows in the raw test file."""
    test_frame = pd.read_table('data/netflix_test.txt', header=None,
                               delim_whitespace=True)
    return len(test_frame)
|
<filename>readData.py
#! python2
# coding: utf-8
import numpy as np
import pandas as pd
from tqdm import tqdm
def saveUserKey():
    """Build and persist the user-id -> row-index mapping.

    Reads the raw user ids from data/users.txt and writes an (N, 2) array
    of (user_id, row_index) pairs to output/user_key.npy.
    """
    user_ids = np.loadtxt('data/users.txt', dtype='int')
    pairs = np.array([user_ids, range(0, len(user_ids))]).T
    np.save('output/user_key.npy', pairs)


def loadUserKey():
    """Load the saved (user_id, row_index) pairs as a dict.

    NOTE(review): loads from data/ while saveUserKey writes to output/ --
    presumably files are moved between pipeline steps; confirm the workflow.
    """
    pairs = np.load('data/user_key.npy')
    return dict(pairs)
def initMatrix(data, name):
    """Build a dense user x movie rating matrix and save it to *name*.

    Parameters
    ----------
    data : (N, 3) int array of (user_id, movie_id, score) rows.
    name : path of the .npy file to write.

    Movie ids are used as 1-based column indices; user ids are mapped to
    row indices via the saved user key.
    """
    user_key = np.load('output/user_key.npy')
    movie_table = pd.read_table('data/movie_titles.txt', header=None,
                                delim_whitespace=True,
                                names=['movie_id', 'year', 'title'])
    row_of_user = dict(user_key)
    matrix = np.zeros((len(user_key), len(movie_table)), dtype='float32')
    for user_id, movie_id, score in tqdm(data):
        matrix[row_of_user[user_id]][movie_id - 1] = score
    np.save(name, matrix)
def saveTrainMatrix():
    """Convert the raw training ratings into a dense matrix on disk."""
    frame = pd.read_table('data/netflix_train.txt', header=None,
                          delim_whitespace=True,
                          names=['user_id', 'movie_id', 'score', 'time'])
    ratings = np.array(frame[['user_id', 'movie_id', 'score']].values,
                       dtype='int')
    initMatrix(ratings, 'output/matrix_train.npy')


def loadTrainMatrix():
    """Load the dense training matrix (expected under data/)."""
    return np.load('data/matrix_train.npy')
def saveTestMatrix():
    """Convert the raw test ratings into a dense matrix on disk."""
    frame = pd.read_table('data/netflix_test.txt', header=None,
                          delim_whitespace=True,
                          names=['user_id', 'movie_id', 'score', 'time'])
    ratings = np.array(frame[['user_id', 'movie_id', 'score']].values,
                       dtype='int')
    initMatrix(ratings, 'output/matrix_test.npy')


def loadTestMatrix():
    """Load the dense test matrix (expected under data/)."""
    return np.load('data/matrix_test.npy')
def saveSimMatrix():
    """Compute and save the user-user cosine-similarity matrix.

    Loads the dense training matrix, computes pairwise cosine similarity
    (row dot products normalised by the row norms) and writes it to
    output/matrix_sim_train.npy.

    Fixes: the original computed the full O(n^2 * m) dot product twice
    (once into an unused local, `data2`) and built the norm denominator
    with an extra matrix multiply; the dead work is removed and the
    denominator is the equivalent outer product of the row norms.
    """
    data = np.load('output/matrix_train.npy')
    norms = np.linalg.norm(data, axis=1)
    # np.outer(norms, norms) == dot(norms.reshape(n, 1), norms.reshape(1, n))
    matrix = np.dot(data, data.T) / np.outer(norms, norms)
    np.save('output/matrix_sim_train.npy', matrix)


def loadSimMatrix():
    """Load the saved similarity matrix (expected under data/)."""
    return np.load('data/matrix_sim_train.npy')
def getTestLen():
    """Return the number of rating rows in the raw test file."""
    test_frame = pd.read_table('data/netflix_test.txt', header=None,
                               delim_whitespace=True)
    return len(test_frame)
|
en
| 0.469341
|
#! python2 # coding: utf-8
| 2.820783
| 3
|
tests/test_preprocessing.py
|
thgbaum/plpred
| 0
|
6625450
|
<gh_stars>0
from plpred.preprocessing import compute_aa_composition, generate_aa_composition_df
def test_compute_aa_composition_result_simple_homopolymer():
    """A homopolymer is 100% its single residue."""
    composition = compute_aa_composition('AAAAAA')
    assert composition['A'] == 1


def test_compute_aa_composition_result_complex_heteropolymer():
    """Four distinct residues each make up a quarter of the sequence."""
    composition = compute_aa_composition('AWGY')
    for residue in ('A', 'W', 'G', 'Y'):
        assert composition[residue] == 0.25


def test_compute_aa_composition_return_type():
    """The composition is reported as a plain dict."""
    composition = compute_aa_composition('AWGY')
    assert isinstance(composition, dict)
def test_generate_aa_composition_df_column_number():
    """20 amino-acid columns plus the membrane label column."""
    frame = generate_aa_composition_df('data/raw/membrane.fasta',
                                       membrane_label=1)
    assert frame.shape[1] == 21


def test_generate_aa_composition_df_membrane_column():
    """The label column is named 'membrane'."""
    frame = generate_aa_composition_df('data/raw/membrane.fasta',
                                       membrane_label=1)
    assert 'membrane' in frame.columns


def test_generate_aa_composition_df_membrane_column_values():
    """Every row carries exactly the membrane label passed in."""
    for label in (0, 1, 2, 3, 4):
        frame = generate_aa_composition_df('data/raw/membrane.fasta',
                                           membrane_label=label)
        assert all(frame['membrane'] == label)
|
from plpred.preprocessing import compute_aa_composition, generate_aa_composition_df
def test_compute_aa_composition_result_simple_homopolymer():
    """A homopolymer is 100% its single residue."""
    composition = compute_aa_composition('AAAAAA')
    assert composition['A'] == 1


def test_compute_aa_composition_result_complex_heteropolymer():
    """Four distinct residues each make up a quarter of the sequence."""
    composition = compute_aa_composition('AWGY')
    for residue in ('A', 'W', 'G', 'Y'):
        assert composition[residue] == 0.25


def test_compute_aa_composition_return_type():
    """The composition is reported as a plain dict."""
    composition = compute_aa_composition('AWGY')
    assert isinstance(composition, dict)
def test_generate_aa_composition_df_column_number():
    """20 amino-acid columns plus the membrane label column."""
    frame = generate_aa_composition_df('data/raw/membrane.fasta',
                                       membrane_label=1)
    assert frame.shape[1] == 21


def test_generate_aa_composition_df_membrane_column():
    """The label column is named 'membrane'."""
    frame = generate_aa_composition_df('data/raw/membrane.fasta',
                                       membrane_label=1)
    assert 'membrane' in frame.columns


def test_generate_aa_composition_df_membrane_column_values():
    """Every row carries exactly the membrane label passed in."""
    for label in (0, 1, 2, 3, 4):
        frame = generate_aa_composition_df('data/raw/membrane.fasta',
                                           membrane_label=label)
        assert all(frame['membrane'] == label)
|
none
| 1
| 2.324775
| 2
|