hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7956725b7b05c80f94a04b71cb40587319a509c5 | 4,852 | py | Python | python/ray/internal/internal_api.py | Crissman/ray | 2092b097eab41b118a117fdfadd0fe664db41f63 | [
"Apache-2.0"
] | 3 | 2021-06-22T19:57:41.000Z | 2021-06-23T07:16:44.000Z | python/ray/internal/internal_api.py | h453693821/ray | 9eb79727aa6ad94b01f8b660b83e1182555a89f6 | [
"Apache-2.0"
] | 72 | 2021-02-06T08:07:16.000Z | 2022-03-26T07:17:49.000Z | python/ray/internal/internal_api.py | h453693821/ray | 9eb79727aa6ad94b01f8b660b83e1182555a89f6 | [
"Apache-2.0"
] | 2 | 2021-05-05T21:05:16.000Z | 2021-06-22T21:16:03.000Z | import ray
import ray.worker
from ray import profiling
__all__ = ["free", "global_gc"]
MAX_MESSAGE_LENGTH = ray._config.max_grpc_message_size()
def global_gc():
"""Trigger gc.collect() on all workers in the cluster."""
worker = ray.worker.global_worker
worker.core_worker.global_gc()
def memory_summary(node_manager_address=None,
node_manager_port=None,
stats_only=False):
"""Returns a formatted string describing memory usage in the cluster."""
import grpc
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info, that Raylet internally
# asks all nodes in the cluster for memory stats.
if (node_manager_address is None or node_manager_port is None):
raylet = ray.nodes()[0]
raylet_address = "{}:{}".format(raylet["NodeManagerAddress"],
raylet["NodeManagerPort"])
else:
raylet_address = "{}:{}".format(node_manager_address,
node_manager_port)
channel = grpc.insecure_channel(
raylet_address,
options=[
("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
reply = stub.FormatGlobalMemoryInfo(
node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)
store_summary = "--- Aggregate object store stats across all nodes ---\n"
store_summary += (
"Plasma memory usage {} MiB, {} objects, {}% full\n".format(
int(reply.store_stats.object_store_bytes_used / (1024 * 1024)),
reply.store_stats.num_local_objects,
round(
100 * reply.store_stats.object_store_bytes_used /
reply.store_stats.object_store_bytes_avail, 2)))
if reply.store_stats.spill_time_total_s > 0:
store_summary += (
"Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n".
format(
int(reply.store_stats.spilled_bytes_total / (1024 * 1024)),
reply.store_stats.spilled_objects_total,
int(reply.store_stats.spilled_bytes_total / (1024 * 1024) /
reply.store_stats.spill_time_total_s)))
if reply.store_stats.restore_time_total_s > 0:
store_summary += (
"Restored {} MiB, {} objects, avg read throughput {} MiB/s\n".
format(
int(reply.store_stats.restored_bytes_total / (1024 * 1024)),
reply.store_stats.restored_objects_total,
int(reply.store_stats.restored_bytes_total / (1024 * 1024) /
reply.store_stats.restore_time_total_s)))
if reply.store_stats.consumed_bytes > 0:
store_summary += ("Objects consumed by Ray tasks: {} MiB.".format(
int(reply.store_stats.consumed_bytes / (1024 * 1024))))
if stats_only:
return store_summary
return reply.memory_summary + "\n" + store_summary
def free(object_refs, local_only=False):
"""Free a list of IDs from the in-process and plasma object stores.
This function is a low-level API which should be used in restricted
scenarios.
If local_only is false, the request will be send to all object stores.
This method will not return any value to indicate whether the deletion is
successful or not. This function is an instruction to the object store. If
some of the objects are in use, the object stores will delete them later
when the ref count is down to 0.
Examples:
>>> x_id = f.remote()
>>> ray.get(x_id) # wait for x to be created first
>>> free([x_id]) # unpin & delete x globally
Args:
object_refs (List[ObjectRef]): List of object refs to delete.
local_only (bool): Whether only deleting the list of objects in local
object store or all object stores.
"""
worker = ray.worker.global_worker
if isinstance(object_refs, ray.ObjectRef):
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise TypeError("free() expects a list of ObjectRef, got {}".format(
type(object_refs)))
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"Attempting to call `free` on the value {}, "
"which is not an ray.ObjectRef.".format(object_ref))
worker.check_connected()
with profiling.profile("ray.free"):
if len(object_refs) == 0:
return
worker.core_worker.free_objects(object_refs, local_only)
| 40.099174 | 78 | 0.643446 |
795673df415307148cd68265b981e0cfbde63702 | 25,225 | py | Python | src/dataset_SGFN.py | orsveri/3DSSG | 9f0bb09389769e0a0bf7e85eb997545fe2221049 | [
"BSD-2-Clause"
] | 38 | 2021-05-06T14:22:17.000Z | 2022-03-22T06:07:01.000Z | src/dataset_SGFN.py | orsveri/3DSSG | 9f0bb09389769e0a0bf7e85eb997545fe2221049 | [
"BSD-2-Clause"
] | 23 | 2021-05-11T08:44:56.000Z | 2022-02-26T18:08:22.000Z | src/dataset_SGFN.py | orsveri/3DSSG | 9f0bb09389769e0a0bf7e85eb997545fe2221049 | [
"BSD-2-Clause"
] | 7 | 2021-07-16T08:20:33.000Z | 2022-03-24T14:36:43.000Z | if __name__ == '__main__' and __package__ is None:
from os import sys
sys.path.append('../')
import torch.utils.data as data
import os, random, torch, json, trimesh
import numpy as np
import multiprocessing as mp
from utils import util_ply, util_data, util, define
from data_processing import compute_weight_occurrences
import op_utils
def load_mesh(path,label_file,use_rgb,use_normal):
result=dict()
if label_file == 'labels.instances.align.annotated.v2.ply' or label_file == 'labels.instances.align.annotated.ply':
if use_rgb:
plydata = util_ply.load_rgb(path)
else:
plydata = trimesh.load(os.path.join(path,label_file), process=False)
points = np.array(plydata.vertices)
instances = util_ply.read_labels(plydata).flatten()
if use_rgb:
r = plydata.metadata['ply_raw']['vertex']['data']['red']
g = plydata.metadata['ply_raw']['vertex']['data']['green']
b = plydata.metadata['ply_raw']['vertex']['data']['blue']
rgb = np.stack([ r,g,b]).squeeze().transpose()
points = np.concatenate((points, rgb), axis=1)
if use_normal:
nx = plydata.metadata['ply_raw']['vertex']['data']['nx']
ny = plydata.metadata['ply_raw']['vertex']['data']['ny']
nz = plydata.metadata['ply_raw']['vertex']['data']['nz']
normal = np.stack([ nx,ny,nz ]).squeeze().transpose()
points = np.concatenate((points, normal), axis=1)
result['points']=points
result['instances']=instances
else:# label_file.find('inseg')>=0 or label_file == 'cvvseg.ply':
plydata = trimesh.load(os.path.join(path,label_file), process=False)
points = np.array(plydata.vertices)
instances = plydata.metadata['ply_raw']['vertex']['data']['label'].flatten()
if use_rgb:
rgbs = np.array(plydata.colors)[:,:3] / 255.0 * 2 - 1.0
points = np.concatenate((points, rgbs), axis=1)
if use_normal:
nx = plydata.metadata['ply_raw']['vertex']['data']['nx']
ny = plydata.metadata['ply_raw']['vertex']['data']['ny']
nz = plydata.metadata['ply_raw']['vertex']['data']['nz']
normal = np.stack([ nx,ny,nz ]).squeeze().transpose()
points = np.concatenate((points, normal), axis=1)
result['points']=points
result['instances']=instances
# else:
# raise NotImplementedError('')
return result
def dataset_loading_3RScan(root:str, pth_selection:str,split:str,class_choice:list=None):
pth_catfile = os.path.join(pth_selection, 'classes.txt')
classNames = util.read_txt_to_list(pth_catfile)
pth_relationship = os.path.join(pth_selection, 'relationships.txt')
util.check_file_exist(pth_relationship)
relationNames = util.read_relationships(pth_relationship)
selected_scans=set()
data = dict()
if split == 'train_scans' :
selected_scans = selected_scans.union(util.read_txt_to_list(os.path.join(pth_selection,'train_scans.txt')))
with open(os.path.join(root, 'relationships_train.json'), "r") as read_file:
data1 = json.load(read_file)
elif split == 'validation_scans':
selected_scans = selected_scans.union(util.read_txt_to_list(os.path.join(pth_selection,'validation_scans.txt')))
with open(os.path.join(root, 'relationships_validation.json'), "r") as read_file:
data1 = json.load(read_file)
elif split == 'test_scans':
selected_scans = selected_scans.union(util.read_txt_to_list(os.path.join(pth_selection,'test_scans.txt')))
with open(os.path.join(root, 'relationships_test.json'), "r") as read_file:
data1 = json.load(read_file)
else:
raise RuntimeError('unknown split type.')
# with open(os.path.join(root, 'relationships_train.json'), "r") as read_file:
# data1 = json.load(read_file)
# with open(os.path.join(root, 'relationships_validation.json'), "r") as read_file:
# data2 = json.load(read_file)
# with open(os.path.join(root, 'relationships_test.json'), "r") as read_file:
# data3 = json.load(read_file)
data['scans'] = data1['scans']# + data2['scans'] + data3['scans']
if 'neighbors' in data1:
data['neighbors'] = data1['neighbors']#{**data1['neighbors'], **data2['neighbors'], **data3['neighbors']}
return classNames, relationNames, data, selected_scans
class SGFNDataset (data.Dataset):
def __init__(self,
config,
split='train',
multi_rel_outputs=True,
shuffle_objs=True,
use_rgb = False,
use_normal = False,
load_cache = False,
sample_in_runtime=True,
for_eval = False,
max_edges = -1,
data_augmentation=True):
assert split in ['train_scans', 'validation_scans','test_scans']
self.config = config
self.mconfig = config.dataset
self.use_data_augmentation=data_augmentation
self.root = self.mconfig.root
self.root_3rscan = define.DATA_PATH
try:
self.root_scannet = define.SCANNET_DATA_PATH
except:
self.root_scannet = None
selected_scans = set()
self.w_cls_obj=self.w_cls_rel=None
self.multi_rel_outputs = multi_rel_outputs
self.shuffle_objs = shuffle_objs
self.use_rgb = use_rgb
self.use_normal = use_normal
self.sample_in_runtime=sample_in_runtime
self.load_cache = load_cache
self.for_eval = for_eval
self.max_edges=max_edges
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
if isinstance(self.root, list):
with open(os.path.join(self.root[0],'args.json'), 'r') as f:
jf = json.load(f)
self.label_type = jf['label_type']
classNames = None
relationNames = None
data = None
selected_scans = None
for i in range(len(self.root)):
selection = self.mconfig.selection
if selection == "":
selection = self.root[i]
l_classNames, l_relationNames, l_data, l_selected_scans = \
dataset_loading_3RScan(self.root[i], selection, split)
if classNames is None:
classNames, relationNames, data, selected_scans = \
l_classNames, l_relationNames, l_data, l_selected_scans
else:
classNames = set(classNames).union(l_classNames)
relationNames= set(relationNames).union(l_relationNames)
data['scans'] = l_data['scans'] + data['scans']
data['neighbors'] = {**l_data['neighbors'], **data['neighbors']}
selected_scans = selected_scans.union(l_selected_scans)
classNames = list(classNames)
relationNames = list(relationNames)
else:
with open(os.path.join(self.root,'args.json'), 'r') as f:
jf = json.load(f)
self.label_type = jf['label_type']
if self.mconfig.selection == "":
self.mconfig.selection = self.root
classNames, relationNames, data, selected_scans = \
dataset_loading_3RScan(self.root, self.mconfig.selection, split)
self.relationNames = sorted(relationNames)
self.classNames = sorted(classNames)
if not multi_rel_outputs:
if 'none' not in self.relationNames:
self.relationNames.append('none')
wobjs, wrels, o_obj_cls, o_rel_cls = compute_weight_occurrences.compute(self.classNames, self.relationNames, data,selected_scans)
self.w_cls_obj = torch.from_numpy(np.array(o_obj_cls)).float().to(self.config.DEVICE)
self.w_cls_rel = torch.from_numpy(np.array(o_rel_cls)).float().to(self.config.DEVICE)
if not multi_rel_outputs:
self.w_cls_rel[-1] = self.w_cls_rel.max()*10
if False:
''' 1/log(x)'''
self.w_cls_obj = torch.abs(1.0 / (torch.log(self.w_cls_obj)+1))
self.w_cls_rel = torch.abs(1.0 / (torch.log(self.w_cls_rel)+1))
else:
''' inverse sum'''
self.w_cls_obj = self.w_cls_obj.sum() / (self.w_cls_obj + 1) /self.w_cls_obj.sum()
self.w_cls_rel = self.w_cls_rel.sum() / (self.w_cls_rel + 1) /self.w_cls_rel.sum()
self.w_cls_obj /= self.w_cls_obj.max()
self.w_cls_rel /= self.w_cls_rel.max()
if self.config.VERBOSE:
print('=== {} classes ==='.format(len(self.classNames)))
for i in range(len(self.classNames)):
print('|{0:>3d} {1:>20s}'.format(i,self.classNames[i]),end='')
if self.w_cls_obj is not None:
print(':{0:>1.5f}|'.format(self.w_cls_obj[i]),end='')
if (i+1) % 2 ==0:
print('')
print('')
print('=== {} relationships ==='.format(len(self.relationNames)))
for i in range(len(self.relationNames)):
print('|{0:>3d} {1:>20s}'.format(i,self.relationNames[i]),end=' ')
if self.w_cls_rel is not None:
print('{0:>1.5f}|'.format(self.w_cls_rel[i]),end='')
if (i+1) % 2 ==0:
print('')
print('')
self.relationship_json, self.objs_json, self.scans, self.nns = self.read_relationship_json(data, selected_scans)
if self.config.VERBOSE:
print('num of data:',len(self.scans))
assert(len(self.scans)>0)
if sample_in_runtime:
assert(self.nns is not None)
self.dim_pts = 3
if self.use_rgb:
self.dim_pts += 3
if self.use_normal:
self.dim_pts += 3
self.cache_data = dict()
if load_cache:
pool = mp.Pool(8)
pool.daemon = True
# resutls=dict()
for scan_id in self.scans:
scan_id_no_split = scan_id.rsplit('_',1)[0]
if 'scene' in scan_id:
path = os.path.join(self.root_scannet, scan_id_no_split)
else:
path = os.path.join(self.root_3rscan, scan_id_no_split)
if scan_id_no_split not in self.cache_data:
self.cache_data[scan_id_no_split] = pool.apply_async(load_mesh,
(path, self.mconfig.label_file,self.use_rgb,self.use_normal))
pool.close()
pool.join()
for key, item in self.cache_data.items():
self.cache_data[key] = item.get()
def data_augmentation(self, points):
# random rotate
matrix= np.eye(3)
matrix[0:3,0:3] = op_utils.rotation_matrix([0,0,1], np.random.uniform(0,2*np.pi,1))
centroid = points[:,:3].mean(0)
points[:,:3] -= centroid
points[:,:3] = np.dot(points[:,:3], matrix.T)
if self.use_normal:
ofset=3
if self.use_rgb:
ofset+=3
points[:,ofset:3+ofset] = np.dot(points[:,ofset:3+ofset], matrix.T)
## Add noise
# ## points
# noise = np.random.normal(0,1e-3,[points.shape[0],3]) # 1 mm std
# points[:,:3] += noise
# ## colors
# if self.use_rgb:
# noise = np.random.normal(0,0.078,[points.shape[0],3])
# colors = points[:,3:6]
# colors += noise
# colors[np.where(colors>1)] = 1
# colors[np.where(colors<-1)] = -1
# ## normals
# if self.use_normal:
# ofset=3
# if self.use_rgb:
# ofset+=3
# normals = points[:,ofset:3+ofset]
# normals = np.dot(normals, matrix.T)
# noise = np.random.normal(0,1e-4,[points.shape[0],3])
# normals += noise
# normals = normals/ np.linalg.norm(normals)
return points
def __getitem__(self, index):
scan_id = self.scans[index]
scan_id_no_split = scan_id.rsplit('_',1)[0]
instance2labelName = self.objs_json[scan_id]
if self.load_cache:
data = self.cache_data[scan_id_no_split]
else:
if 'scene' in scan_id:
path = os.path.join(self.root_scannet, scan_id_no_split)
else:
path = os.path.join(self.root_3rscan, scan_id_no_split)
data = load_mesh(path, self.mconfig.label_file,self.use_rgb,self.use_normal)
points = data['points']
instances = data['instances']
instances_id = list(np.unique(instances))
if self.use_data_augmentation and not self.for_eval:
points = self.data_augmentation(points)
if self.sample_in_runtime:
if not self.for_eval:
sample_num_nn=1
sample_num_seed=1
if "sample_num_nn" in self.mconfig:
sample_num_nn = self.mconfig.sample_num_nn
if "sample_num_seed" in self.mconfig:
sample_num_seed = self.mconfig.sample_num_seed
filtered_nodes = util_data.build_neighbor(self.nns[scan_id_no_split], instance2labelName,
sample_num_nn, sample_num_seed) # select 1 node and include their neighbor nodes n times.
else:
selected_nodes = list(self.objs_json[scan_id].keys())
filtered_nodes = selected_nodes # use all nodes
edge_indices = util_data.build_edge_from_selection(filtered_nodes, self.nns[scan_id_no_split], max_edges_per_node=-1)
instances_id = list(filtered_nodes)
if self.mconfig.drop_edge>0 and not self.for_eval:
percentage = np.random.uniform(low=1-self.mconfig.drop_edge, high=1.0,size=1)[0]
num_edge = int(float(len(edge_indices))*percentage//1)
if num_edge > 0:
choices = np.random.choice(range(len(edge_indices)),num_edge,replace=False).tolist()
edge_indices = [edge_indices[t] for t in choices]
if self.for_eval and self.mconfig.drop_edge_eval > 0:
percentage = 1.0-self.mconfig.drop_edge
num_edge = int(float(len(edge_indices))*percentage//1)
if num_edge > 0:
choices = np.random.choice(range(len(edge_indices)),num_edge,replace=False).tolist()
edge_indices = [edge_indices[t] for t in choices]
if self.max_edges > 0 and len(edge_indices) > self.max_edges:
choices = np.random.choice(range(len(edge_indices)),self.max_edges,replace=False).tolist()
edge_indices = [edge_indices[t] for t in choices]
if 0 in instances_id:
instances_id.remove(0)
if self.shuffle_objs:
random.shuffle(instances_id)
instance2mask = {}
instance2mask[0] = 0
rel_json = self.relationship_json[scan_id]
'''
Find instances we care abot. Build instance2mask and cat list
instance2mask maps instances to a mask id. to randomize the order of instance in training.
'''
cat = []
counter = 0
selected_instances = list(self.objs_json[scan_id].keys())
filtered_instances = list()
for i in range(len(instances_id)):
instance_id = instances_id[i]
class_id = -1
if instance_id not in selected_instances:
instance2mask[instance_id] = 0
continue
instance_labelName = instance2labelName[instance_id]
if instance_labelName in self.classNames:
class_id = self.classNames.index(instance_labelName)
if class_id != -1:
counter += 1
instance2mask[instance_id] = counter
else:
instance2mask[instance_id] = 0
# mask to cat:
if (class_id >= 0) and (instance_id > 0): # insstance 0 is unlabeled.
filtered_instances.append(instance_id)
cat.append(class_id)
'''Map edge indices to mask indices'''
if self.sample_in_runtime:
edge_indices = [[instance2mask[edge[0]]-1,instance2mask[edge[1]]-1] for edge in edge_indices ]
else:
''' Build fully connected edges '''
edge_indices = list()
max_edges=-1
for n in range(len(cat)):
for m in range(len(cat)):
if n == m:continue
edge_indices.append([n,m])
if max_edges>0 and len(edge_indices) > max_edges :
# for eval, do not drop out any edges.
indices = list(np.random.choice(len(edge_indices),self.max_edges,replace=False))
edge_indices = edge_indices[indices]
''' random sample points '''
use_obj_context=False #TODO: not here
obj_points = torch.zeros([len(cat), self.mconfig.num_points, self.dim_pts])
descriptor = torch.zeros([len(cat), 11])
for i in range(len(filtered_instances)):
instance_id = filtered_instances[i]
obj_pointset = points[np.where(instances== instance_id)[0], :]
if use_obj_context:
min_box = np.min(obj_pointset[:,:3], 0) - 0.02
max_box = np.max(obj_pointset[:,:3], 0) + 0.02
filter_mask = (points[:,0] > min_box[0]) * (points[:,0] < max_box[0]) \
* (points[:,1] > min_box[1]) * (points[:,1] < max_box[1]) \
* (points[:,2] > min_box[2]) * (points[:,2] < max_box[2])
obj_pointset = points[np.where(filter_mask > 0)[0], :]
if len(obj_pointset) == 0:
print('scan_id:',scan_id)
print('selected_instances:',len(selected_instances))
print('filtered_instances:',len(filtered_instances))
print('instance_id:',instance_id)
choice = np.random.choice(len(obj_pointset), self.mconfig.num_points, replace= len(obj_pointset) < self.mconfig.num_points)
obj_pointset = obj_pointset[choice, :]
descriptor[i] = op_utils.gen_descriptor(torch.from_numpy(obj_pointset)[:,:3])
obj_pointset = torch.from_numpy(obj_pointset.astype(np.float32))
obj_pointset[:,:3] = self.norm_tensor(obj_pointset[:,:3])
obj_points[i] = obj_pointset
obj_points = obj_points.permute(0,2,1)
# noise = torch.FloatTensor(obj_points.shape).normal_(0,0.005)
# obj_points+=noise
''' Build rel class GT '''
if self.multi_rel_outputs:
adj_matrix_onehot = np.zeros([len(cat), len(cat), len(self.relationNames)])
else:
adj_matrix = np.zeros([len(cat), len(cat)])
adj_matrix += len(self.relationNames)-1 #set all to none label.
if not self.sample_in_runtime:
edge_indices = list()
max_edges=-1
for n in range(len(cat)):
for m in range(len(cat)):
if n == m:continue
edge_indices.append([n,m])
if max_edges>0 and len(edge_indices) > max_edges and not self.for_eval:
# for eval, do not drop out any edges.
indices = list(np.random.choice(len(edge_indices),max_edges,replace=False))
edge_indices = edge_indices[indices]
for r in rel_json:
if r[0] not in instance2mask or r[1] not in instance2mask: continue
index1 = instance2mask[r[0]]-1
index2 = instance2mask[r[1]]-1
if self.sample_in_runtime:
if [index1,index2] not in edge_indices: continue
if r[3] not in self.relationNames:
continue
r[2] = self.relationNames.index(r[3]) # remap the index of relationships in case of custom relationNames
# assert(r[2] == self.relationNames.index(r[3]))
if index1 >= 0 and index2 >= 0:
if self.multi_rel_outputs:
adj_matrix_onehot[index1, index2, r[2]] = 1
else:
adj_matrix[index1, index2] = r[2]
if self.multi_rel_outputs:
rel_dtype = np.float32
adj_matrix_onehot = torch.from_numpy(np.array(adj_matrix_onehot, dtype=rel_dtype))
else:
rel_dtype = np.int64
adj_matrix = torch.from_numpy(np.array(adj_matrix, dtype=rel_dtype))
if self.multi_rel_outputs:
gt_rels = torch.zeros(len(edge_indices), len(self.relationNames),dtype = torch.float)
else:
gt_rels = torch.zeros(len(edge_indices),dtype = torch.long)
for e in range(len(edge_indices)):
edge = edge_indices[e]
index1 = edge[0]
index2 = edge[1]
if self.multi_rel_outputs:
gt_rels[e,:] = adj_matrix_onehot[index1,index2,:]
else:
gt_rels[e] = adj_matrix[index1,index2]
''' Build obj class GT '''
gt_class = torch.from_numpy(np.array(cat))
edge_indices = torch.tensor(edge_indices,dtype=torch.long)
return scan_id, instance2mask, obj_points, edge_indices, gt_class, gt_rels, descriptor
def __len__(self):
return len(self.scans)
def norm_tensor(self, points):
assert points.ndim == 2
assert points.shape[1] == 3
centroid = torch.mean(points, dim=0) # N, 3
points -= centroid # n, 3, npts
furthest_distance = points.pow(2).sum(1).sqrt().max() # find maximum distance for each n -> [n]
points /= furthest_distance
return points
def read_relationship_json(self, data, selected_scans:list):
rel = dict()
objs = dict()
scans = list()
nns = None
if 'neighbors' in data:
nns = data['neighbors']
for scan in data['scans']:
if scan["scan"] == 'fa79392f-7766-2d5c-869a-f5d6cfb62fc6':
if self.mconfig.label_file == "labels.instances.align.annotated.v2.ply":
'''
In the 3RScanV2, the segments on the semseg file and its ply file mismatch.
This causes error in loading data.
To verify this, run check_seg.py
'''
continue
if scan['scan'] not in selected_scans:
continue
relationships = []
for realationship in scan["relationships"]:
relationships.append(realationship)
objects = {}
for k, v in scan["objects"].items():
objects[int(k)] = v
# filter scans that doesn't have the classes we care
instances_id = list(objects.keys())
valid_counter = 0
for instance_id in instances_id:
instance_labelName = objects[instance_id]
if instance_labelName in self.classNames: # is it a class we care about?
valid_counter+=1
# break
if valid_counter < 2: # need at least two nodes
continue
rel[scan["scan"] + "_" + str(scan["split"])] = relationships
scans.append(scan["scan"] + "_" + str(scan["split"]))
objs[scan["scan"]+"_"+str(scan['split'])] = objects
return rel, objs, scans, nns
if __name__ == '__main__':
from config import Config
config = Config('../config_example.json')
config.dataset.root = "../data/example_data/"
config.dataset.label_file = 'inseg.ply'
sample_in_runtime = True
config.dataset.data_augmentation=True
split_type = 'validation_scans' # ['train_scans', 'validation_scans','test_scans']
dataset = SGFNDataset (config,use_rgb=True,use_normal = True,
load_cache=False,sample_in_runtime=sample_in_runtime,
multi_rel_outputs=False,
for_eval=False, split=split_type,data_augmentation=config.dataset.data_augmentation)
items = dataset.__getitem__(0)
print(items) | 44.09965 | 147 | 0.561863 |
795673fe837cb2f4214b4cd7ea37670c9c2af46b | 1,979 | py | Python | osrsmath/examples/combat/old/part_II/models/generate.py | Palfore/OSRSmath | 373eb1e7f9702b98de318b3c708084353626a177 | [
"MIT"
] | 5 | 2020-06-30T06:51:25.000Z | 2021-11-16T01:04:48.000Z | osrsmath/examples/combat/old/part_II/models/generate.py | Palfore/OSRS-Combat | 373eb1e7f9702b98de318b3c708084353626a177 | [
"MIT"
] | 15 | 2020-06-19T14:36:38.000Z | 2021-04-16T16:17:08.000Z | osrsmath/examples/combat/old/part_II/models/generate.py | Palfore/OSRS-Combat | 373eb1e7f9702b98de318b3c708084353626a177 | [
"MIT"
] | null | null | null | from osrsmath.results.part_II.colors import colors
from osrsmath.combat.successful_hits import *
from osrsmath.results.part_II.generate_simulation import load_dataset
from pprint import pprint
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import osrsmath.config as config
if __name__ == '__main__':
showing = len(sys.argv) >= 2 and sys.argv[1] == 'show'
m_min, m_max = (1, 110)
h_min, h_max = (1, 255)
max_hits = np.array(range(m_min, m_max+1))
healths = np.array(range(h_min, h_max+1))
Ms, Hs = np.meshgrid(max_hits, healths)
sim = load_dataset(N=100_000)
def plot_error(label, inverse):
""" inverse: hinv(h, m) """
def err(a, b):
# return abs(a - b)
return abs(1 - a /b)*100
print(label)
Z = np.array([np.array([err(inverse(h, m), sim[str(h)][str(m)]) for m in max_hits]) for h in healths])
surf = ax.plot_wireframe(Ms, Hs, Z, color=colors[label], linewidth=1, label=f"{label}")
return {label: (np.average(Z), np.std(Z), np.max(Z), np.min(Z))}
for method in Model.__subclasses__():
if method.__name__ == 'Simulation':
continue
fig, ax = config.get_figure(33, 36, scale=3 if showing else 10)
plot_error(method.__name__, lambda h, m: method().turns_to_kill(h, m))
ax.tick_params(axis='z', labelsize=12)
ax.tick_params(axis='y', labelsize=12)
ax.tick_params(axis='x', labelsize=12)
ax.set_zlabel("Percent Error", fontsize=16, labelpad=20)
plt.xlabel("Max Hit", fontsize=16, labelpad=20)
plt.ylabel("Initial Health", fontsize=16, labelpad=20)
if showing:
plt.show()
else:
from pathlib import Path
file_name = str(Path(__file__).parent/method.__name__)
plt.savefig(f"{file_name}.png")
os.system(f"convert {file_name}.png -trim {file_name}.png")
# plt.savefig(f"{file_name}.pdf")
# os.system(f"pdfcrop {file_name}.pdf")
# os.rename(f"{file_name}-crop.pdf", f"{file_name}.pdf")
| 34.719298 | 105 | 0.67812 |
795674217f4c430d92705508e74ef26923c3a3ec | 1,066 | py | Python | .repo_tools/.repo_tmpl/svg_to_json.py | dnegorov/test-git | 3427ec94b7ca3c72a6eb108e523853fc0fa958d2 | [
"MIT"
] | null | null | null | .repo_tools/.repo_tmpl/svg_to_json.py | dnegorov/test-git | 3427ec94b7ca3c72a6eb108e523853fc0fa958d2 | [
"MIT"
] | null | null | null | .repo_tools/.repo_tmpl/svg_to_json.py | dnegorov/test-git | 3427ec94b7ca3c72a6eb108e523853fc0fa958d2 | [
"MIT"
] | null | null | null | import argparse
import json
parser = argparse.ArgumentParser(add_help=True,
description="SVG to JSON")
parent_parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
parser.add_argument("-if", "--input",
help="path to input file: svg_to_json -in <FILE>")
parser.add_argument("-of", "--output",
help="path to output file: svg_to_json -in <IN_FILE> -of <OUT_FILE>")
args = parser.parse_args()
print(args)
in_path = None
if args.input:
in_path = args.input
output = {}
if in_path is not None:
print(f"Converting {in_path}")
with open(in_path, "r") as in_file:
lines = in_file.readlines()
print(lines[1])
output = {"logoSvg": "".join(lines)}
print(output)
of_path = None
if args.output:
of_path = args.output
if of_path is not None:
print(f"Saving to {of_path}")
with open(of_path, "w") as of_file:
json.dump(output, of_file)
| 26.65 | 89 | 0.625704 |
795674466c631d88b9ff440e35b7215e1f7a985b | 1,719 | py | Python | docker_links/internals.py | wlonk/docker_links | 387485bfd6cc73a86b624abda5be7dc9e3ed1c2c | [
"BSD-3-Clause"
] | null | null | null | docker_links/internals.py | wlonk/docker_links | 387485bfd6cc73a86b624abda5be7dc9e3ed1c2c | [
"BSD-3-Clause"
] | null | null | null | docker_links/internals.py | wlonk/docker_links | 387485bfd6cc73a86b624abda5be7dc9e3ed1c2c | [
"BSD-3-Clause"
] | 1 | 2019-11-23T15:53:00.000Z | 2019-11-23T15:53:00.000Z | # -*- coding: utf-8 -*-
import os
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
def docker_link(env_variable, overrides=None):
if overrides is None:
overrides = {}
url = urlparse(os.environ.get(env_variable, ''))
return _docker_link_helper(
url_to_dict(url),
overrides,
)
def build_netloc(username, password, hostname, port):
if not password:
creds = "{username}"
else:
creds = "{username}:{password}"
if not port:
host = "{hostname}"
else:
host = "{hostname}:{port}"
if not (username or password):
return host.format(hostname=hostname, port=port)
else:
return (creds + "@" + host).format(
username=username,
password=password,
hostname=hostname,
port=port,
)
def url_to_dict(url):
return {
"scheme": url.scheme,
"netloc": url.netloc,
"path": url.path,
"query": url.query,
"fragment": url.fragment,
"username": url.username,
"password": url.password,
"hostname": url.hostname,
"password": url.password,
}
def dict_to_url(url_dict):
return (
url_dict["scheme"],
build_netloc(
url_dict["username"],
url_dict["password"],
url_dict["hostname"],
url_dict["port"],
),
url_dict["path"],
url_dict["query"],
url_dict["fragment"],
)
def _docker_link_helper(url_dict, overrides):
url_dict = url_dict.copy() # Let's avoid mutation, shall we?
url_dict.update(overrides)
return urlparse.urlunsplit(dict_to_url(url_dict))
| 23.22973 | 65 | 0.571262 |
795674ab322e56eda3c541b6f4942e886fab7300 | 6,674 | py | Python | mcutils/input_validation.py | macanepa/mcutils | 16e71f93ce61d76a16092af8c38928036a4ef409 | [
"MIT"
] | null | null | null | mcutils/input_validation.py | macanepa/mcutils | 16e71f93ce61d76a16092af8c38928036a4ef409 | [
"MIT"
] | null | null | null | mcutils/input_validation.py | macanepa/mcutils | 16e71f93ce61d76a16092af8c38928036a4ef409 | [
"MIT"
] | null | null | null | from .print_manager import mcprint, Color
import logging
def exit_application(text: str = None, enter_quit: bool =False):
"""Exit application
Args:
text (str): Text to display before exiting
enter_quit (bool): If true, user must press enter before exiting
"""
if text:
mcprint(text=text, color=Color.YELLOW)
logging.info('Exiting Application Code:0')
if enter_quit:
get_input('Press Enter to exit...')
exit(0)
def print_error(operators_list=None, contains_list=None, return_type=None):
    """Log warnings describing why an input failed validation.

    Comparison constraints are reported per operator (value-based wording
    for int, length-based for str); literal options are listed in red.
    """
    if operators_list:
        if return_type == int:
            template = 'input must be {}'
        elif return_type == str:
            template = 'input length must be {}'
        else:
            # Other return types produce no operator warnings.
            template = None
        if template is not None:
            for operator in operators_list:
                logging.warning(template.format(operator))
    if contains_list:
        logging.warning('input must be one of the following')
        for option in contains_list:
            mcprint(text='\t{}'.format(option), color=Color.RED)
def input_validation(user_input, return_type, valid_options):
    """Validate a raw input string against type and option constraints.

    Args:
        user_input (str): The raw text typed by the user.
        return_type (type): ``int`` or ``str``.  For ``int`` the input must
            be numeric and the numeric value is compared; for ``str`` the
            *length* of the input is compared.
        valid_options (list): Mixed list of comparison constraints
            (e.g. ``'>=3'``, ``'<10'``, ``'!=5'``) and literal values the
            input may take.

    Returns:
        bool: True when every constraint passes, False otherwise.
    """
    if return_type == int:
        if not user_input.isnumeric():
            return False
        user_input = int(user_input)

    if valid_options:
        # Split comparison constraints from literal allowed values.
        operators_list = list(filter(lambda x: str(x).startswith(('<', '>', '==', '!=')), valid_options))
        contains_list = list(set(valid_options) - set(operators_list))

        for operator in operators_list:
            if not _operator_satisfied(operator, user_input, return_type):
                print_error(operators_list=operators_list, return_type=return_type)
                return False

        # Literal options: the input must be one of them.
        if len(contains_list) > 0:
            if user_input not in contains_list:
                return False
    return True


def _operator_satisfied(operator, value, return_type):
    """Check a single comparison constraint such as '>=3' or '!=10'.

    For int inputs the value itself is compared; for str inputs its length.
    Constraints are treated as satisfied for any other return type,
    matching the historical behaviour of the unrolled implementation.
    """
    # Two-character symbols must be tested before their one-character
    # prefixes ('<=' before '<', '>=' before '>').
    comparators = (
        ('<=', lambda a, b: a <= b),
        ('>=', lambda a, b: a >= b),
        ('==', lambda a, b: a == b),
        ('!=', lambda a, b: a != b),
        ('<', lambda a, b: a < b),
        ('>', lambda a, b: a > b),
    )
    for symbol, compare in comparators:
        if symbol in operator:
            bound = int(operator.replace(symbol, ''))
            if return_type == int:
                return compare(value, bound)
            if return_type == str:
                return compare(len(value), bound)
            return True
    return True
def get_input(format_: str = '>> ', text: str = None, can_exit: bool = True, exit_input: str= 'exit',
              valid_options: list = None, return_type: type = str,
              validation_function=None, color: Color = None):
    """Prompt the user repeatedly until a valid value is entered.

    Args:
        format_ (str): Prompt prefix shown before the cursor.
        text (str): Optional message displayed once, before prompting.
        can_exit (bool): Whether typing *exit_input* terminates the app.
        exit_input (str): Magic command that triggers application exit.
        valid_options (list): Constraints forwarded to input_validation.
        return_type (type): Expected type, forwarded to input_validation.
        validation_function (callable): Custom validator; when given it
            replaces the built-in input_validation entirely.
        color (Color): Color used when displaying *text*.

    Returns:
        str: The first input that passed validation.
    """
    if text:
        mcprint(text=text, color=color)
    while True:
        user_input = input(format_)
        # Emergency exit command.
        if user_input == exit_input:
            if can_exit:
                exit_application()
            else:
                logging.warning('Can\'t exit application now')
        # A custom validator takes precedence over the built-in rules.
        if validation_function:
            is_valid = validation_function(user_input)
        else:
            is_valid = input_validation(user_input=user_input,
                                        return_type=return_type,
                                        valid_options=valid_options)
        if is_valid:
            return user_input
        logging.warning('Not Valid Entry')
| 40.695122 | 118 | 0.567126 |
795675ad0ee581131e303a292d2c734522f2729e | 309 | py | Python | ecstremity/registries/registry.py | ddmills/ECStremity | afe36ea596c9fe75f8c44576459d5dd2422ec579 | [
"MIT"
] | null | null | null | ecstremity/registries/registry.py | ddmills/ECStremity | afe36ea596c9fe75f8c44576459d5dd2422ec579 | [
"MIT"
] | null | null | null | ecstremity/registries/registry.py | ddmills/ECStremity | afe36ea596c9fe75f8c44576459d5dd2422ec579 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import *
from collections import defaultdict
if TYPE_CHECKING:
from engine import Engine, EngineAdapter
class Registry(defaultdict):
    """Base registry: a ``defaultdict`` that keeps a reference to its engine.

    Subclasses use ``self.ecs`` to reach shared engine state.
    """
    def __init__(self, ecs: Union[Engine, EngineAdapter]) -> None:
        self.ecs: Union[Engine, EngineAdapter] = ecs
| 22.071429 | 66 | 0.754045 |
795675c114f315da527de79c8c869b63c38470ff | 8,932 | py | Python | pru/db/io.py | euctrl-pru/rt-python | da5d0040e250bd159845a0d43bf0b73eab368863 | [
"MIT"
] | null | null | null | pru/db/io.py | euctrl-pru/rt-python | da5d0040e250bd159845a0d43bf0b73eab368863 | [
"MIT"
] | null | null | null | pru/db/io.py | euctrl-pru/rt-python | da5d0040e250bd159845a0d43bf0b73eab368863 | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
"""
A python module that reads csv or geojson data files including
sectors, airports and fleet data.
"""
import csv
from pathlib import Path
from shapely.geometry import shape
import geojson
def _convert_json_coordinates(coordinates):
    """Convert a geojson geometry mapping into a one-element list holding
    the boundary's WKT string representation."""
    geometry = shape(coordinates)
    return [geometry.wkt]
def _convert_json_properties(properties):
"""
Convert a geojson expression of the properties of a sector into a
list of properties. Properties are in a dictionary.
The geojson has no object id field so we inject one.
"""
return [properties['AC_ID'], properties['AV_AIRSPACE_ID'],
properties['AV_ICAO_STATE_ID'], properties['MIN_FLIGHT_LEVEL'],
properties['MAX_FLIGHT_LEVEL'], properties['AV_NAME'],
properties['SECTOR_TYPE'], '0']
def _convert_json_custom_properties(properties):
"""
Convert a geojson expression of the properties of a user defined sector
into a list of properties. Properties are in a dictionary.
"""
return [properties['ORG_ID'], properties['USER_ID'],
properties['SECTOR_ID'], properties['LATITUDE'],
properties['LONGITUDE'], properties['RADIUS'],
properties['MIN_FLIGHT_LEVEL'], properties['MAX_FLIGHT_LEVEL'],
properties['IS_CYLINDER']]
def _convert_json(properties, coordinates):
    """
    Combine geojson sector properties and coordinates into one row:
    the flattened property values followed by a WKT boundary string.

    Parameters
    ----------
    properties The geojson properties mapping.
    coordinates The geojson geometry mapping for the sector shape.
    """
    row = _convert_json_properties(properties)
    row.extend(_convert_json_coordinates(coordinates))
    return row
def _convert_custom_json(properties, coordinates):
    """
    Combine geojson custom-sector properties and coordinates into one row:
    the flattened property values followed by a WKT boundary string.

    Parameters
    ----------
    properties The geojson properties mapping.
    coordinates The geojson geometry mapping for the sector shape.
    """
    row = _convert_json_custom_properties(properties)
    row.extend(_convert_json_coordinates(coordinates))
    return row
def _read_lazy_sectors_geojson(path):
    """
    Returns a generator - function is lazy.
    Given a path (as a text string) to a geojson file describing an airspace,
    reads in the file to rows of airspace definitions. Returns a generator
    over a list of rows.
    This function is a substitute for the csv reader.
    Parameters
    ----------
    path The string path to the geojson file.
    Use in the same way as the csv reader.
    """
    with open(path) as gjs:
        # First yield the header row
        yield ['AC_ID',
               'AV_AIRSPACE_ID',
               'AV_ICAO_STATE_ID',
               'MIN_FLIGHT_LEVEL',
               'MAX_FLIGHT_LEVEL',
               'AV_NAME',
               'SECTOR_TYPE',
               'OBJECTID',
               'WKT']
        geo_json_data = geojson.load(gjs)
        # One list per feature: flattened properties plus WKT boundary.
        for feature in geo_json_data['features']:
            yield _convert_json(feature['properties'], feature['geometry'])
def _read_lazy_custom_sectors_geojson(path):
    """
    Returns a generator - function is lazy.
    Given a path (as a text string) to a geojson file describing a user
    defined airspace, reads in the file to rows of airspace definitions.
    Returns a generator over a list of rows.
    This function is a substitute for the csv reader.
    Parameters
    ----------
    path The string path to the geojson file.
    Use in the same way as the csv reader.
    """
    with open(path) as gjs:
        # First yield the header row
        yield ['ORG_ID',
               'USER_ID',
               'SECTOR_ID',
               'LATITUDE',
               'LONGITUDE',
               'RADIUS',
               'MIN_FLIGHT_LEVEL',
               'MAX_FLIGHT_LEVEL',
               'IS_CYLINDER',
               'WKT']
        geo_json_data = geojson.load(gjs)
        for feature in geo_json_data['features']:
            # Bug fix: custom sectors carry ORG_ID/USER_ID/... properties,
            # so they must be converted with _convert_custom_json.  The
            # previous call to _convert_json expected AC_ID-style properties
            # (and left _convert_custom_json entirely unused), so custom
            # features would fail with KeyError('AC_ID').
            yield _convert_custom_json(feature['properties'], feature['geometry'])
def _read_lazy_CSV(file_path):
"""
Returns a generator - function is lazy.
Given a path to a csv file that has the first line as the field names,
returns a list of dictionaries of field name to value, one dictionary
per field, one list per row.
Parameters
----------
path The string path to the csv file.
Example of use :
gen = read_lazy_CSV(dataPath)
[next(gen) for _ in range(5)] The next five samples
next(gen) The next sample
"""
with open(file_path) as csvFile:
reader = csv.reader(csvFile)
for row in reader:
yield row
def read_sectors(path):
    """
    Read airspace sectors from the given string path; supports csv and
    geojson files.

    Returns a lazy generator whose first row is the header row, or an
    empty iterator when the path does not exist or has an unsupported
    file extension.

    Parameters
    ----------
    path The string path to the sector file, either csv or geojson.
    """
    file_path = Path(path)
    if not file_path.exists():
        return iter(())
    suffix = file_path.suffix
    if suffix == ".csv":
        return _read_lazy_CSV(file_path)
    if suffix == ".geojson":
        return _read_lazy_sectors_geojson(file_path)
    # Unsupported extension.
    return iter(())
def read_custom_sectors(path):
    """
    Read user defined airspace sectors from the given string path;
    supports csv and geojson files.

    Returns a lazy generator whose first row is the header row, or an
    empty iterator when the path does not exist or has an unsupported
    file extension.

    Parameters
    ----------
    path The string path to the sector file, either csv or geojson.
    """
    file_path = Path(path)
    if not file_path.exists():
        return iter(())
    # Dispatch on extension; anything unknown yields an empty iterator.
    dispatch = {
        ".csv": _read_lazy_CSV,
        ".geojson": _read_lazy_custom_sectors_geojson,
    }
    reader = dispatch.get(file_path.suffix)
    return reader(file_path) if reader else iter(())
def read_fleet_records(path):
    """
    Read a fleet data file from the given string path; csv only.

    Returns a lazy generator whose first row is the header row, or an
    empty iterator when the path does not exist or is not a csv file.

    Parameters
    ----------
    path The string path to the fleet csv file.
    """
    file_path = Path(path)
    if file_path.exists() and file_path.suffix == ".csv":
        return _read_lazy_CSV(file_path)
    return iter(())
def read_airports_records(path):
    """
    Read an airport data file from the given string path; csv only.

    Returns a lazy generator whose first row is the header row, or an
    empty iterator when the path does not exist or is not a csv file.

    Parameters
    ----------
    path The string path to the airports csv file.
    """
    file_path = Path(path)
    if not (file_path.exists() and file_path.suffix == ".csv"):
        return iter(())
    return _read_lazy_CSV(file_path)
795675dd2846cd79920511eb491ab9a6353b4fc1 | 1,666 | py | Python | scripts/wall_return.py | tomokiyasutome/pimouse_run_corridor | 24ec8938949656ff95735c5c598327e5ec931336 | [
"BSD-3-Clause"
] | null | null | null | scripts/wall_return.py | tomokiyasutome/pimouse_run_corridor | 24ec8938949656ff95735c5c598327e5ec931336 | [
"BSD-3-Clause"
] | null | null | null | scripts/wall_return.py | tomokiyasutome/pimouse_run_corridor | 24ec8938949656ff95735c5c598327e5ec931336 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#encoding: utf-8
import rospy,copy,math
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
from pimouse_ros.msg import LightSensorValues
class WallReturn():
    """Simple reactive controller: drive forward, back away and turn when
    the light sensors report a nearby wall.

    Thresholds of 50 are raw light-sensor readings -- presumably tuned for
    this robot; TODO confirm the units/scale against the sensor driver.
    """
    def __init__(self):
        # Publish velocity commands; keep the freshest sensor reading only.
        self.cmd_vel = rospy.Publisher('/cmd_vel',Twist,queue_size=1)
        self.sensor_values = LightSensorValues()
        rospy.Subscriber('/lightsensors', LightSensorValues, self.callback)
    def callback(self,messages):
        # Cache the latest sensor message for the control loop.
        self.sensor_values = messages
    def wall_front(self,ls):
        # Wall directly ahead (either forward-facing sensor).
        return ls.left_forward > 50 or ls.right_forward > 50
    def too_right(self,ls):
        # Wall close on the right side.
        return ls.right_side > 50
    def too_left(self,ls):
        # Wall close on the left side.
        return ls.left_side > 50
    def run(self):
        # 20 Hz control loop until ROS shutdown.
        rate = rospy.Rate(20)
        data = Twist()
        while not rospy.is_shutdown():
            if self.wall_front(self.sensor_values):
                # Reverse while spinning (positive z appears to be a left
                # turn here -- confirm against the robot's convention).
                data.linear.x = -0.2
                data.angular.z = math.pi
            elif self.too_right(self.sensor_values):
                data.linear.x = -0.2
                data.angular.z = math.pi
            elif self.too_left(self.sensor_values):
                data.linear.x = -0.2
                data.angular.z = - math.pi
            else:
                # Clear path: cruise straight ahead.
                data.linear.x = 0.2
                data.angular.z = 0.0
            self.cmd_vel.publish(data)
            rate.sleep()
if __name__ == '__main__':
    rospy.init_node('wall_return')
    # Require the motor services before starting; switch motors off on
    # shutdown and on before entering the control loop.
    rospy.wait_for_service('/motor_on')
    rospy.wait_for_service('/motor_off')
    rospy.on_shutdown(rospy.ServiceProxy('/motor_off',Trigger).call)
    rospy.ServiceProxy('/motor_on',Trigger).call()
    WallReturn().run()
79567615e2ef8d39900e5758b5c2ab70c0e5f27b | 2,670 | py | Python | src/ZAMPATHA_1.1-insert_api_into_elasticsearch_strict.py | delavet/SOworkspace | 74bbcfa62c7e293b2b02f23249ac408aa22b44af | [
"MIT"
] | 6 | 2020-12-31T06:13:56.000Z | 2022-03-27T16:26:12.000Z | src/ZAMPATHA_1.1-insert_api_into_elasticsearch_strict.py | delavet/SOworkspace | 74bbcfa62c7e293b2b02f23249ac408aa22b44af | [
"MIT"
] | null | null | null | src/ZAMPATHA_1.1-insert_api_into_elasticsearch_strict.py | delavet/SOworkspace | 74bbcfa62c7e293b2b02f23249ac408aa22b44af | [
"MIT"
] | null | null | null | import json
from collections import OrderedDict
import networkx as nx
from nltk.util import pr
from tqdm import tqdm
from elasticsearch import Elasticsearch
from util.concept_map.common import get_latest_concept_map
from util.config import JAVADOC_GLOBAL_NAME, Elasticsearch_host, Elasticsearch_port, API_ELASTIC_DOC_MAP_STORE_PATH
from util.constant import NodeType, NodeAttributes
from util.utils import get_api_extreme_short_name_from_entity_id, pre_tokenize
'''
2021-04-13 - Re-insert the API data into Elasticsearch using ZAMPATHA 1.1.
Difference from ZAMPATHA 1:
the insertion rules are made significantly stricter.
For fields and methods, only the method/attribute name is inserted; for classes, only the class name.
The rough idea: drop everything after '[', '(' or '<', then take the last token.
'''
# Module-level client, reused by the insert function and the __main__ query.
es = Elasticsearch(hosts=Elasticsearch_host, port=Elasticsearch_port)
def insert_api_concepts_into_elasticsearch(doc_name: str = JAVADOC_GLOBAL_NAME):
    """Index every typed node of the latest concept map into Elasticsearch.

    For each node that has a non-empty node type, builds a lower-cased bag
    of name tokens (dotted-name parts plus pre-tokenized sub-tokens) and
    indexes {name, description, node_name} under doc type 'api' with a
    sequential integer id.  The node -> id mapping is also written out as
    JSON to API_ELASTIC_DOC_MAP_STORE_PATH.
    """
    global es
    concept_map = get_latest_concept_map(doc_name)
    api_elastic_map = OrderedDict()
    index = -1
    for node in tqdm(concept_map.nodes):
        api_name = get_api_extreme_short_name_from_entity_id(node)
        description = concept_map.nodes[node][NodeAttributes.DESCRIPTION] if NodeAttributes.DESCRIPTION in concept_map.nodes[node].keys(
        ) else ''
        node_type = concept_map.nodes[node][NodeAttributes.Ntype] if NodeAttributes.Ntype in concept_map.nodes[node].keys(
        ) else ''
        # Skip untyped nodes entirely.
        if node_type == '':
            continue
        desc = description
        # Index everything lower-cased so lookups are case-insensitive.
        name_tokens = api_name.lower().split('.')
        pre_tokenized_tokens = pre_tokenize(
            ' '.join(api_name.split('.'))).lower().split(' ')
        # Append extra sub-tokens not already present in the dotted parts.
        for token in pre_tokenized_tokens:
            if token not in name_tokens and token != '':
                name_tokens.append(token)
        name = ' '.join(name_tokens)
        doc_body = {
            'name': name,
            'description': desc.lower(),
            'node_name': node
        }
        index += 1
        api_elastic_map[node] = index
        es.index(index=doc_name, doc_type='api', id=index, body=doc_body)
    with open(API_ELASTIC_DOC_MAP_STORE_PATH[JAVADOC_GLOBAL_NAME], 'w', encoding='utf-8') as wf:
        json.dump(api_elastic_map, wf, ensure_ascii=False, indent=2)
if __name__ == "__main__":
    insert_api_concepts_into_elasticsearch(JAVADOC_GLOBAL_NAME)
    print('insert over, have a try for search')
    # After inserting, run a sample fuzzy query to sanity-check the index.
    query_body = {
        'query': {
            'match': {
                'description': {
                    'query': 'arraylist',
                    'fuzziness': 'auto'
                }
            }
        },
        'from': 0,
        'size': 20
    }
    res = es.search(index=JAVADOC_GLOBAL_NAME, body=query_body)
    print(res)
| 34.675325 | 136 | 0.666292 |
79567932ec56efd5c1366625561aff318db9c1f0 | 56 | py | Python | lessons/modules/package/__init__.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | lessons/modules/package/__init__.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | 4 | 2018-12-19T13:41:12.000Z | 2019-01-14T15:11:11.000Z | lessons/modules/package/__init__.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | print('Package INIT file')
from .module1 import INIT_P
| 14 | 27 | 0.767857 |
795679462a24c49973b9a50317036f44cab4721c | 63 | py | Python | miscellaneous/a_00_package_dot_path_import/package_1/dir_1/module_1.py | BigMountainTiger/python-excercise-repository | 52a240faa66742ac160c9858ec4bf6a0b51aa248 | [
"MIT"
] | null | null | null | miscellaneous/a_00_package_dot_path_import/package_1/dir_1/module_1.py | BigMountainTiger/python-excercise-repository | 52a240faa66742ac160c9858ec4bf6a0b51aa248 | [
"MIT"
] | 1 | 2022-03-12T01:02:10.000Z | 2022-03-12T01:02:10.000Z | miscellaneous/a_00_package_dot_path_import/package_1/dir_1/module_1.py | BigMountainTiger/python-excercise-repository | 52a240faa66742ac160c9858ec4bf6a0b51aa248 | [
"MIT"
] | null | null | null | from ..dir_2 import module_2
def print():
module_2.printIt() | 15.75 | 28 | 0.730159 |
79567a863a852bc8109c3d6ae686e504d8136992 | 5,530 | py | Python | fabfile/__init__.py | wbez/GradingRahm | 809c9ecff3c0e2d1d3093c2eadeb55fdc3f8628f | [
"FSFAP"
] | 1 | 2021-03-15T03:14:02.000Z | 2021-03-15T03:14:02.000Z | fabfile/__init__.py | busilogic/app-template | 3a8a963535fd084bbe21197e6b4b098723ff5db1 | [
"MIT"
] | 1 | 2015-01-26T17:50:56.000Z | 2015-01-26T17:50:56.000Z | fabfile/__init__.py | wbez/GradingRahm | 809c9ecff3c0e2d1d3093c2eadeb55fdc3f8628f | [
"FSFAP"
] | null | null | null | #!/usr/bin/env python
from fabric.api import local, require, settings, task
from fabric.state import env
from termcolor import colored
import app_config
# Other fabfiles
import assets
import data
import issues
import render
import text
import utils
if app_config.DEPLOY_TO_SERVERS:
import servers
if app_config.DEPLOY_CRONTAB:
import cron_jobs
# Bootstrap can only be run once, then it's disabled
if app_config.PROJECT_SLUG == '$NEW_PROJECT_SLUG':
import bootstrap
"""
Base configuration
"""
# Fabric connection defaults; hosts/settings are filled in later by the
# environment tasks (production/staging) below.
env.user = app_config.SERVER_USER
env.forward_agent = True
env.hosts = []
env.settings = None
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
@task
def production():
    """
    Run as though on production.
    """
    # Point app_config's deploy targets at production and load its hosts.
    env.settings = 'production'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS
@task
def staging():
    """
    Run as though on staging.
    """
    # Point app_config's deploy targets at staging and load its hosts.
    env.settings = 'staging'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS
"""
Branches
Changing branches requires deploying that branch to a host.
"""
@task
def stable():
    """
    Work on stable branch.
    """
    # Only records the branch; deploy() reads env.branch later.
    env.branch = 'stable'
@task
def master():
    """
    Work on development branch.
    """
    # Only records the branch; deploy() reads env.branch later.
    env.branch = 'master'
@task
def branch(branch_name):
    """
    Work on any specified branch.
    """
    # Only records the branch; deploy() reads env.branch later.
    env.branch = branch_name
@task
def tests():
    """
    Run Python unit tests.
    """
    # Invokes the nose test runner in the current working directory.
    local('nosetests')
"""
Deployment
Changes to deployment require a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
def _deploy_to_s3(path='.gzip'):
    """
    Deploy project files to S3.

    Two sync passes: the first uploads everything except the gzip-able
    types listed in gzip_types.txt (and the assets dir); the second
    uploads only those types with Content-Encoding: gzip set.
    """
    # Clear files that should never be deployed
    local('rm -rf %s/live-data' % path)
    local('rm -rf %s/sitemap.xml' % path)

    # Build matching --exclude/--include glob flags from gzip_types.txt.
    exclude_flags = ''
    include_flags = ''

    with open('gzip_types.txt') as f:
        for line in f:
            exclude_flags += '--exclude "%s" ' % line.strip()
            include_flags += '--include "%s" ' % line.strip()

    exclude_flags += '--exclude "www/assets" '

    sync = ('aws s3 sync %s/ %s/ --acl "public-read" ' + exclude_flags + ' --cache-control "max-age=%i" --region "%s"') % (
        path,
        app_config.S3_DEPLOY_URL,
        app_config.DEFAULT_MAX_AGE,
        app_config.S3_BUCKET['region']
    )

    sync_gzip = ('aws s3 sync %s/ %s/ --acl "public-read" --content-encoding "gzip" --exclude "*" ' + include_flags + ' --cache-control "max-age=%i" --region "%s"') % (
        path,
        app_config.S3_DEPLOY_URL,
        app_config.DEFAULT_MAX_AGE,
        app_config.S3_BUCKET['region']
    )

    local(sync)
    local(sync_gzip)
def _deploy_assets():
    """
    Deploy assets to S3.

    Assets get their own (longer) cache lifetime, ASSETS_MAX_AGE, separate
    from the page-content syncs in _deploy_to_s3.
    """
    sync_assets = 'aws s3 sync www/assets/ %s/assets/ --acl "public-read" --cache-control "max-age=%i" --region "%s"' % (
        app_config.S3_DEPLOY_URL,
        app_config.ASSETS_MAX_AGE,
        app_config.S3_BUCKET['region']
    )
    local(sync_assets)
def _gzip(in_path='www', out_path='.gzip'):
    """
    Gzips everything in www and puts it all in gzip

    Delegates to the gzip_assets.py helper script.
    """
    local('python gzip_assets.py %s %s' % (in_path, out_path))
@task
def update():
    """
    Update all application data not in repository (copy, assets, etc).
    """
    # Refresh copytext, synced assets and data, in that order.
    text.update()
    assets.sync()
    data.update()
@task
def deploy(remote='origin'):
    """
    Deploy the latest app to S3 and, if configured, to our servers.

    Requires an environment task (production/staging) to have run first;
    when deploying to servers, also requires a branch task.  Deploying a
    non-stable branch to production prompts for confirmation.
    """
    require('settings', provided_by=[production, staging])

    if app_config.DEPLOY_TO_SERVERS:
        require('branch', provided_by=[stable, master, branch])

        if (app_config.DEPLOYMENT_TARGET == 'production' and env.branch != 'stable'):
            utils.confirm(
                colored("You are trying to deploy the '%s' branch to production.\nYou should really only deploy a stable branch.\nDo you know what you're doing?" % env.branch, "red")
            )

        # Refresh code and data on the remote servers first.
        servers.checkout_latest(remote)

        servers.fabcast('text.update')
        servers.fabcast('assets.sync')
        servers.fabcast('data.update')

        if app_config.DEPLOY_CRONTAB:
            servers.install_crontab()

        if app_config.DEPLOY_SERVICES:
            servers.deploy_confs()

    # Then render locally, gzip, and push flat files + assets to S3.
    update()
    render.render_all()
    _gzip('www', '.gzip')
    _deploy_to_s3()
    _deploy_assets()
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
@task
def shiva_the_destroyer():
    """
    Deletes the app from s3

    Destructive: recursively removes this project's S3 prefix and, when
    configured, tears down the server-side project, crontab and services.
    Prompts for confirmation first.
    """
    require('settings', provided_by=[production, staging])

    utils.confirm(
        colored("You are about to destroy everything deployed to %s for this project.\nDo you know what you're doing?')" % app_config.DEPLOYMENT_TARGET, "red")
    )

    with settings(warn_only=True):
        sync = 'aws s3 rm s3://%s/%s/ --recursive --region "%s"' % (
            app_config.S3_BUCKET['bucket_name'],
            app_config.PROJECT_SLUG,
            app_config.S3_BUCKET['region']
        )

        local(sync)

        if app_config.DEPLOY_TO_SERVERS:
            servers.delete_project()

            if app_config.DEPLOY_CRONTAB:
                servers.uninstall_crontab()

            if app_config.DEPLOY_SERVICES:
                servers.nuke_confs()
79567b0b4f974589bb104a37abaafed12f335e03 | 2,121 | py | Python | dash/dashblocks/templatetags/dashblocks.py | Ilhasoft/dash | d9b900cc08d9238304a226d837a4c90dec6b46fc | [
"BSD-3-Clause"
] | 7 | 2015-06-25T20:09:35.000Z | 2019-02-12T17:41:46.000Z | dash/dashblocks/templatetags/dashblocks.py | Ilhasoft/dash | d9b900cc08d9238304a226d837a4c90dec6b46fc | [
"BSD-3-Clause"
] | 108 | 2015-01-05T13:23:57.000Z | 2022-02-10T10:55:01.000Z | dash/dashblocks/templatetags/dashblocks.py | rapidpro/dash | 71c8fcd1ab823ef31512b5ee22eca6158b3575c1 | [
"BSD-3-Clause"
] | 9 | 2015-06-15T15:13:13.000Z | 2019-01-09T18:34:20.000Z | from django import template
from django.conf import settings
from dash.dashblocks.models import DashBlock, DashBlockType
"""
This module offers one templatetag called ``load_dashblocks``.
``load_dashblocks`` does a query for all active DashBlock objects
for the passed in DashBlockType and Org on request. (identified by the slug)
You can then access that list within your context.
It accepts two parameters:
org
The Org set on the request to filter DashBlocks for that org.
slug
The slug/key of the DashBlockType to load DashBlocks for.
If you want to pass it by name, you have to use quotes on it.
Otherwise just use the variable name.
Syntax::
{% load_dashblocks [org] [name] %}
Example usage::
{% load dashblocks %}
...
{% load_dashblocks request.org "home_banner_blocks" %}
...
Note: You may also use the shortcut tag 'load_qbs'
eg: {% load_qbs request.org "home_banner_blocks %}
.. note::
If you specify a slug that has no associated dash block, then an error message
will be inserted in your template. You may change this text by setting
the value of the DASHBLOCK_STRING_IF_INVALID setting.
"""
register = template.Library()
@register.simple_tag(takes_context=True)
def load_dashblocks(context, org, slug, tag=None):
    """Resolve the active DashBlocks for *org*/*slug* into the context.

    Stores the queryset (priority-descending, optionally filtered by
    *tag*) under ``context[slug]`` and renders nothing.  When the slug
    names no DashBlockType, returns an inline error string (overridable
    via the DASHBLOCK_STRING_IF_INVALID setting).
    """
    if not org:
        return ""

    try:
        block_type = DashBlockType.objects.get(slug=slug)
    except DashBlockType.DoesNotExist:
        default_invalid = '<b><font color="red">DashBlockType with slug: %s not found</font></b>'
        return getattr(settings, "DASHBLOCK_STRING_IF_INVALID", default_invalid) % slug

    blocks = DashBlock.objects.filter(
        dashblock_type=block_type, org=org, is_active=True
    ).order_by("-priority")

    # Narrow by tag when one was supplied.
    if tag is not None:
        blocks = blocks.filter(tags__icontains=tag)

    context[slug] = blocks
    return ""
@register.simple_tag(takes_context=True)
def load_qbs(context, org, slug, tag=None):
    # Backwards-compatible shortcut alias for load_dashblocks.
    return load_dashblocks(context, org, slug, tag)
| 26.848101 | 97 | 0.711457 |
79567bf59d3c73ec1a14318b689daaec3746419e | 864 | py | Python | mmocr/utils/__init__.py | cuongngm/TableMASTER-mmocr | 77efbc420a80f257eb6947a076a50f61c72344bd | [
"Apache-2.0"
] | 206 | 2021-07-30T09:04:08.000Z | 2022-03-22T00:57:44.000Z | mmocr/utils/__init__.py | zezeze97/image2latex | c745667cd1af91dbff2385dcf2f2b80b9a40adb6 | [
"Apache-2.0"
] | 39 | 2021-08-05T07:16:46.000Z | 2022-03-14T13:23:48.000Z | mmocr/utils/__init__.py | zezeze97/image2latex | c745667cd1af91dbff2385dcf2f2b80b9a40adb6 | [
"Apache-2.0"
] | 61 | 2021-07-30T07:51:41.000Z | 2022-03-30T14:40:02.000Z | from mmcv.utils import Registry, build_from_cfg
from .check_argument import (equal_len, is_2dlist, is_3dlist, is_ndarray_list,
is_none_or_type, is_type_list, valid_boundary)
from .collect_env import collect_env
from .data_convert_util import convert_annotations
from .fileio import list_from_file, list_to_file, list_from_folder_table, convert_bbox
from .img_util import drop_orientation, is_not_png
from .lmdb_util import lmdb_converter
from .logger import get_root_logger
__all__ = [
'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env',
'is_3dlist', 'is_ndarray_list', 'is_type_list', 'is_none_or_type',
'equal_len', 'is_2dlist', 'valid_boundary', 'lmdb_converter',
'drop_orientation', 'convert_annotations', 'is_not_png', 'list_to_file',
'list_from_file', 'list_from_folder_table', 'convert_bbox'
]
| 45.473684 | 86 | 0.771991 |
79567d720cb8456c0c6bc8307f53cdfccec5645b | 2,335 | py | Python | workflow/scripts/collect_mod_scores.py | Ellison-Lab/TestisTEs2021 | b624d80b216d219701fd60e347c4d30271e8d3d6 | [
"MIT"
] | null | null | null | workflow/scripts/collect_mod_scores.py | Ellison-Lab/TestisTEs2021 | b624d80b216d219701fd60e347c4d30271e8d3d6 | [
"MIT"
] | null | null | null | workflow/scripts/collect_mod_scores.py | Ellison-Lab/TestisTEs2021 | b624d80b216d219701fd60e347c4d30271e8d3d6 | [
"MIT"
] | 1 | 2021-06-24T05:05:45.000Z | 2021-06-24T05:05:45.000Z | import pandas as pd
import scanpy as sc
import pyarrow.dataset as pads
params = pd.read_json("results/finalized/optimal-gep-params/larval-w1118-testes.json")
ds = pads.dataset("results/finalized/larval-w1118-testes/optimal_gep_membership/", format="arrow")
df = ds.to_table().to_pandas()
tep = df.loc[(df["module"] == 27) & (df["qval"] < params["qval"][0])]
tep_genes = tep.loc[["FBgn" in x for x in tep["X1"]]]
tep_tes = tep.loc[[("FBgn" not in x) for x in tep["X1"]]]
ral517 = sc.read_h5ad("subworkflows/gte21-scrna/results/scanpy/adult-ral517-testes/celltypes.h5ad")
wt= sc.read_h5ad("subworkflows/gte21-scrna/results/scanpy/adult-wt-testes/celltypes.h5ad")
larv= sc.read_h5ad("subworkflows/gte21-scrna/results/scanpy/larval-w1118-testes/celltypes.h5ad")
##
# Gene set scores
##
sc.tl.score_genes(ral517,tep["X1"], score_name="tep")
sc.tl.score_genes(ral517,tep_genes["X1"], score_name="tep_genes")
sc.tl.score_genes(ral517,tep_tes["X1"], score_name="tep_tes")
sc.tl.score_genes(wt,tep["X1"], score_name="tep")
sc.tl.score_genes(wt,tep_genes["X1"], score_name="tep_genes")
sc.tl.score_genes(wt,tep_tes["X1"], score_name="tep_tes")
sc.tl.score_genes(larv,tep["X1"], score_name="tep")
sc.tl.score_genes(larv,tep_genes["X1"], score_name="tep_genes")
sc.tl.score_genes(larv,tep_tes["X1"], score_name="tep_tes")
ral_df = ral517.obs[["clusters","tep","tep_genes","tep_tes"]]
wt_df = wt.obs[["clusters","tep","tep_genes","tep_tes"]]
larv_df = larv.obs[["clusters","tep","tep_genes","tep_tes"]]
ral_df["dataset"] = "ral517"
wt_df["dataset"] = "wt"
larv_df["dataset"] = "larval"
res = larv_df.append(ral_df).append(wt_df)
res.to_csv("results/finalized/x-dataset-comparison/mod_scores.csv.gz")
##
# TE expression
##
ral517_te_expr = pd.melt(ral517[:,tep_tes["X1"]].to_df(),var_name="feature",value_name="expression", ignore_index=False)
wt_te_expr = pd.melt(wt[:,tep_tes["X1"]].to_df(),var_name="feature",value_name="expression", ignore_index=False)
larv_te_expr = pd.melt(larv[:,tep_tes["X1"]].to_df(),var_name="feature",value_name="expression", ignore_index=False)
ral517_te_expr["dataset"] = "ral517"
wt_te_expr["dataset"] = "wt"
larv_te_expr ["dataset"] = "larval"
res_te_expr = larv_te_expr.append(ral517_te_expr).append(wt_te_expr)
res_te_expr.to_csv("results/finalized/x-dataset-comparison/te_expression.csv.gz")
| 35.378788 | 120 | 0.732762 |
79567da2f99c23ad0531b53c4fd753fedbe88ecd | 27,570 | py | Python | utils/datasets.py | jiahuei/cisip-FIRe | bcbda2b74dc5a0b26f0338f707a257d660b688a1 | [
"BSD-3-Clause"
] | 25 | 2021-11-17T15:01:00.000Z | 2022-03-28T15:41:24.000Z | utils/datasets.py | jiahuei/cisip-FIRe | bcbda2b74dc5a0b26f0338f707a257d660b688a1 | [
"BSD-3-Clause"
] | 2 | 2021-11-25T13:11:10.000Z | 2021-12-28T16:15:07.000Z | utils/datasets.py | jiahuei/cisip-FIRe | bcbda2b74dc5a0b26f0338f707a257d660b688a1 | [
"BSD-3-Clause"
] | 4 | 2021-11-19T08:29:17.000Z | 2021-12-10T08:22:10.000Z | import logging
import os
from abc import ABC
from typing import Tuple, Any
import numpy as np
import torch
import torchvision
from pandas import read_csv
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import CIFAR10, CIFAR100
from torchvision.datasets.folder import pil_loader, accimage_loader
from torchvision.transforms import transforms
from tqdm import tqdm
import configs
from functions.evaluate_roxf import configdataset, DATASETS
from functions.mining import SimpleMemoryBank
from utils.augmentations import GaussianBlurOpenCV
class BaseDataset(Dataset, ABC):
    """Abstract base for all datasets here; extends the torch ``Dataset``
    interface with ``get_img_paths``."""

    def get_img_paths(self):
        """Return the collection of image paths backing this dataset."""
        raise NotImplementedError
class HashingDataset(BaseDataset):
    """Image dataset described by a text file of ``<path> <label...>`` lines.

    Each line of ``root/filename`` holds an image path followed by either a
    single class index or a multi-hot (onehot) label vector.  If the paths
    end in ``.pkl`` the samples are loaded with ``torch.load`` instead of an
    image loader.  ``ratio`` optionally keeps a random subset of the data.
    """

    def __init__(self, root,
                 transform=None,
                 target_transform=None,
                 filename='train',
                 separate_multiclass=False,
                 ratio=1):
        if torchvision.get_image_backend() == 'PIL':
            self.loader = pil_loader
        else:
            self.loader = accimage_loader

        self.separate_multiclass = separate_multiclass
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.filename = filename
        self.train_data = []
        self.train_labels = []
        self.ratio = ratio

        filename = os.path.join(self.root, self.filename)
        is_pkl = False

        with open(filename, 'r') as f:
            while True:
                lines = f.readline()
                if not lines:
                    break

                path_tmp = lines.split()[0]
                label_tmp = lines.split()[1:]
                # more than one label column => onehot / multi-hot encoding
                self.is_onehot = len(label_tmp) != 1
                if not self.is_onehot:
                    label_tmp = lines.split()[1]
                if self.separate_multiclass:
                    assert self.is_onehot, 'if multiclass, please use onehot'
                    # duplicate the sample once per active class, each copy
                    # carrying a single-class onehot label
                    # NOTE: np.int was removed in NumPy 1.24; the builtin int
                    # is the exact equivalent (np.int was an alias for it)
                    nonzero_index = np.nonzero(np.array(label_tmp, dtype=int))[0]
                    for c in nonzero_index:
                        self.train_data.append(path_tmp)
                        label_tmp = ['1' if i == c else '0' for i in range(len(label_tmp))]
                        self.train_labels.append(label_tmp)
                else:
                    self.train_data.append(path_tmp)
                    self.train_labels.append(label_tmp)
                is_pkl = path_tmp.endswith('.pkl')  # if save as pkl, pls make sure dont use different style of loading

        if is_pkl:
            self.loader = torch.load

        self.train_data = np.array(self.train_data)
        self.train_labels = np.array(self.train_labels, dtype=float)

        if ratio != 1:
            assert 0 < ratio < 1, 'data ratio is in between 0 and 1 exclusively'
            N = len(self.train_data)
            randidx = np.arange(N)
            np.random.shuffle(randidx)
            randidx = randidx[:int(ratio * N)]
            self.train_data = self.train_data[randidx]
            self.train_labels = self.train_labels[randidx]

        logging.info(f'Number of data: {self.train_data.shape[0]}')

    def filter_classes(self, classes):  # only work for single class dataset
        """Keep only samples of ``classes`` and relabel them as onehot over
        the given class list (index in ``classes`` becomes the new class)."""
        new_data = []
        new_labels = []

        for idx, c in enumerate(classes):
            new_onehot = np.zeros(len(classes))
            new_onehot[idx] = 1
            cmask = self.train_labels.argmax(axis=1) == c

            new_data.append(self.train_data[cmask])
            new_labels.append(np.repeat([new_onehot], int(np.sum(cmask)), axis=0))

        self.train_data = np.concatenate(new_data)
        self.train_labels = np.concatenate(new_labels)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target, index) where target is the label tensor.
        """
        img, target = self.train_data[index], self.train_labels[index]
        target = torch.tensor(target)

        img = self.loader(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target, index

    def __len__(self):
        return len(self.train_data)

    def get_img_paths(self):
        return self.train_data
class IndexDatasetWrapper(BaseDataset):
    """Wrap a dataset so that ``__getitem__`` also returns the sample index.

    Attribute reads and writes are transparently forwarded to the wrapped
    dataset ``ds``.
    """

    def __init__(self, ds) -> None:
        super(Dataset, self).__init__()
        # write straight into __dict__ to bypass the forwarding __setattr__
        self.__dict__['ds'] = ds

    def __setattr__(self, name, value):
        # every attribute assignment lands on the wrapped dataset
        setattr(self.ds, name, value)

    def __getattr__(self, attr):
        # __getattr__ only fires when normal lookup fails, so the __dict__
        # branch below is effectively a safety net; fall back to ds
        if attr in self.__dict__:
            return getattr(self, attr)
        return getattr(self.ds, attr)

    def __getitem__(self, index: int) -> Tuple:
        """
        Args:
            index (int): Index
        Returns:
            tuple: the wrapped dataset's output with ``index`` appended.
        """
        outs = self.ds.__getitem__(index)
        return tuple(list(outs) + [index])

    def __len__(self):
        return len(self.ds)

    def get_img_paths(self):
        return self.ds.get_img_paths()
class Denormalize(object):
    """Invert torchvision's ``Normalize``: per channel, ``t = t * std + mean``,
    applied in place; the (mutated) tensor is also returned."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        for channel, (m, s) in zip(tensor, zip(self.mean, self.std)):
            channel.mul_(s).add_(m)
        return tensor
class InstanceDiscriminationDataset(BaseDataset):
    """Wrapper producing two augmented views of each image for contrastive
    (instance-discrimination) training, e.g. SimCLR or CIBHash.

    ``__getitem__`` returns ``(stacked_views, target, index)`` where
    ``stacked_views`` stacks the two views along a new first dimension.
    When ``weak_mode`` is 1 or 2 the first view uses a weaker augmentation.
    """

    def augment_image(self, img):
        # if use this, please run script with --no-aug and --gpu-mean-transform
        return self.transform(self.to_pil(img))

    def weak_augment_image(self, img):
        # if use this, please run script with --no-aug and --gpu-mean-transform
        return self.weak_transform(self.to_pil(img))

    def __init__(self, ds, tmode='simclr', imgsize=224, weak_mode=0) -> None:
        super(Dataset, self).__init__()
        # bypass the forwarding __setattr__ so 'ds' lands on the wrapper
        self.__dict__['ds'] = ds

        if 'simclr' in tmode:
            s = 0.5
            size = imgsize
            color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
            data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size, scale=(0.5, 1.0)),
                                                  transforms.RandomHorizontalFlip(),
                                                  transforms.RandomApply([color_jitter], p=0.7),
                                                  transforms.RandomGrayscale(p=0.2),
                                                  GaussianBlurOpenCV(kernel_size=3),
                                                  # GaussianBlur(kernel_size=int(0.1 * size)),
                                                  transforms.ToTensor(),
                                                  # 0.2 * 224 = 44 pixels
                                                  transforms.RandomErasing(p=0.2, scale=(0.02, 0.2))])
            self.transform = data_transforms

        # lazy fix, can be more pretty and general, cibhash part 1/2
        elif tmode == 'cibhash':
            logging.info('CIBHash Augmentations')
            s = 0.5
            size = imgsize
            color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
            data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size, scale=(0.5, 1.0)),
                                                  transforms.RandomHorizontalFlip(),
                                                  transforms.RandomApply([color_jitter], p=0.7),
                                                  transforms.RandomGrayscale(p=0.2),
                                                  GaussianBlurOpenCV(kernel_size=3),
                                                  # GaussianBlur(kernel_size=3),
                                                  transforms.ToTensor()])
            self.transform = data_transforms

        else:
            raise ValueError(f'unknown mode {tmode}')

        # weak_transform is only defined for weak_mode 1 and 2; with
        # weak_mode == 0 weak_augment_image must never be called
        if weak_mode == 1:
            logging.info(f'Weak mode {weak_mode} activated.')
            self.weak_transform = transforms.Compose([
                transforms.Resize(256),  # temp lazy hard code
                transforms.CenterCrop(imgsize),
                transforms.ToTensor()
            ])
        elif weak_mode == 2:
            logging.info(f'Weak mode {weak_mode} activated.')
            self.weak_transform = transforms.Compose([
                transforms.Resize(256),  # temp lazy hard code
                transforms.RandomCrop(imgsize),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor()
            ])

        self.weak_mode = weak_mode
        self.tmode = tmode
        self.imgsize = imgsize
        self.to_pil = transforms.ToPILImage()

    def __setattr__(self, name, value):
        # forward attribute assignments to the wrapped dataset
        setattr(self.ds, name, value)

    def __getattr__(self, attr):
        if attr in self.__dict__:
            return getattr(self, attr)
        return getattr(self.ds, attr)

    def __getitem__(self, index: int) -> Tuple[Any, Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (two stacked augmented views, target, index).
        """
        out = self.ds.__getitem__(index)
        img, target = out[:2]  # exclude index

        # if self.tmode == 'simclr':
        #     aug_imgs = [img, self.augment_image(img)]
        # else:
        if self.weak_mode != 0:
            aug_imgs = [self.weak_augment_image(img), self.augment_image(img)]
        else:
            aug_imgs = [self.augment_image(img), self.augment_image(img)]

        return torch.stack(aug_imgs, dim=0), target, index

    def __len__(self):
        return len(self.ds)

    def get_img_paths(self):
        return self.ds.get_img_paths()
class RotationDataset(BaseDataset):
    """Wrapper for self-supervised rotation prediction: each sample yields
    all four rotated copies (0/90/180/270 degrees) of the image together
    with the rotation class labels 0..3."""

    @staticmethod
    def rotate_img(img, rot):
        """Rotate a CHW image tensor by ``rot`` degrees (0, 90, 180 or 270)."""
        img = np.transpose(img.numpy(), (1, 2, 0))
        if rot == 0:  # 0 degrees rotation
            out = img
        elif rot == 90:  # 90 degrees rotation
            out = np.flipud(np.transpose(img, (1, 0, 2)))
        elif rot == 180:  # 180 degrees rotation
            out = np.fliplr(np.flipud(img))
        elif rot == 270:  # 270 degrees rotation / or -90
            out = np.transpose(np.flipud(img), (1, 0, 2))
        else:
            raise ValueError('rotation should be 0, 90, 180, or 270 degrees')
        return torch.from_numpy(np.transpose(out, (2, 0, 1)).copy())

    def __init__(self, ds) -> None:
        super(Dataset, self).__init__()
        # bypass the forwarding __setattr__ so 'ds' lands on the wrapper
        self.__dict__['ds'] = ds

    def __setattr__(self, name, value):
        setattr(self.ds, name, value)

    def __getattr__(self, attr):
        if attr in self.__dict__:
            return getattr(self, attr)
        return getattr(self.ds, attr)

    def __getitem__(self, index: int) -> Tuple[Any, Any, Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (4 stacked rotated images, rotation labels, target, index).
        """
        out = self.ds.__getitem__(index)
        img, target = out[:2]  # exclude index

        # rot_label = np.random.randint(0, 4)  # .item()
        rot_labels = [0, 1, 2, 3]

        rots = [0, 90, 180, 270]
        # rots = [0, rots[rot_label]]
        rot_imgs = [self.rotate_img(img, rot) for rot in rots]

        return torch.stack(rot_imgs, dim=0), torch.tensor(rot_labels), target, index

    def __len__(self):
        return len(self.ds)

    def get_img_paths(self):
        return self.ds.get_img_paths()
class LandmarkDataset(BaseDataset):
    """Google Landmarks dataset driven by a CSV with ``id`` and
    ``landmark_id`` columns; images are expected at
    ``root/<set>/<id[0]>/<id[1]>/<id[2]>/<id>.jpg``."""

    def __init__(self, root,
                 transform=None,
                 target_transform=None,
                 filename='train.csv',
                 onehot=False, return_id=False):
        self.loader = pil_loader
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.filename = filename
        self.train_labels = []
        self.set_name = filename[:-4]  # csv filename without the extension
        self.onehot = onehot
        self.return_id = return_id

        def get_path(i: str):
            # landmark layout shards images by the first 3 chars of the id
            return os.path.join(root, self.set_name, i[0], i[1], i[2], i + ".jpg")

        filename = os.path.join(self.root, self.filename)
        self.df = read_csv(filename)
        self.df['path'] = self.df['id'].apply(get_path)
        self.max_index = self.df['landmark_id'].max() + 1

        logging.info(f'Number of data: {len(self.df)}')

    def to_onehot(self, i):
        """Return a onehot vector of length ``max_index`` with entry ``i`` set."""
        t = torch.zeros(self.max_index)
        t[i] = 1
        return t

    def __getitem__(self, index):
        """Return ``(image, target)`` or, with ``return_id``,
        ``(image, target, (landmark_string_id, index))``."""
        img = self.df['path'][index]

        if self.onehot:
            target = self.to_onehot(self.df['landmark_id'][index])
        else:
            target = self.df['landmark_id'][index]
        # target = torch.tensor(target)

        img = self.loader(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        if self.return_id:
            return img, target, (self.df['id'][index], index)
        return img, target

    def __len__(self):
        return len(self.df)

    def get_img_paths(self):
        return self.df['path'].to_numpy()
class SingleIDDataset(BaseDataset):
    """Dataset with only single class ID
    To be merge with Landmark

    Driven by a CSV with ``path`` and ``class_id`` columns; images live
    under ``root/imgs``.
    """

    def __init__(self, root,
                 transform=None,
                 target_transform=None,
                 filename='train.csv',
                 onehot=False):
        self.loader = pil_loader
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.filename = filename
        self.train_labels = []
        self.set_name = filename[:-4]  # csv filename without the extension
        self.onehot = onehot

        def get_path(i: str):
            return os.path.join(root, "imgs", i)

        filename = os.path.join(self.root, self.filename)
        self.df = read_csv(filename)
        self.df['path'] = self.df['path'].apply(get_path)
        self.max_index = self.df['class_id'].max() + 1

        logging.info(f'Number of data: {len(self.df)}')

    def to_onehot(self, i):
        """Return a onehot vector of length ``max_index`` with entry ``i`` set."""
        t = torch.zeros(self.max_index)
        t[i] = 1
        return t

    def __getitem__(self, index):
        """Return ``(image, target, index)``; target is onehot if requested."""
        img = self.df['path'][index]

        if self.onehot:
            target = self.to_onehot(self.df['class_id'][index])
        else:
            target = self.df['class_id'][index]
        # target = torch.tensor(target)

        img = self.loader(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target, index

    def __len__(self):
        return len(self.df)

    def get_img_paths(self):
        return self.df['path'].to_numpy()
class ROxfordParisDataset(BaseDataset):
    """Revisited Oxford5k / Paris6k retrieval dataset.

    ``test.txt`` serves the query images (cropped to the annotated bounding
    box); ``database.txt`` serves the index images.
    """

    def __init__(self,
                 dataset='roxford5k',
                 filename='test.txt',
                 transform=None,
                 target_transform=None):
        self.loader = pil_loader
        self.transform = transform
        self.target_transform = target_transform
        assert filename in ['test.txt', 'database.txt']
        self.set_name = filename
        assert dataset in DATASETS
        self.cfg = configdataset(dataset, os.path.join('data'))

        logging.info(f'Number of data: {self.__len__()}')

    def __getitem__(self, index):
        """Return ``(image, index, index)``; there are no class targets, and
        the index is repeated because None would break the default collate."""
        if self.set_name == 'database.txt':
            img = self.cfg['im_fname'](self.cfg, index)
        elif self.set_name == 'test.txt':
            img = self.cfg['qim_fname'](self.cfg, index)

        img = self.loader(img)
        if self.set_name == 'test.txt':
            # queries are evaluated on the annotated region only
            img = img.crop(self.cfg['gnd'][index]['bbx'])

        if self.transform is not None:
            img = self.transform(img)

        return img, index, index  # img, None, index is throw error

    def __len__(self):
        if self.set_name == 'test.txt':
            return self.cfg['nq']
        elif self.set_name == 'database.txt':
            return self.cfg['n']

    def get_img_paths(self):
        raise NotImplementedError('Not supported.')
class DescriptorDataset(BaseDataset):
    """Precomputed-descriptor dataset: a ``torch.save``d dict holding
    ``codes`` (features) and ``labels``, optionally subsampled by ``ratio``."""

    def __init__(self, root, filename, ratio=1):
        self.data_dict = torch.load(os.path.join(root, filename), map_location=torch.device('cpu'))
        self.filename = filename
        self.root = root
        self.ratio = ratio

        if ratio != 1:
            assert 0 < ratio < 1, 'data ratio is in between 0 and 1 exclusively'
            N = len(self.data_dict['codes'])
            randidx = np.arange(N)
            np.random.shuffle(randidx)
            randidx = randidx[:int(ratio * N)]

            # subsample every entry of the dict with the same random index
            for key in self.data_dict:
                self.data_dict[key] = self.data_dict[key][randidx]

        logging.info(f'Number of data in (unknown): {self.__len__()}')

    def __getitem__(self, index):
        """Return ``(descriptor, label, index)``; the label is used as-is."""
        embed = self.data_dict['codes'][index]
        label = self.data_dict['labels'][index]  # label used as stored (no re-indexing here)
        return embed, label, index  # img, None, index is throw error

    def __len__(self):
        return len(self.data_dict['codes'])

    def get_img_paths(self):
        raise NotImplementedError('Not supported for descriptor dataset. Please try usual Image Dataset if you want to get all image paths.')
class EmbeddingDataset(BaseDataset):
    """Precomputed-embedding dataset (e.g. DELG features): a ``torch.save``d
    dict with ``codes``, ``labels`` and string ``id`` entries."""

    def __init__(self, root,
                 filename='train.txt'):
        self.data_dict = torch.load(os.path.join(root, filename), map_location=torch.device('cpu'))
        self.filename = filename
        self.root = root
        logging.info(f'Number of data in (unknown): {self.__len__()}')

    def __getitem__(self, index):
        """Return ``(embedding, label, (landmark_id, index))``; label is 0
        for non-train splits, which carry no class annotation."""
        embed = self.data_dict['codes'][index]
        if self.filename == 'train.txt':
            label = self.data_dict['labels'][index] - 1  # label is 1 indexed, convert to 0-indexed
        else:
            label = 0
        landmark_id = self.data_dict['id'][index]
        return embed, label, (landmark_id, index)  # img, None, index is throw error

    def __len__(self):
        return len(self.data_dict['id'])

    def get_img_paths(self):
        raise NotImplementedError('Not supported for descriptor dataset. Please try usual Image Dataset if you want to get all image paths.')
class NeighbourDatasetWrapper(BaseDataset):
    """Wrapper that pre-mines each sample's top-k nearest neighbours (by the
    model's backbone features) and, per ``__getitem__``, pairs the sample
    with one randomly chosen neighbour."""

    def __init__(self, ds, model, config) -> None:
        super(Dataset, self).__init__()
        self.ds = ds

        device = config['device']
        loader = DataLoader(ds, config['batch_size'],
                            shuffle=False,
                            drop_last=False,
                            num_workers=os.cpu_count())

        model.eval()
        pbar = tqdm(loader, desc='Obtain Codes', ascii=True, bar_format='{l_bar}{bar:10}{r_bar}',
                    disable=configs.disable_tqdm)
        ret_feats = []

        # single full forward pass to collect backbone features
        for i, (data, labels, index) in enumerate(pbar):
            with torch.no_grad():
                data, labels = data.to(device), labels.to(device)
                x, code_logits, b = model(data)[:3]
                ret_feats.append(x.cpu())
        ret_feats = torch.cat(ret_feats)

        mbank = SimpleMemoryBank(len(self.ds), model.backbone.in_features, device)
        mbank.update(ret_feats)

        neighbour_topk = config['dataset_kwargs'].get('neighbour_topk', 5)
        indices = mbank.mine_nearest_neighbors(neighbour_topk)

        self.indices = indices[:, 1:]  # exclude itself

    def __getitem__(self, index: int) -> Tuple[Any, Any, Any, Any, Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target, index, neighbour_image, neighbour_target,
            neighbour_index).
        """
        img, target = self.ds.__getitem__(index)

        randidx = np.random.choice(self.indices[index], 1)[0]
        nbimg, nbtar = self.ds.__getitem__(randidx)

        return img, target, index, nbimg, nbtar, randidx

    def __len__(self):
        return len(self.ds)

    def get_img_paths(self):
        return self.ds.get_img_paths()
def one_hot(nclass):
    """Return a target transform mapping a class index (int-like) to a
    one-hot ``torch.LongTensor`` of length ``nclass``."""
    def encode(index):
        idx = torch.as_tensor(int(index), dtype=torch.long)
        return torch.nn.functional.one_hot(idx, nclass)

    return encode
def cifar(nclass, **kwargs):
    """Build a CIFAR-10/100 split (train / test / database) for hashing.

    The official train and test sets are combined, then per-class query,
    database and train indices are carved out according to the evaluation
    protocol ``ep`` (1, 2 or 3).  The index files are cached under
    ``data/cifar{nclass}`` and regenerated only when ``filename`` is
    ``train.txt`` and ``reset`` is set (or no cache exists);
    ``test.txt``/``database.txt`` are assumed to have been written by a
    previous ``train.txt`` call.
    """
    transform = kwargs['transform']
    ep = kwargs['evaluation_protocol']
    fn = kwargs['filename']
    reset = kwargs['reset']

    CIFAR = CIFAR10 if int(nclass) == 10 else CIFAR100
    traind = CIFAR(f'data/cifar{nclass}',
                   transform=transform, target_transform=one_hot(int(nclass)),
                   train=True, download=True)
    traind = IndexDatasetWrapper(traind)
    testd = CIFAR(f'data/cifar{nclass}',
                  transform=transform, target_transform=one_hot(int(nclass)),
                  train=False, download=True)
    testd = IndexDatasetWrapper(testd)

    if ep == 2:  # using orig train and test
        if fn == 'test.txt':
            return testd
        else:  # train.txt and database.txt
            return traind

    combine_data = np.concatenate([traind.data, testd.data], axis=0)
    combine_targets = np.concatenate([traind.targets, testd.targets], axis=0)

    path = f'data/cifar{nclass}/0_0_{ep}_{fn}'

    # only the train.txt request (with reset or a missing cache) regenerates
    # the split; every other request loads the cached indices from disk
    load_data = fn == 'train.txt'
    load_data = load_data and (reset or not os.path.exists(path))

    if not load_data:
        logging.info(f'Loading {path}')
        data_index = torch.load(path)
    else:
        train_data_index = []
        query_data_index = []
        db_data_index = []

        data_id = np.arange(combine_data.shape[0])  # [0, 1, ...]

        for i in range(nclass):
            class_mask = combine_targets == i
            index_of_class = data_id[class_mask].copy()  # index of the class [2, 10, 656,...]
            np.random.shuffle(index_of_class)

            if ep == 1:
                query_n = 100  # // (nclass // 10)
                train_n = 500  # // (nclass // 10)

                index_for_query = index_of_class[:query_n].tolist()

                index_for_db = index_of_class[query_n:].tolist()
                index_for_train = index_for_db[:train_n]
            elif ep == 2:  # ep2 = take all data
                # NOTE(review): unreachable — ep == 2 already returned above
                query_n = 1000  # // (nclass // 10)

                index_for_query = index_of_class[:query_n].tolist()
                index_for_db = index_of_class[query_n:].tolist()
                index_for_train = index_for_db
            elif ep == 3:  # Bi-Half Cifar10(II)
                query_n = 1000
                train_n = 500

                index_for_query = index_of_class[:query_n].tolist()

                index_for_db = index_of_class[query_n:].tolist()
                index_for_train = index_for_db[:train_n]

            else:
                raise NotImplementedError('')

            train_data_index.extend(index_for_train)
            query_data_index.extend(index_for_query)
            db_data_index.extend(index_for_db)

        train_data_index = np.array(train_data_index)
        query_data_index = np.array(query_data_index)
        db_data_index = np.array(db_data_index)

        # cache all three splits so later test/database requests can load them
        torch.save(train_data_index, f'data/cifar{nclass}/0_0_{ep}_train.txt')
        torch.save(query_data_index, f'data/cifar{nclass}/0_0_{ep}_test.txt')
        torch.save(db_data_index, f'data/cifar{nclass}/0_0_{ep}_database.txt')

        data_index = {
            'train.txt': train_data_index,
            'test.txt': query_data_index,
            'database.txt': db_data_index
        }[fn]

    traind.data = combine_data[data_index]
    traind.targets = combine_targets[data_index]

    return traind
def imagenet100(**kwargs):
    """ImageNet-100 split read from ``data/imagenet{suffix}/<filename>``."""
    suffix = kwargs.get('dataset_name_suffix', '')
    return HashingDataset(
        f'data/imagenet{suffix}',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        ratio=kwargs.get('ratio', 1),
    )
def cars(**kwargs):
    """Stanford Cars split read from ``data/cars/<filename>``."""
    return HashingDataset(
        'data/cars',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        ratio=kwargs.get('ratio', 1),
    )
def landmark(**kwargs):
    """Google Landmark split; pass ``return_id=True`` to also get landmark ids."""
    return LandmarkDataset(
        'data/landmark',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        return_id=kwargs.get('return_id', False),
    )
def nuswide(**kwargs):
    """NUS-WIDE multi-label split from ``data/nuswide_v2_256{suffix}``."""
    suffix = kwargs.get('dataset_name_suffix', '')
    return HashingDataset(
        f'data/nuswide_v2_256{suffix}',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        separate_multiclass=kwargs.get('separate_multiclass', False),
        ratio=kwargs.get('ratio', 1),
    )
def nuswide_single(**kwargs):
    """NUS-WIDE with each multi-label sample split into single-class copies."""
    return nuswide(separate_multiclass=True, **kwargs)
def coco(**kwargs):
    """MS-COCO split read from ``data/coco{suffix}/<filename>``."""
    suffix = kwargs.get('dataset_name_suffix', '')
    return HashingDataset(
        f'data/coco{suffix}',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        ratio=kwargs.get('ratio', 1),
    )
def roxford5k(**kwargs):
    """Revisited Oxford5k split (``test.txt`` queries / ``database.txt`` index)."""
    return ROxfordParisDataset(
        dataset='roxford5k',
        filename=kwargs['filename'],
        transform=kwargs['transform'],
    )
def rparis6k(**kwargs):
    """Revisited Paris6k split (``test.txt`` queries / ``database.txt`` index)."""
    return ROxfordParisDataset(
        dataset='rparis6k',
        filename=kwargs['filename'],
        transform=kwargs['transform'],
    )
def gldv2delgembed(**kwargs):
    """Google Landmarks v2 DELG embeddings loaded from ``data/gldv2delgembed``."""
    return EmbeddingDataset('data/gldv2delgembed', filename=kwargs['filename'])
def roxford5kdelgembed(**kwargs):
    """Oxford5k DELG embeddings loaded from ``data/roxford5kdelgembed``."""
    return EmbeddingDataset('data/roxford5kdelgembed', filename=kwargs['filename'])
def rparis6kdelgembed(**kwargs):
    """Paris6k DELG embeddings loaded from ``data/rparis6kdelgembed``."""
    return EmbeddingDataset('data/rparis6kdelgembed', filename=kwargs['filename'])
def descriptor(**kwargs):
    """Precomputed-descriptor split loaded from ``kwargs['data_folder']``."""
    return DescriptorDataset(
        kwargs['data_folder'],
        filename=kwargs['filename'],
        ratio=kwargs.get('ratio', 1),
    )
def mirflickr(**kwargs):
    """MIRFlickr split read from ``data/mirflickr{suffix}/<filename>``."""
    suffix = kwargs.get('dataset_name_suffix', '')
    return HashingDataset(
        f'data/mirflickr{suffix}',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        ratio=kwargs.get('ratio', 1),
    )
def sop_instance(**kwargs):
    """Stanford Online Products instance-level split from ``data/sop_instance``."""
    return SingleIDDataset(
        'data/sop_instance',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
    )
def sop(**kwargs):
    """Stanford Online Products split read from ``data/sop{suffix}/<filename>``."""
    suffix = kwargs.get('dataset_name_suffix', '')
    return HashingDataset(
        f'data/sop{suffix}',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        ratio=kwargs.get('ratio', 1),
    )
def food101(**kwargs):
    """Food-101 split read from ``data/food-101/<filename>``."""
    return HashingDataset(
        'data/food-101',
        transform=kwargs['transform'],
        filename=kwargs['filename'],
        ratio=kwargs.get('ratio', 1),
    )
| 33.377724 | 141 | 0.586942 |
79567f394e6fadb27c905f79b808c6e415a95ebb | 11,577 | py | Python | gaphor/core/styling/parser.py | milotype/gaphor | 58370f5f8a2460863265044eaafd9624622fcdff | [
"Apache-2.0"
] | null | null | null | gaphor/core/styling/parser.py | milotype/gaphor | 58370f5f8a2460863265044eaafd9624622fcdff | [
"Apache-2.0"
] | 1 | 2017-04-18T01:51:12.000Z | 2017-04-18T01:51:12.000Z | gaphor/core/styling/parser.py | milotype/gaphor | 58370f5f8a2460863265044eaafd9624622fcdff | [
"Apache-2.0"
] | null | null | null | """Parser for CSS selectors, based on the tinycss2 tokenizer.
Original module: cssselect2.parser
:copyright: (c) 2012 by Simon Sapin, 2017 by Guillaume Ayoub.
:license: BSD, see LICENSE for more details.
"""
from tinycss2 import parse_component_value_list
__all__ = ["parse"]
def parse(input, namespaces=None):
    """Yield tinycss2 selectors found in given ``input``.

    :param input:
        A string, or an iterable of tinycss2 component values.
    :param namespaces:
        Optional mapping of namespace prefixes to namespace URLs.
    :raises SelectorError: on any token between selectors other than ','.
    """
    if isinstance(input, str):
        input = parse_component_value_list(input)
    tokens = TokenStream(input)
    namespaces = namespaces or {}
    yield parse_selector(tokens, namespaces)
    tokens.skip_whitespace_and_comment()
    while 1:
        next = tokens.next()
        if next is None:
            return
        elif next == ",":
            yield parse_selector(tokens, namespaces)
        else:
            # fixed typo in the original message ("unpexpected")
            raise SelectorError(next, "unexpected %s token." % next.type)
def parse_selector(tokens, namespaces):
    """Parse one complete selector from ``tokens``.

    Reads compound selectors joined by combinators (descendant ' ', '>',
    '+', '~') until a ',' or end of input, folding them left-to-right into
    ``CombinedSelector`` nodes.
    """
    result = parse_compound_selector(tokens, namespaces)
    while 1:
        has_whitespace = tokens.skip_whitespace()
        while tokens.skip_comment():
            has_whitespace = tokens.skip_whitespace() or has_whitespace
        peek = tokens.peek()
        if peek in (">", "+", "~"):
            combinator = peek.value
            tokens.next()
        elif peek is None or peek == "," or not has_whitespace:
            return result
        else:
            # whitespace with no explicit combinator => descendant combinator
            combinator = " "
        compound = parse_compound_selector(tokens, namespaces)
        result = CombinedSelector(result, combinator, compound)
def parse_compound_selector(tokens, namespaces):
    """Parse a compound selector: an optional type selector followed by any
    number of simple selectors; raise ``SelectorError`` when neither is
    present."""
    type_selectors = parse_type_selector(tokens, namespaces)
    simple_selectors = type_selectors if type_selectors is not None else []
    while 1:
        simple_selector = parse_simple_selector(tokens, namespaces)
        if simple_selector is None:
            break
        simple_selectors.append(simple_selector)
    if simple_selectors or type_selectors is not None:
        return CompoundSelector(simple_selectors)
    peek = tokens.peek()
    raise SelectorError(
        peek,
        "expected a compound selector, got %s" % (peek.type if peek else "EOF"),
    )
def parse_type_selector(tokens, namespaces):
    """Parse an optional type selector; return a list of simple selectors
    (local-name and/or namespace selectors) or None when absent."""
    tokens.skip_whitespace()
    qualified_name = parse_qualified_name(tokens, namespaces)
    if qualified_name is None:
        return None

    simple_selectors = []
    namespace, local_name = qualified_name
    if local_name is not None:
        simple_selectors.append(LocalNameSelector(local_name))
    if namespace is not None:
        simple_selectors.append(NamespaceSelector(namespace))  # type: ignore[arg-type]
    return simple_selectors
def parse_simple_selector(tokens, namespaces):
    """Parse one simple selector (``#id``, ``.class``, ``[attr...]``,
    ``:pseudo-class``, ``::pseudo-element`` or ``:func(...)``); return None
    when the next token starts no simple selector."""
    peek = tokens.peek()
    if peek is None:
        return None
    if peek.type == "hash" and peek.is_identifier:
        tokens.next()
        return IDSelector(peek.value)
    elif peek == ".":
        tokens.next()
        next = tokens.next()
        if next is None or next.type != "ident":
            raise SelectorError(next, "Expected a class name, got %s" % next)
        return ClassSelector(next.value)
    elif peek.type == "[] block":
        # the bracket block's content is parsed as its own token stream
        tokens.next()
        return parse_attribute_selector(TokenStream(peek.content), namespaces)
    elif peek == ":":
        tokens.next()
        next = tokens.next()
        if next == ":":
            # '::' introduces a pseudo-element
            next = tokens.next()
            if next is None or next.type != "ident":
                raise SelectorError(
                    next, "Expected a pseudo-element name, got %s" % next
                )
            return PseudoElementSelector(next.lower_value)
        elif next is not None and next.type == "ident":
            return PseudoClassSelector(next.lower_value)
        elif next is not None and next.type == "function":
            return FunctionalPseudoClassSelector(next.lower_name, next.arguments)
        else:
            raise SelectorError(next, "unexpected %s token." % next)
    else:
        return None
def parse_attribute_selector(tokens, namespaces):
    """Parse the content of a ``[...]`` block into an ``AttributeSelector``.

    Accepts ``[attr]`` and ``[attr <op> value]`` with operators ``=``,
    ``~=``, ``|=``, ``^=``, ``$=`` and ``*=``; as a non-standard extension,
    attribute names may be dotted (e.g. ``subject.ownedAttribute``).
    """
    tokens.skip_whitespace()
    qualified_name = parse_qualified_name(tokens, namespaces, is_attribute=True)
    if qualified_name is None:
        next = tokens.next()
        raise SelectorError(next, "expected attribute name, got %s" % next)
    namespace, local_name = qualified_name

    # Allow syntax like "subject.ownedAttribute"
    if local_name:
        name, lower_name = local_name
        while 1:
            peek = tokens.peek()
            if peek == ".":
                next = tokens.next()
                name += next.value
                lower_name += next.value
            elif peek and peek.type == "ident":
                next = tokens.next()
                name += next.value
                lower_name += next.lower_value
            else:
                break
        local_name = name, lower_name

    tokens.skip_whitespace()
    peek = tokens.peek()
    if peek is None:
        # bare [attr] form: presence test only
        operator = None
        value = None
    elif peek in ("=", "~=", "|=", "^=", "$=", "*="):
        operator = peek.value
        tokens.next()
        tokens.skip_whitespace()
        next = tokens.next()
        if next is None or next.type not in ("ident", "string"):
            next_type = "None" if next is None else next.type
            raise SelectorError(next, "expected attribute value, got %s" % next_type)
        value = next.value
    else:
        raise SelectorError(peek, "expected attribute selector operator, got %s" % peek)

    tokens.skip_whitespace()
    next = tokens.next()
    if next is not None:
        # trailing tokens after the value are invalid
        raise SelectorError(next, "expected ], got %s" % next.type)
    return AttributeSelector(namespace, local_name, operator, value)
def parse_qualified_name(tokens, namespaces, is_attribute=False):
    """Return ``(namespace, local)`` for given tokens.

    Can also return ``None`` for a wildcard.

    The empty string for ``namespace`` means "no namespace".

    ``local`` is a ``(original, lowercased)`` name pair or None for ``*``.
    """
    peek = tokens.peek()
    if peek is None:
        return None
    if peek.type == "ident":
        first_ident = tokens.next()
        peek = tokens.peek()
        if peek != "|":
            # plain name: attributes default to "no namespace", elements to
            # the declared default namespace (if any)
            namespace = "" if is_attribute else namespaces.get(None, None)
            return namespace, (first_ident.value, first_ident.lower_value)
        tokens.next()
        namespace = namespaces.get(first_ident.value)
        if namespace is None:
            raise SelectorError(
                first_ident, "undefined namespace prefix: " + first_ident.value
            )
    elif peek == "*":
        next = tokens.next()
        peek = tokens.peek()
        if peek != "|":
            if is_attribute:
                raise SelectorError(next, "Expected local name, got %s" % next.type)
            return namespaces.get(None, None), None
        tokens.next()
        namespace = None
    elif peek == "|":
        # leading '|' means "no namespace"
        tokens.next()
        namespace = ""
    else:
        return None

    # If we get here, we just consumed '|' and set ``namespace``
    next = tokens.next()
    # NOTE(review): next may be None if input ends right after '|'; the
    # .type access below would raise AttributeError — confirm callers
    # never feed a dangling '|'
    if next.type == "ident":
        return namespace, (next.value, next.lower_value)
    elif next == "*" and not is_attribute:
        return namespace, None
    else:
        raise SelectorError(next, "Expected local name, got %s" % next.type)
class SelectorError(ValueError):
    """A specialized ``ValueError`` raised for invalid selectors; constructed
    with the offending token and a message."""
class TokenStream(object):
    """Iterator over component values with one-token lookahead and helpers
    to skip whitespace/comment tokens. ``None`` signals end of input."""

    def __init__(self, tokens):
        self.tokens = iter(tokens)
        self.peeked = []  # pushed-back tokens, most recent last

    def next(self):
        """Consume and return the next token, or None at end of input."""
        return self.peeked.pop() if self.peeked else next(self.tokens, None)

    def peek(self):
        """Return the next token without consuming it (None at end)."""
        if not self.peeked:
            self.peeked.append(next(self.tokens, None))
        return self.peeked[-1]

    def skip(self, skip_types):
        """Consume consecutive tokens whose type is in ``skip_types``;
        return True if at least one token was consumed."""
        skipped = False
        while True:
            token = self.peek()
            if token is None or token.type not in skip_types:
                return skipped
            self.next()
            skipped = True

    def skip_whitespace(self):
        return self.skip(["whitespace"])

    def skip_comment(self):
        return self.skip(["comment"])

    def skip_whitespace_and_comment(self):
        return self.skip(["comment", "whitespace"])
class CombinedSelector(object):
    """Two selectors joined by a combinator: descendant ' ', child '>',
    adjacent-sibling '+' or general-sibling '~'."""

    def __init__(self, left, combinator, right):
        #: Combined or compound selector
        self.left = left
        # One of `` `` (a single space), ``>``, ``+`` or ``~``.
        self.combinator = combinator
        #: compound selector
        self.right = right

    @property
    def specificity(self):
        # element-wise sum of both sides' (a, b, c) triples
        return tuple(
            lhs + rhs
            for lhs, rhs in zip(self.left.specificity, self.right.specificity)
        )

    def __repr__(self):
        return "%r%s%r" % (self.left, self.combinator, self.right)
class CompoundSelector(object):
    """A sequence of simple selectors applying to a single element (aka.
    "sequence of simple selectors" in Selectors Level 3)."""

    def __init__(self, simple_selectors):
        self.simple_selectors = simple_selectors

    @property
    def specificity(self):
        # element-wise sum of the (a, b, c) triples; (0, 0, 0) when empty
        if not self.simple_selectors:
            return 0, 0, 0
        return tuple(
            map(sum, zip(*(sel.specificity for sel in self.simple_selectors)))
        )

    def __repr__(self):
        return "".join(repr(sel) for sel in self.simple_selectors)
class LocalNameSelector(object):
    """Matches elements by tag (local) name; keeps both the original and the
    lowercased spelling."""

    specificity = 0, 0, 1

    def __init__(self, local_name):
        # local_name is a (original, lowercased) pair
        name, lower = local_name
        self.local_name = name
        self.lower_local_name = lower

    def __repr__(self):
        return self.local_name
class NamespaceSelector(object):
    """Matches the element's namespace; does not contribute to specificity."""

    specificity = 0, 0, 0

    def __init__(self, namespace):
        #: The namespace URL as a string,
        #: or the empty string for elements not in any namespace.
        self.namespace = namespace

    def __repr__(self):
        return "|" if self.namespace == "" else "{%s}|" % self.namespace
class IDSelector(object):
    """Matches ``#ident``; highest simple-selector specificity."""

    specificity = 1, 0, 0

    def __init__(self, ident):
        self.ident = ident

    def __repr__(self):
        return "#%s" % self.ident
class ClassSelector(object):
    """Matches ``.class_name``."""

    specificity = 0, 1, 0

    def __init__(self, class_name):
        self.class_name = class_name

    def __repr__(self):
        return ".%s" % self.class_name
class AttributeSelector(object):
    """Selector of the form ``[attr]`` or ``[attr <op> value]``."""

    specificity = 0, 1, 0

    def __init__(self, namespace, name, operator, value):
        self.namespace = namespace
        self.name, self.lower_name = name
        # One of '=', '~=', '|=', '^=', '$=', '*='; None for bare [attr].
        self.operator = operator
        # The attribute value string; None for bare [attr].
        self.value = value

    def __repr__(self):
        if self.namespace is None:
            prefix = "*|"
        else:
            prefix = "{%s}" % self.namespace
        return "[%s%s%s%r]" % (prefix, self.name, self.operator, self.value)
class PseudoClassSelector:
    """Matches a plain pseudo-class like ``:hover``."""

    specificity = 0, 1, 0

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return ":%s" % self.name
class PseudoElementSelector:
    """Matches a pseudo-element like ``::before``."""

    specificity = 0, 0, 1

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "::%s" % self.name
class FunctionalPseudoClassSelector(object):
    """Matches a functional pseudo-class like ``:nth-child(2n)``; the raw
    argument tokens are kept unparsed."""

    specificity = 0, 1, 0

    def __init__(self, name, arguments):
        self.name = name
        self.arguments = arguments

    def __repr__(self):
        return ":{}{!r}".format(self.name, tuple(self.arguments))
79568083b0b0e39262ce2eb1bc28cc79cc9330b6 | 1,722 | py | Python | package/spack-py-brian/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-py-brian/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-py-brian/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBrian(PythonPackage):
    """A clock-driven simulator for spiking neural networks"""

    homepage = "http://www.briansimulator.org"
    url = "https://pypi.io/packages/source/b/brian/brian-1.4.3.tar.gz"

    version('1.4.3', '0570099bcce4d7afde73ff4126e6c30f')

    # Brian 1.x needs the scientific-Python stack at build and run time.
    depends_on('py-matplotlib@0.90.1:', type=('build', 'run'))
    depends_on('py-numpy@1.4.1:', type=('build', 'run'))
    depends_on('py-scipy@0.7.0:', type=('build', 'run'))
| 44.153846 | 78 | 0.660859 |
795680a8c66ef81d1137de841a8d5fba08f7c6d1 | 6,808 | py | Python | scripts/generate_docs.py | buldi/DIGITS | 97b8379aab7c83006548fd81fe7513cc9d48a34f | [
"BSD-3-Clause"
] | 1 | 2019-08-27T21:12:15.000Z | 2019-08-27T21:12:15.000Z | scripts/generate_docs.py | buldi/DIGITS | 97b8379aab7c83006548fd81fe7513cc9d48a34f | [
"BSD-3-Clause"
] | null | null | null | scripts/generate_docs.py | buldi/DIGITS | 97b8379aab7c83006548fd81fe7513cc9d48a34f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import time
from collections import defaultdict
# requires a custom version of Flask-Autodoc:
# pip install git+https://github.com/lukeyeager/flask-autodoc.git
from flask.ext.autodoc import Autodoc
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import digits.config; digits.config.load_config()
from digits.webapp import app, _doc as doc
class DocGenerator(object):
    """
    Generates markdown for Flask routes
    """

    def __init__(self, autodoc,
                 include_groups=None, exclude_groups=None):
        """
        Arguments:
        autodoc -- an Autodoc instance

        Keyword arguments:
        include_groups -- a list of groups to print
        exclude_groups -- a list of groups not to print
        """
        self.autodoc = autodoc
        self.include_groups = include_groups
        self.exclude_groups = exclude_groups
        self._handle = None

        # get list of groups (NOTE: iteritems() means this file targets
        # Python 2, matching the rest of the script)
        group_names = defaultdict(int)
        for func, groups in self.autodoc.func_groups.iteritems():
            for group in groups:
                group_names[group] += 1

        first_groups = ['home', 'jobs', 'datasets', 'models']
        hidden_groups = ['all']
        other_groups = [g for g in sorted(group_names.keys())
                        if g not in first_groups + hidden_groups]
        self._groups = first_groups + other_groups

    def print_header(self):
        """
        Print the document page header.

        Bug fix: generate() calls self.print_header(), but the base class
        previously defined only _print_header(self, header), so generating
        docs without a subclass override raised AttributeError. Default is
        a no-op; subclasses override this.
        """
        pass

    def generate(self, filename):
        """
        Writes the documentation to file
        """
        with open(os.path.join(
                os.path.dirname(__file__),
                filename), 'w') as self._handle:
            # Filter the known groups by the include/exclude lists.
            groups = []
            for group in self._groups:
                if (not self.include_groups or group in self.include_groups) and \
                        (not self.exclude_groups or group not in self.exclude_groups):
                    groups.append(group)

            self.print_header()
            self._print_toc(groups)
            for group in groups:
                self._print_group(group, print_header=(len(groups) > 1))

    def w(self, line='', add_newline=True):
        """
        Writes a line to file
        """
        if add_newline:
            line = '%s\n' % line
        self._handle.write(line)

    def _print_header(self, header):
        """
        Print the document page header

        Deprecated: kept for backward compatibility; generate() actually
        calls print_header() (no argument) instead.
        """
        pass

    def timestamp(self):
        """
        Returns a string which notes the current time
        """
        return time.strftime('*Generated %b %d, %Y*')

    def _print_toc(self, groups=None):
        """
        Print the table of contents
        """
        if groups is None:
            groups = self._groups
        if len(groups) <= 1:
            # No sense printing the TOC
            return

        self.w('### Table of Contents')
        self.w()
        for group in groups:
            self.w('* [%s](#%s)' % (group.capitalize(), group))
        self.w()

    def _print_group(self, group, print_header=True):
        """
        Print a group of routes
        """
        routes = self.get_routes(group)
        if not routes:
            return

        if print_header:
            self.w('## %s' % group.capitalize())
            self.w()

        for route in routes:
            self._print_route(route)

    def get_routes(self, group):
        """
        Get the routes for this group
        """
        return self.autodoc.generate(groups=group)

    def _print_route(self, route):
        """
        Print a route
        """
        self.w('### `%s`' % route['rule'])
        self.w()
        if route['docstring']:
            # Quote the route's docstring as a markdown blockquote.
            for line in route['docstring'].strip().split('\n'):
                self.w('> %s' % line.strip())
            self.w()
        self.w('Methods: ' + ', '.join(['**%s**' % m.upper() for m in
                                        sorted(route['methods'])
                                        if m not in ['HEAD', 'OPTIONS']]))
        self.w()
        if route['args'] and route['args'] != ['None']:
            args = []
            for arg in route['args']:
                args.append('`%s`' % arg)
                if route['defaults'] and arg in route['defaults']:
                    args[-1] = '%s (`%s`)' % (args[-1], route['defaults'][arg])
            self.w('Arguments: ' + ', '.join(args))
            self.w()
        if 'location' in route and route['location']:
            # get location relative to digits root
            digits_root = os.path.dirname(
                os.path.dirname(
                    os.path.normpath(digits.__file__)
                )
            )
            filename = os.path.normpath(route['location']['filename'])
            if filename.startswith(digits_root):
                filename = os.path.relpath(filename, digits_root).replace("\\", "/")
            self.w('Location: [`%s`](%s)' % (
                filename,
                os.path.join('..', filename).replace("\\", "/"),
            ))
            self.w()
class ApiDocGenerator(DocGenerator):
    """
    Generates API.md
    """

    def __init__(self, *args, **kwargs):
        # Restrict output to routes registered under the "api" group.
        super(ApiDocGenerator, self).__init__(include_groups=['api'], *args, **kwargs)

    def print_header(self):
        """Write the API.md page header."""
        text = """
# REST API

%s

DIGITS exposes its internal functionality through a REST API. You can access these endpoints by performing a GET or POST on the route, and a JSON object will be returned.

For more information about other routes used for the web interface, see [this page](FlaskRoutes.md).
""" % self.timestamp()
        self.w(text.strip())
        self.w()

    def get_routes(self, group):
        """Yield only the JSON (".json") routes from this group."""
        for route in self.autodoc.generate(groups=group):
            if '.json' in route['rule']:
                yield route
class FlaskRoutesDocGenerator(DocGenerator):
    """
    Generates FlaskRoutes.md
    """

    def __init__(self, *args, **kwargs):
        # Document everything except the "api" group (that goes to API.md).
        super(FlaskRoutesDocGenerator, self).__init__(exclude_groups=['api'], *args, **kwargs)

    def print_header(self):
        """Write the FlaskRoutes.md page header."""
        text = """
# Flask Routes

%s

Documentation on the various routes used internally for the web application.

These are all technically RESTful, but they return HTML pages. To get JSON responses, see [this page](API.md).
""" % self.timestamp()
        self.w(text.strip())
        self.w()

    def get_routes(self, group):
        """Yield only the non-JSON (HTML) routes from this group."""
        for route in self.autodoc.generate(groups=group):
            if '.json' not in route['rule']:
                yield route
if __name__ == '__main__':
    # Generate both markdown documents inside a Flask application context.
    with app.app_context():
        ApiDocGenerator(doc).generate('../docs/API.md')
        FlaskRoutesDocGenerator(doc).generate('../docs/FlaskRoutes.md')
| 30.392857 | 170 | 0.55567 |
795682d8e22dc8aaea144f94744f1feb1ddf0af6 | 2,227 | py | Python | aliyun-python-sdk-vpc/aliyunsdkvpc/request/v20160428/DescribeVpnConnectionRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-vpc/aliyunsdkvpc/request/v20160428/DescribeVpnConnectionRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-vpc/aliyunsdkvpc/request/v20160428/DescribeVpnConnectionRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class DescribeVpnConnectionRequest(RpcRequest):
    """RPC request for the Vpc "DescribeVpnConnection" API (version 2016-04-28)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'DescribeVpnConnection', 'Vpc')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # Each API parameter below is exposed as a get_/set_ pair backed by the
    # request's query-parameter map.

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_VpnConnectionId(self):
        return self.get_query_params().get('VpnConnectionId')

    def set_VpnConnectionId(self, VpnConnectionId):
        self.add_query_param('VpnConnectionId', VpnConnectionId)
795683a50349b48481442a1754bbea66f79e4116 | 879 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/raw/WGL/NV/render_depth_texture.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/WGL/NV/render_depth_texture.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/WGL/NV/render_depth_texture.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.WGL import _types as _cs
# End users want this...
from OpenGL.raw.WGL._types import *
from OpenGL.raw.WGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'WGL_NV_render_depth_texture'
def _f(function):
    # Bind the wrapped entry point to the WGL platform for this extension.
    return _p.createFunction(
        function, _p.PLATFORM.WGL, 'WGL_NV_render_depth_texture',
        error_checker=_errors._error_checker)
# Constant tokens defined by the WGL_NV_render_depth_texture extension.
WGL_BIND_TO_TEXTURE_DEPTH_NV=_C('WGL_BIND_TO_TEXTURE_DEPTH_NV',0x20A3)
WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV=_C('WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV',0x20A4)
WGL_DEPTH_COMPONENT_NV=_C('WGL_DEPTH_COMPONENT_NV',0x20A7)
WGL_DEPTH_TEXTURE_FORMAT_NV=_C('WGL_DEPTH_TEXTURE_FORMAT_NV',0x20A5)
WGL_TEXTURE_DEPTH_COMPONENT_NV=_C('WGL_TEXTURE_DEPTH_COMPONENT_NV',0x20A6)
| 43.95 | 122 | 0.843003 |
7956847beaf1d3d028f1b684183a4db14f736c69 | 6,766 | py | Python | kibble/scanners/scanners/git-sync.py | michalslowikowski00/kibble | 1aa68040a70397b3aea7b26def912ad67d717b67 | [
"Apache-2.0"
] | 3 | 2020-10-07T10:36:20.000Z | 2020-10-24T20:43:02.000Z | kibble/scanners/scanners/git-sync.py | michalslowikowski00/kibble | 1aa68040a70397b3aea7b26def912ad67d717b67 | [
"Apache-2.0"
] | null | null | null | kibble/scanners/scanners/git-sync.py | michalslowikowski00/kibble | 1aa68040a70397b3aea7b26def912ad67d717b67 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import time
from kibble.scanners.utils import git
title = "Sync plugin for Git repositories"
version = "0.1.2"
def accepts(source):
    """Return True if this scanner should handle ``source``.

    Plain git sources are always accepted; github sources are accepted
    unless they are marked issues-only (no code analysis wanted).
    """
    if source["type"] == "git":
        return True
    # There are cases where we have a github repo, but don't wanna analyze
    # the code, just issues.
    if source["type"] == "github" and not source.get("issuesonly", False):
        return True
    return False
def scan(KibbleBit, source):
    """Clone or update the git repository described by ``source``.

    Progress and failures are recorded in source["steps"]["sync"] and
    persisted via KibbleBit.updateSource(); the function returns early on
    any failure.
    """
    # Get some vars, construct a data path for the repo
    path = source["sourceID"]
    url = source["sourceURL"]
    rootpath = "%s/%s/git" % (
        KibbleBit.config["scanner"]["scratchdir"],
        source["organisation"],
    )

    # If the root path does not exist, try to make it recursively.
    if not os.path.exists(rootpath):
        try:
            os.makedirs(rootpath, exist_ok=True)
            print("Created root path %s" % rootpath)
        except Exception as err:
            source["steps"]["sync"] = {
                "time": time.time(),
                "status": "Could not create root scratch dir - permision denied?",
                "running": False,
                "good": False,
            }
            KibbleBit.updateSource(source)
            return

    # This is where the repo should be cloned
    datapath = os.path.join(rootpath, path)

    KibbleBit.pprint("Checking out %s as %s" % (url, path))

    try:
        source["steps"]["sync"] = {
            "time": time.time(),
            "status": "Fetching code data from source location...",
            "running": True,
            "good": True,
        }
        KibbleBit.updateSource(source)

        # If we already checked this out earlier, just sync it.
        if os.path.exists(datapath):
            KibbleBit.pprint("Repo %s exists, fetching changes..." % datapath)

            # Do we have a default branch here?
            branch = git.defaultBranch(source, datapath, KibbleBit)
            if len(branch) == 0:
                source["default_branch"] = branch
                source["steps"]["sync"] = {
                    "time": time.time(),
                    "status": "Could not sync with source",
                    "exception": "No default branch was found in this repository",
                    "running": False,
                    "good": False,
                }
                KibbleBit.updateSource(source)
                KibbleBit.pprint(
                    "No default branch found for %s (%s)"
                    % (source["sourceID"], source["sourceURL"])
                )
                return

            KibbleBit.pprint("Using branch %s" % branch)

            # Try twice checking out the main branch and fetching changes.
            # Sometimes we need to clean up after older scanners, which is
            # why we try twice. If first attempt fails, clean up and try again.
            for n in range(0, 2):
                try:
                    subprocess.check_output(
                        "GIT_TERMINAL_PROMPT=0 cd %s && git checkout %s && git fetch --all && git merge -X theirs --no-edit"
                        % (datapath, branch),
                        shell=True,
                        stderr=subprocess.STDOUT,
                    )
                    break
                except subprocess.CalledProcessError as err:
                    e = str(err.output).lower()
                    # We're interested in merge conflicts, which we can resolve through trickery.
                    if n > 0 or not (
                        "resolve" in e or "merge" in e or "overwritten" in e
                    ):
                        # This isn't a merge conflict, pass it to outer func
                        raise err
                    else:
                        # Switch to first commit
                        fcommit = subprocess.check_output(
                            "cd %s && git rev-list --max-parents=0 --abbrev-commit HEAD"
                            % datapath,
                            shell=True,
                            stderr=subprocess.STDOUT,
                        )
                        fcommit = fcommit.decode("ascii").strip()
                        subprocess.check_call(
                            "cd %s && git reset --hard %s" % (datapath, fcommit),
                            shell=True,
                            stderr=subprocess.STDOUT,
                        )
                        try:
                            # Bug fix: this previously referenced the
                            # misspelled name "datpath"; the resulting
                            # NameError was silently swallowed by the bare
                            # except below, so the cleanup never actually ran.
                            subprocess.check_call(
                                "cd %s && git clean -xfd" % datapath,
                                shell=True,
                                stderr=subprocess.STDOUT,
                            )
                        except Exception:
                            # Best-effort cleanup; ignore failures.
                            pass
        # This is a new repo, clone it!
        else:
            KibbleBit.pprint("%s is new, cloning...!" % datapath)
            subprocess.check_output(
                "GIT_TERMINAL_PROMPT=0 cd %s && git clone %s %s"
                % (rootpath, url, path),
                shell=True,
                stderr=subprocess.STDOUT,
            )

    except subprocess.CalledProcessError as err:
        KibbleBit.pprint("Repository sync failed (no master?)")
        KibbleBit.pprint(str(err.output))
        source["steps"]["sync"] = {
            "time": time.time(),
            "status": "Sync failed at "
            + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
            "running": False,
            "good": False,
            "exception": str(err.output),
        }
        KibbleBit.updateSource(source)
        return

    # All good, yay!
    source["steps"]["sync"] = {
        "time": time.time(),
        "status": "Source code fetched successfully at "
        + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
        "running": False,
        "good": True,
    }
    KibbleBit.updateSource(source)
| 38.662857 | 124 | 0.514336 |
795684aab53252c45fb143ce12694d8a3cd0d22c | 1,819 | py | Python | config.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | 1 | 2019-11-19T09:08:50.000Z | 2019-11-19T09:08:50.000Z | config.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | null | null | null | config.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | null | null | null | from tools.utils import make_pattern
from flask_uploads import IMAGES
# Host MySQL connection: user:password @ host:port / database
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://root:mysql@localhost:3306/test2'
# Alternate local database URIs (kept for convenience):
# SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://root:960919AB@localhost:3308/test2'
# SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://root:123456@localhost:3306/test2'
# Whether SQLAlchemy echoes the SQL it executes
SQLALCHEMY_ECHO = False
# student attr list
STUDENT_JSON_ATTR_ORDER = [
'openid', 'email', 'password', 'student_id', 'name', 'sex', 'collage', 'grade', 'edu_bg', 'tag', 'signature', 'cash']
STUDENT_JSON_PATTERN = make_pattern(len(STUDENT_JSON_ATTR_ORDER))
# organization attr list
ORGANIZATION_JSON_ATTR_ORDER = [
'openid', 'email', 'password', 'name', 'description', 'cash']
ORGANIZATION_JSON_PATTERN = make_pattern(len(ORGANIZATION_JSON_ATTR_ORDER))
# task attr list
TASK_JSON_ATTR_ORDER = [
'id', 'publish_id', 'publish_time', 'limit_time', 'limit_num', 'accept_num', 'title', 'content', 'tag', 'image_path', 'reward', 'publisher']
TASK_JSON_PATTERN = make_pattern(len(TASK_JSON_ATTR_ORDER))
# accept task attr list
ACCEPT_JSON_ATTR_ORDER = [
'id', 'tag', 'accept_id', 'task_id', 'accept_time', 'finish_time']
ACCEPT_JSON_PATTERN = make_pattern(len(ACCEPT_JSON_ATTR_ORDER))
# problem attr list (questionnaire)
PROBLEM_JSON_ATTR_ORDER = ['id', 'task_id', 'description', 'all_answers']
PROBLEM_JSON_PATTERN = make_pattern(len(PROBLEM_JSON_ATTR_ORDER))
# answer attr list
ANSWER_JSON_ATTR_ORDER = ['accept_id', 'problem_id', 'answer']
ANSWER_JSON_PATTERN = make_pattern(len(ANSWER_JSON_ATTR_ORDER))
SPLIT_STU_ORG = 1000000 # openid split point between student and organization ids
SPLIT_ANSWER = '#' # separator character used for answers
DROP_ALL = False # whether existing tables are dropped on each (re)start
UPDATE_ADD_NUM = 10 # number of items fetched per update
ADD_RANDOM_SAMPLE = False
UPLOADED_PHOTOS_DEST = './images/'
UPLOADED_PHOTO_ALLOW = IMAGES | 39.543478 | 144 | 0.766355 |
795685074ca52085b92766bb1df7b17de087f045 | 477 | py | Python | great_expectations/render/renderer/content_block/__init__.py | joshuataylor/great_expectations | 19dcead43aef9a833b3aa894a1226714a80ab840 | [
"Apache-2.0"
] | 1 | 2021-05-04T17:26:22.000Z | 2021-05-04T17:26:22.000Z | great_expectations/render/renderer/content_block/__init__.py | joshuataylor/great_expectations | 19dcead43aef9a833b3aa894a1226714a80ab840 | [
"Apache-2.0"
] | 47 | 2020-07-15T06:32:50.000Z | 2022-03-29T12:03:23.000Z | great_expectations/render/renderer/content_block/__init__.py | joshuataylor/great_expectations | 19dcead43aef9a833b3aa894a1226714a80ab840 | [
"Apache-2.0"
] | null | null | null | from great_expectations.render.renderer.content_block.validation_results_table_content_block import (
ValidationResultsTableContentBlockRenderer,
)
from .bullet_list_content_block import ExpectationSuiteBulletListContentBlockRenderer
from .exception_list_content_block import ExceptionListContentBlockRenderer
from .expectation_string import ExpectationStringRenderer
from .profiling_overview_table_content_block import (
ProfilingOverviewTableContentBlockRenderer,
)
| 43.363636 | 101 | 0.901468 |
7956897d68a3fb49d62ba696d0b6400b4f909989 | 805 | py | Python | python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py | jerrywgz/Paddle | 85c4912755b783dd7554a9d6b9dae4a7e40371bc | [
"Apache-2.0"
] | 2 | 2018-07-05T14:37:36.000Z | 2018-07-05T14:37:42.000Z | python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py | jerrywgz/Paddle | 85c4912755b783dd7554a9d6b9dae4a7e40371bc | [
"Apache-2.0"
] | 3 | 2017-07-15T14:20:08.000Z | 2019-05-06T03:16:54.000Z | python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py | jerrywgz/Paddle | 85c4912755b783dd7554a9d6b9dae4a7e40371bc | [
"Apache-2.0"
] | 1 | 2018-07-20T07:13:31.000Z | 2018-07-20T07:13:31.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test_sum_op import TestSumOp
class TestMKLDNN(TestSumOp):
def init_kernel_type(self):
self.use_mkldnn = True
if __name__ == '__main__':
unittest.main()
| 29.814815 | 74 | 0.751553 |
795689a3715af481e9356dd1c7760711561efede | 148 | py | Python | dropblock/__init__.py | transcendentsky/latent-augment | 50f43fdf9f4ab18e5a1976d873f27be3f7cddab6 | [
"MIT"
] | 556 | 2018-11-03T08:46:31.000Z | 2022-03-28T08:39:31.000Z | dropblock/__init__.py | transcendentsky/latent-augment | 50f43fdf9f4ab18e5a1976d873f27be3f7cddab6 | [
"MIT"
] | 34 | 2018-11-08T05:37:16.000Z | 2022-01-25T04:05:42.000Z | dropblock/__init__.py | transcendentsky/latent-augment | 50f43fdf9f4ab18e5a1976d873f27be3f7cddab6 | [
"MIT"
] | 90 | 2018-11-06T09:03:41.000Z | 2022-03-25T09:30:36.000Z | from .dropblock import DropBlock2D, DropBlock3D
from .scheduler import LinearScheduler
__all__ = ['DropBlock2D', 'DropBlock3D', 'LinearScheduler']
| 29.6 | 59 | 0.804054 |
79568a759b9c7ec37a7adc62c1d5ced122d99786 | 2,580 | py | Python | VimbaCam/ColorMap/ehtplot/ehtplot/extra/metroize.py | zzpwahaha/VimbaCamJILA | 3baed1b5313e6c198d54a33c2c84357035d5146a | [
"MIT"
] | 1 | 2021-06-14T11:51:37.000Z | 2021-06-14T11:51:37.000Z | VimbaCam/ColorMap/ehtplot/ehtplot/extra/metroize.py | zzpwahaha/VimbaCamJILA | 3baed1b5313e6c198d54a33c2c84357035d5146a | [
"MIT"
] | null | null | null | VimbaCam/ColorMap/ehtplot/ehtplot/extra/metroize.py | zzpwahaha/VimbaCamJILA | 3baed1b5313e6c198d54a33c2c84357035d5146a | [
"MIT"
] | 2 | 2021-01-20T16:22:57.000Z | 2021-02-14T12:31:02.000Z | # Copyright (C) 2018 Chi-kwan Chan
# Copyright (C) 2018 Steward Observatory
#
# This file is part of ehtplot.
#
# ehtplot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ehtplot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ehtplot. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from skimage.morphology import skeletonize
def rebin(arr, shape=[32, 32]):
reshape = (shape[0], arr.shape[0]//shape[0],
shape[1], arr.shape[1]//shape[1])
return arr.reshape(reshape).mean(-1).mean(1)
def translate_threshold(img, threshold=0.5):
threshold *= np.sum(img)
s = np.sort(img.flatten())
i = np.searchsorted(np.cumsum(s), threshold, side="left")
return s[i]
def metroize(img, mgrid=32, threshold=0.5):
threshold = translate_threshold(img, threshold=threshold)
img = skeletonize(img > threshold)
img = skeletonize(rebin(img, [mgrid, mgrid]) > 0)
return img
def plot_metroized(ax, img, **kwargs):
img = metroize(img, **kwargs)
sh = img.shape
s0 = sh[0]
s1 = sh[1]
for i in range(sh[0]):
for j in range(sh[1]):
if img[i,j] == 0.0: continue
c = 0
for ii in [i-1,i,i+1]:
for jj in [j-1,j,j+1]:
if ii == i and jj == j:
continue
if ii < 0 or ii >= s0:
continue
if jj < 0 or jj >= s1:
continue
if img[ii,jj] > 0.0:
if ii != i and jj != j:
if img[ii,j] > 0.0 or img[i,jj] > 0.0:
continue
ax.plot([j,(jj-j)/2+j], s1-np.array([i,(ii-i)/2+i]),
color='k')
c += 1
if c == 0:
ax.plot([j], [s1-i], marker='.', color='k')
ax.set_xlim([0, sh[1]])
ax.set_ylim([0, sh[0]])
ax.tick_params(axis='both', which='both',
top=False, bottom=False, labelbottom=False,
left=False, right=False, labelleft=False)
| 33.506494 | 76 | 0.546512 |
79568b44301cecffd209da399346ed16c72b3863 | 1,688 | py | Python | python/ray/serve/examples/echo_batching.py | vermashresth/ray | 9aaaa508cacb90a5be714478970b2191aaa43170 | [
"Apache-2.0"
] | null | null | null | python/ray/serve/examples/echo_batching.py | vermashresth/ray | 9aaaa508cacb90a5be714478970b2191aaa43170 | [
"Apache-2.0"
] | null | null | null | python/ray/serve/examples/echo_batching.py | vermashresth/ray | 9aaaa508cacb90a5be714478970b2191aaa43170 | [
"Apache-2.0"
] | null | null | null | """
This example has backend which has batching functionality enabled.
"""
import ray
from ray import serve
from ray.serve import BackendConfig
class MagicCounter:
def __init__(self, increment):
self.increment = increment
@serve.accept_batch
def __call__(self, flask_request, base_number=None):
# __call__ fn should preserve the batch size
# base_number is a python list
if serve.context.batch_size is not None:
batch_size = serve.context.batch_size
result = []
for base_num in base_number:
ret_str = "Number: {} Batch size: {}".format(
base_num, batch_size)
result.append(ret_str)
return result
return ""
serve.init(blocking=True)
serve.create_endpoint("magic_counter", "/counter")
# specify max_batch_size in BackendConfig
b_config = BackendConfig(max_batch_size=5)
serve.create_backend(
MagicCounter, "counter:v1", 42, backend_config=b_config) # increment=42
print("Backend Config for backend: 'counter:v1'")
print(b_config)
serve.link("magic_counter", "counter:v1")
handle = serve.get_handle("magic_counter")
future_list = []
# fire 30 requests
for r in range(30):
print("> [REMOTE] Pinging handle.remote(base_number={})".format(r))
f = handle.remote(base_number=r)
future_list.append(f)
# get results of queries as they complete
left_futures = future_list
while left_futures:
completed_futures, remaining_futures = ray.wait(left_futures, timeout=0.05)
if len(completed_futures) > 0:
result = ray.get(completed_futures[0])
print("< " + result)
left_futures = remaining_futures
| 29.614035 | 79 | 0.685427 |
79568b6e2e43e78c190df693a041cf0565e5e485 | 71,331 | py | Python | flink-python/pyflink/table/table_environment.py | arunjoykalathoor/flink | f171b9e33dccb5ec49f6b01ab505d0b4f526456c | [
"MIT",
"Apache-2.0",
"MIT-0",
"BSD-3-Clause"
] | 1 | 2021-10-16T00:48:35.000Z | 2021-10-16T00:48:35.000Z | flink-python/pyflink/table/table_environment.py | arunjoykalathoor/flink | f171b9e33dccb5ec49f6b01ab505d0b4f526456c | [
"MIT",
"Apache-2.0",
"MIT-0",
"BSD-3-Clause"
] | null | null | null | flink-python/pyflink/table/table_environment.py | arunjoykalathoor/flink | f171b9e33dccb5ec49f6b01ab505d0b4f526456c | [
"MIT",
"Apache-2.0",
"MIT-0",
"BSD-3-Clause"
] | 1 | 2021-03-06T15:38:45.000Z | 2021-03-06T15:38:45.000Z | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import sys
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
from typing import Union, List, Tuple
from py4j.java_gateway import get_java_class, get_method
from pyflink.common import JobExecutionResult
from pyflink.dataset import ExecutionEnvironment
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.table.catalog import Catalog
from pyflink.table.serializers import ArrowSerializer
from pyflink.table.statement_set import StatementSet
from pyflink.table.table_config import TableConfig
from pyflink.table.descriptors import StreamTableDescriptor, BatchTableDescriptor
from pyflink.java_gateway import get_gateway
from pyflink.table import Table, EnvironmentSettings
from pyflink.table.table_result import TableResult
from pyflink.table.types import _to_java_type, _create_type_verifier, RowType, DataType, \
_infer_schema_from_data, _create_converter, from_arrow_type, RowField, create_arrow_schema
from pyflink.util import utils
from pyflink.util.utils import get_j_env_configuration, is_local_deployment, load_java_class, \
to_j_explain_detail_arr
__all__ = [
'BatchTableEnvironment',
'StreamTableEnvironment',
'TableEnvironment'
]
class TableEnvironment(object, metaclass=ABCMeta):
"""
A table environment is the base class, entry point, and central context for creating Table
and SQL API programs.
It is unified for bounded and unbounded data processing.
A table environment is responsible for:
- Connecting to external systems.
- Registering and retrieving :class:`~pyflink.table.Table` and other meta objects from a
catalog.
- Executing SQL statements.
- Offering further configuration options.
The path in methods such as :func:`create_temporary_view`
should be a proper SQL identifier. The syntax is following
[[catalog-name.]database-name.]object-name, where the catalog name and database are optional.
For path resolution see :func:`use_catalog` and :func:`use_database`. All keywords or other
special characters need to be escaped.
Example: `cat.1`.`db`.`Table` resolves to an object named 'Table' (table is a reserved
keyword, thus must be escaped) in a catalog named 'cat.1' and database named 'db'.
.. note::
This environment is meant for pure table programs. If you would like to convert from or to
other Flink APIs, it might be necessary to use one of the available language-specific table
environments in the corresponding bridging modules.
"""
def __init__(self, j_tenv, serializer=PickleSerializer()):
    """
    Wraps a Java ``TableEnvironment`` instance.

    :param j_tenv: The underlying Java table environment (py4j proxy).
    :param serializer: Serializer used when exchanging data with the JVM.
    """
    self._j_tenv = j_tenv
    self._serializer = serializer
    self._is_blink_planner = TableEnvironment._judge_blink_planner(j_tenv)
    # When running in a MiniCluster, make the Python UDF worker use the
    # interpreter given by sys.executable unless the user has already set
    # the "python.executable" option explicitly.
    self._set_python_executable_for_local_executor()
@staticmethod
def _judge_blink_planner(j_tenv):
    """Return True if ``j_tenv`` is backed by the blink planner, else False."""
    # NOTE(review): membership in dir() is used instead of hasattr() —
    # presumably because py4j proxies resolve arbitrary attribute names
    # lazily, which would make hasattr() unreliable. Keep as-is.
    if "getPlanner" in dir(j_tenv):
        planner_class = j_tenv.getPlanner().getClass()
        blink_base = get_java_class(
            get_gateway().jvm.org.apache.flink.table.planner.delegation.PlannerBase)
        return blink_base.isAssignableFrom(planner_class)
    return False
def from_table_source(self, table_source):
    """
    Creates a table from a table source.

    Example:
    ::

        >>> csv_table_source = CsvTableSource(
        ...     csv_file_path, ['a', 'b'], [DataTypes.STRING(), DataTypes.BIGINT()])
        >>> table_env.from_table_source(csv_table_source)

    :param table_source: The table source used as table.
    :type table_source: pyflink.table.TableSource
    :return: The result table.
    :rtype: pyflink.table.Table
    """
    j_table = self._j_tenv.fromTableSource(table_source._j_table_source)
    return Table(j_table, self)
def register_catalog(self, catalog_name, catalog):
    """
    Registers a :class:`~pyflink.table.catalog.Catalog` under a unique name.
    All tables registered in the :class:`~pyflink.table.catalog.Catalog` can be accessed.

    :param catalog_name: The name under which the catalog will be registered.
    :type catalog_name: str
    :param catalog: The catalog to register.
    :type catalog: pyflink.table.catalog.Catalog
    """
    j_catalog = catalog._j_catalog
    self._j_tenv.registerCatalog(catalog_name, j_catalog)
def get_catalog(self, catalog_name):
    """
    Gets a registered :class:`~pyflink.table.catalog.Catalog` by name.

    :param catalog_name: The name to look up the :class:`~pyflink.table.catalog.Catalog`.
    :type catalog_name: str
    :return: The requested catalog, None if there is no registered catalog with the given name.
    :rtype: pyflink.table.catalog.Catalog
    """
    # The Java side returns an Optional<Catalog>; unwrap it here.
    optional_catalog = self._j_tenv.getCatalog(catalog_name)
    if not optional_catalog.isPresent():
        return None
    return Catalog._get(optional_catalog.get())
def register_table(self, name, table):
    """
    Registers a :class:`~pyflink.table.Table` under a unique name in the TableEnvironment's
    catalog. Registered tables can be referenced in SQL queries.

    Example:
    ::

        >>> tab = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
        >>> table_env.register_table("source", tab)

    :param name: The name under which the table will be registered.
    :type name: str
    :param table: The table to register.
    :type table: pyflink.table.Table

    .. note:: Deprecated in 1.10. Use :func:`create_temporary_view` instead.
    """
    message = "Deprecated in 1.10. Use create_temporary_view instead."
    warnings.warn(message, DeprecationWarning)
    self._j_tenv.registerTable(name, table._j_table)
def register_table_source(self, name, table_source):
    """
    Registers an external :class:`~pyflink.table.TableSource` in this
    :class:`~pyflink.table.TableEnvironment`'s catalog. Registered tables can be referenced in
    SQL queries.

    Example:
    ::

        >>> table_env.register_table_source("source",
        ...                                 CsvTableSource("./1.csv",
        ...                                                ["a", "b"],
        ...                                                [DataTypes.INT(),
        ...                                                 DataTypes.STRING()]))

    :param name: The name under which the table source is registered.
    :type name: str
    :param table_source: The table source to register.
    :type table_source: pyflink.table.TableSource

    .. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
    """
    # Fix: the emitted warning previously recommended "connect", contradicting
    # the docstring note (and connect itself is deprecated since 1.11).
    warnings.warn("Deprecated in 1.10. Use execute_sql instead.", DeprecationWarning)
    self._j_tenv.registerTableSourceInternal(name, table_source._j_table_source)
def register_table_sink(self, name, table_sink):
    """
    Registers an external :class:`~pyflink.table.TableSink` with given field names and types in
    this :class:`~pyflink.table.TableEnvironment`'s catalog. Registered sink tables can be
    referenced in SQL DML statements.

    Example:
    ::

        >>> table_env.register_table_sink("sink",
        ...                               CsvTableSink(["a", "b"],
        ...                                            [DataTypes.INT(),
        ...                                             DataTypes.STRING()],
        ...                                            "./2.csv"))

    :param name: The name under which the table sink is registered.
    :type name: str
    :param table_sink: The table sink to register.
    :type table_sink: pyflink.table.TableSink

    .. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
    """
    # Fix: the emitted warning previously recommended "connect", contradicting
    # the docstring note (and connect itself is deprecated since 1.11).
    warnings.warn("Deprecated in 1.10. Use execute_sql instead.", DeprecationWarning)
    self._j_tenv.registerTableSinkInternal(name, table_sink._j_table_sink)
def scan(self, *table_path):
    """
    Scans a registered table and returns the resulting :class:`~pyflink.table.Table`.

    A table to scan must be registered in the TableEnvironment. It can be either directly
    registered or be an external member of a :class:`~pyflink.table.catalog.Catalog`.
    See the documentation of :func:`~pyflink.table.TableEnvironment.use_database` or
    :func:`~pyflink.table.TableEnvironment.use_catalog` for the rules on the path resolution.

    Examples:

    Scanning a directly registered table
    ::

        >>> tab = table_env.scan("tableName")

    Scanning a table from a registered catalog
    ::

        >>> tab = table_env.scan("catalogName", "dbName", "tableName")

    :param table_path: The path of the table to scan.
    :type table_path: str
    :throws: Exception if no table is found using the given table path.
    :return: The resulting table.
    :rtype: pyflink.table.Table

    .. note:: Deprecated in 1.10. Use :func:`from_path` instead.
    """
    warnings.warn("Deprecated in 1.10. Use from_path instead.", DeprecationWarning)
    # The varargs path must be passed to Java as a String[].
    jvm = get_gateway().jvm
    j_paths = utils.to_jarray(jvm.String, table_path)
    return Table(self._j_tenv.scan(j_paths), self)
def from_path(self, path):
    """
    Reads a registered table and returns the resulting :class:`~pyflink.table.Table`.

    A table to scan must be registered in the :class:`~pyflink.table.TableEnvironment`.
    See the documentation of :func:`use_database` or :func:`use_catalog` for the rules on the
    path resolution.

    Examples:

    Reading a table from default catalog and database.
    ::

        >>> tab = table_env.from_path("tableName")

    Reading a table from a registered catalog.
    ::

        >>> tab = table_env.from_path("catalogName.dbName.tableName")

    Reading a table from a registered catalog with escaping. (`Table` is a reserved keyword).
    Dots in e.g. a database name also must be escaped.
    ::

        >>> tab = table_env.from_path("catalogName.`db.Name`.`Table`")

    :param path: The path of a table API object to scan.
    :type path: str
    :return: Either a table or virtual table (=view).
    :rtype: pyflink.table.Table

    .. seealso:: :func:`use_catalog`
    .. seealso:: :func:`use_database`
    .. versionadded:: 1.10.0
    """
    # "from" is a Python keyword, so the Java method has to be fetched reflectively.
    j_from = get_method(self._j_tenv, "from")
    return Table(j_from(path), self)
def insert_into(self, target_path, table):
    """
    Instructs to write the content of a :class:`~pyflink.table.Table` API object into a table.

    See the documentation of :func:`use_database` or :func:`use_catalog` for the rules on the
    path resolution.

    Example:
    ::

        >>> tab = table_env.scan("tableName")
        >>> table_env.insert_into("sink", tab)

    :param target_path: The path of the registered :class:`~pyflink.table.TableSink` to which
                        the :class:`~pyflink.table.Table` is written.
    :type target_path: str
    :param table: The Table to write to the sink.
    :type table: pyflink.table.Table

    .. versionchanged:: 1.10.0
        The signature is changed, e.g. the parameter *table_path_continued* was removed and
        the parameter *target_path* is moved before the parameter *table*.

    .. note:: Deprecated in 1.11. Use :func:`execute_insert` for single sink,
              use :func:`create_statement_set` for multiple sinks.
    """
    # Fix: the two string parts previously concatenated to "...single sink,use ..."
    # (missing space after the comma).
    warnings.warn("Deprecated in 1.11. Use execute_insert for single sink, "
                  "use create_statement_set for multiple sinks.", DeprecationWarning)
    self._j_tenv.insertInto(target_path, table._j_table)
def list_catalogs(self):
    """
    Gets the names of all catalogs registered in this environment.

    :return: List of catalog names.
    :rtype: list[str]
    """
    # Materialize the Java String[] into a Python list.
    return list(self._j_tenv.listCatalogs())
def list_modules(self):
    """
    Gets the names of all modules registered in this environment.

    :return: List of module names.
    :rtype: list[str]

    .. versionadded:: 1.10.0
    """
    return list(self._j_tenv.listModules())
def list_databases(self):
    """
    Gets the names of all databases in the current catalog.

    :return: List of database names in the current catalog.
    :rtype: list[str]
    """
    return list(self._j_tenv.listDatabases())
def list_tables(self):
    """
    Gets the names of all tables and views in the current database of the current catalog.
    It returns both temporary and permanent tables and views.

    :return: List of table and view names in the current database of the current catalog.
    :rtype: list[str]
    """
    return list(self._j_tenv.listTables())
def list_views(self):
    """
    Gets the names of all views in the current database of the current catalog.
    It returns both temporary and permanent views.

    :return: List of view names in the current database of the current catalog.
    :rtype: list[str]

    .. versionadded:: 1.11.0
    """
    return list(self._j_tenv.listViews())
def list_user_defined_functions(self):
    """
    Gets the names of all user defined functions registered in this environment.

    :return: List of the names of all user defined functions registered in this environment.
    :rtype: list[str]
    """
    return list(self._j_tenv.listUserDefinedFunctions())
def list_functions(self):
    """
    Gets the names of all functions in this environment.

    :return: List of the names of all functions in this environment.
    :rtype: list[str]

    .. versionadded:: 1.10.0
    """
    return list(self._j_tenv.listFunctions())
def list_temporary_tables(self):
    """
    Gets the names of all temporary tables and views available in the current namespace
    (the current database of the current catalog).

    :return: A list of the names of all registered temporary tables and views in the current
             database of the current catalog.
    :rtype: list[str]

    .. seealso:: :func:`list_tables`
    .. versionadded:: 1.10.0
    """
    return list(self._j_tenv.listTemporaryTables())
def list_temporary_views(self):
    """
    Gets the names of all temporary views available in the current namespace (the current
    database of the current catalog).

    :return: A list of the names of all registered temporary views in the current database
             of the current catalog.
    :rtype: list[str]

    .. seealso:: :func:`list_tables`
    .. versionadded:: 1.10.0
    """
    return list(self._j_tenv.listTemporaryViews())
def drop_temporary_table(self, table_path):
    """
    Drops a temporary table registered in the given path.

    If a permanent table with a given path exists, it will be used
    from now on for any queries that reference this path.

    :param table_path: The path of the registered temporary table.
    :type table_path: str
    :return: True if a table existed in the given path and was removed.
    :rtype: bool

    .. versionadded:: 1.10.0
    """
    dropped = self._j_tenv.dropTemporaryTable(table_path)
    return dropped
def drop_temporary_view(self, view_path):
    """
    Drops a temporary view registered in the given path.

    If a permanent table or view with a given path exists, it will be used
    from now on for any queries that reference this path.

    :param view_path: The path of the registered temporary view.
    :type view_path: str
    :return: True if a view existed in the given path and was removed.
    :rtype: bool

    .. versionadded:: 1.10.0
    """
    dropped = self._j_tenv.dropTemporaryView(view_path)
    return dropped
def explain(self, table=None, extended=False):
    """
    Returns the AST of the specified Table API and SQL queries and the execution plan to compute
    the result of the given :class:`~pyflink.table.Table` or multi-sinks plan.

    :param table: The table to be explained. If table is None, explain for multi-sinks plan,
                  else for given table.
    :type table: pyflink.table.Table
    :param extended: If the plan should contain additional properties,
                     e.g. estimated cost, traits
    :type extended: bool
    :return: The AST and the execution plan of the given table (or of the multi-sinks plan
             when ``table`` is None), rendered as a string.
    :rtype: str

    .. note:: Deprecated in 1.11. Use :class:`Table`#:func:`explain` instead.
    """
    warnings.warn("Deprecated in 1.11. Use Table#explain instead.", DeprecationWarning)
    if table is None:
        return self._j_tenv.explain(extended)
    else:
        return self._j_tenv.explain(table._j_table, extended)
def explain_sql(self, stmt, *extra_details):
    """
    Returns the AST of the specified statement and the execution plan.

    :param stmt: The statement for which the AST and execution plan will be returned.
    :type stmt: str
    :param extra_details: The extra explain details which the explain result should include,
                          e.g. estimated cost, changelog mode for streaming
    :type extra_details: tuple[ExplainDetail] (variable-length arguments of ExplainDetail)
    :return: The AST and the execution plan of the statement, rendered as a string.
    :rtype: str

    .. versionadded:: 1.11.0
    """
    # Convert the Python varargs into a Java ExplainDetail[].
    j_extra_details = to_j_explain_detail_arr(extra_details)
    return self._j_tenv.explainSql(stmt, j_extra_details)
def sql_query(self, query):
    """
    Evaluates a SQL query on registered tables and retrieves the result as a
    :class:`~pyflink.table.Table`.

    All tables referenced by the query must be registered in the TableEnvironment.

    A :class:`~pyflink.table.Table` is automatically registered when its
    :func:`~Table.__str__` method is called, for example when it is embedded into a String.

    Hence, SQL queries can directly reference a :class:`~pyflink.table.Table` as follows:
    ::

        >>> table = ...
        # the table is not registered to the table environment
        >>> table_env.sql_query("SELECT * FROM %s" % table)

    :param query: The sql query string.
    :type query: str
    :return: The result table.
    :rtype: pyflink.table.Table
    """
    return Table(self._j_tenv.sqlQuery(query), self)
def execute_sql(self, stmt):
    """
    Execute the given single statement, and return the execution result.

    The statement can be DDL/DML/DQL/SHOW/DESCRIBE/EXPLAIN/USE.
    For DML and DQL, this method returns TableResult once the job has been submitted.
    For DDL and DCL statements, TableResult is returned once the operation has finished.

    :param stmt: The statement to execute.
    :type stmt: str
    :return: content for DQL/SHOW/DESCRIBE/EXPLAIN,
             the affected row count for `DML` (-1 means unknown),
             or a string message ("OK") for other statements.
    :rtype: pyflink.table.table_result.TableResult

    .. versionadded:: 1.11.0
    """
    self._before_execute()
    j_table_result = self._j_tenv.executeSql(stmt)
    return TableResult(j_table_result)
def create_statement_set(self):
    """
    Create a StatementSet instance which accepts DML statements or Tables,
    the planner can optimize all added statements and Tables together
    and then submit as one job.

    :return: statement_set instance
    :rtype: pyflink.table.StatementSet

    .. versionadded:: 1.11.0
    """
    j_statement_set = self._j_tenv.createStatementSet()
    return StatementSet(j_statement_set, self)
def sql_update(self, stmt):
    """
    Evaluates a SQL statement such as INSERT, UPDATE or DELETE or a DDL statement

    .. note::

        Currently only SQL INSERT statements and CREATE TABLE statements are supported.

    All tables referenced by the query must be registered in the TableEnvironment.
    A :class:`~pyflink.table.Table` is automatically registered when its
    :func:`~Table.__str__` method is called, for example when it is embedded into a String.
    Hence, SQL queries can directly reference a :class:`~pyflink.table.Table` as follows:
    ::

        # register the table sink into which the result is inserted.
        >>> table_env.register_table_sink("sink_table", table_sink)
        >>> source_table = ...
        # source_table is not registered to the table environment
        >>> table_env.sql_update("INSERT INTO sink_table SELECT * FROM %s" % source_table)

    A DDL statement can also be executed to create/drop a table:
    For example, the below DDL statement would create a CSV table named `tbl1`
    into the current catalog::

        create table tbl1(
            a int,
            b bigint,
            c varchar
        ) with (
            'connector.type' = 'filesystem',
            'format.type' = 'csv',
            'connector.path' = 'xxx'
        )

    SQL queries can directly execute as follows:
    ::

        >>> source_ddl = \\
        ... '''
        ... create table sourceTable(
        ...     a int,
        ...     b varchar
        ... ) with (
        ...     'connector.type' = 'kafka',
        ...     'update-mode' = 'append',
        ...     'connector.topic' = 'xxx',
        ...     'connector.properties.bootstrap.servers' = 'localhost:9092'
        ... )
        ... '''

        >>> sink_ddl = \\
        ... '''
        ... create table sinkTable(
        ...     a int,
        ...     b varchar
        ... ) with (
        ...     'connector.type' = 'filesystem',
        ...     'format.type' = 'csv',
        ...     'connector.path' = 'xxx'
        ... )
        ... '''

        >>> query = "INSERT INTO sinkTable SELECT * FROM sourceTable"
        >>> table_env.sql_update(source_ddl)
        >>> table_env.sql_update(sink_ddl)
        >>> table_env.sql_update(query)
        >>> table_env.execute("MyJob")

    :param stmt: The SQL statement to evaluate.
    :type stmt: str

    .. note:: Deprecated in 1.11. Use :func:`execute_sql` for single statement,
              use :func:`create_statement_set` for multiple DML statements.
    """
    warnings.warn("Deprecated in 1.11. Use execute_sql for single statement, "
                  "use create_statement_set for multiple DML statements.", DeprecationWarning)
    self._j_tenv.sqlUpdate(stmt)
def get_current_catalog(self):
    """
    Gets the current default catalog name of the current session.

    :return: The current default catalog name that is used for the path resolution.
    :rtype: str

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
    """
    current = self._j_tenv.getCurrentCatalog()
    return current
def use_catalog(self, catalog_name):
    """
    Sets the current catalog to the given value. It also sets the default
    database to the catalog's default one.
    See also :func:`~TableEnvironment.use_database`.

    This is used during the resolution of object paths. Both the catalog and database are
    optional when referencing catalog objects such as tables, views etc. The algorithm looks
    for requested objects in the following paths, in that order:

        * ``[current-catalog].[current-database].[requested-path]``
        * ``[current-catalog].[requested-path]``
        * ``[requested-path]``

    Example: with the default catalog ``default_catalog`` (containing databases
    ``default_database`` and ``db1``) and another catalog ``cat1`` (containing ``db1``),
    each holding a table ``tab1``, requested paths resolve as follows:

        * ``tab1`` -> ``default_catalog.default_database.tab1``
        * ``db1.tab1`` -> ``default_catalog.db1.tab1``
        * ``cat1.db1.tab1`` -> ``cat1.db1.tab1``

    :param catalog_name: The name of the catalog to set as the current default catalog.
    :type catalog_name: str
    :throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if a catalog with given
             name could not be set as the default one.

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
    """
    self._j_tenv.useCatalog(catalog_name)
def get_current_database(self):
    """
    Gets the current default database name of the running session.

    :return: The name of the current database of the current catalog.
    :rtype: str

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
    """
    current = self._j_tenv.getCurrentDatabase()
    return current
def use_database(self, database_name):
    """
    Sets the current default database. It has to exist in the current catalog. That path will
    be used as the default one when looking for unqualified object names.

    This is used during the resolution of object paths. Both the catalog and database are
    optional when referencing catalog objects such as tables, views etc. The algorithm looks
    for requested objects in the following paths, in that order:

        * ``[current-catalog].[current-database].[requested-path]``
        * ``[current-catalog].[requested-path]``
        * ``[requested-path]``

    Example: with the default catalog ``default_catalog`` (containing databases
    ``default_database`` and ``db1``) and another catalog ``cat1`` (containing ``db1``),
    each holding a table ``tab1``, requested paths resolve as follows:

        * ``tab1`` -> ``default_catalog.default_database.tab1``
        * ``db1.tab1`` -> ``default_catalog.db1.tab1``
        * ``cat1.db1.tab1`` -> ``cat1.db1.tab1``

    :param database_name: The name of the database to set as the current database.
    :type database_name: str
    :throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if the given catalog and
             database could not be set as the default ones.

    .. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
    """
    self._j_tenv.useDatabase(database_name)
@abstractmethod
def get_config(self):
    """
    Returns the table config to define the runtime behavior of the Table API.

    :return: Current table config.
    :rtype: pyflink.table.TableConfig
    """
@abstractmethod
def connect(self, connector_descriptor):
    """
    Creates a temporary table from a descriptor.

    Descriptors allow for declaring the communication to external systems in an
    implementation-agnostic way. The classpath is scanned for suitable table factories that
    match the desired configuration.

    The following example shows how to read from a connector using a JSON format and
    registering a temporary table as "MyTable":

    Example:
    ::

        >>> table_env \\
        ...     .connect(ExternalSystemXYZ()
        ...              .version("0.11")) \\
        ...     .with_format(Json()
        ...                  .json_schema("{...}")
        ...                  .fail_on_missing_field(False)) \\
        ...     .with_schema(Schema()
        ...                  .field("user-name", "VARCHAR")
        ...                  .from_origin_field("u_name")
        ...                  .field("count", "DECIMAL")) \\
        ...     .create_temporary_table("MyTable")

    :param connector_descriptor: Connector descriptor describing the external system.
    :type connector_descriptor: pyflink.table.descriptors.ConnectorDescriptor
    :return: A :class:`~pyflink.table.descriptors.ConnectTableDescriptor` used to build the
             temporary table.
    :rtype: pyflink.table.descriptors.ConnectTableDescriptor

    .. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
    """
def register_java_function(self, name, function_class_name):
    """
    Registers a java user defined function under a unique name. Replaces already existing
    user-defined functions under this name. The acceptable function type contains
    **ScalarFunction**, **TableFunction** and **AggregateFunction**.

    Example:
    ::

        >>> table_env.register_java_function("func1", "java.user.defined.function.class.name")

    :param name: The name under which the function is registered.
    :type name: str
    :param function_class_name: The java full qualified class name of the function to register.
                                The function must have a public no-argument constructor and can
                                be founded in current Java classloader.
    :type function_class_name: str
    """
    gateway = get_gateway()
    java_function = gateway.jvm.Thread.currentThread().getContextClassLoader()\
        .loadClass(function_class_name).newInstance()
    # Temporary special-casing for the blink batch planner until the old
    # TypeInformation-based type system is replaced by DataType.
    if self._is_blink_planner and isinstance(self, BatchTableEnvironment):
        if self._is_table_function(java_function):
            self._register_table_function(name, java_function)
            return
        if self._is_aggregate_function(java_function):
            self._register_aggregate_function(name, java_function)
            return
    self._j_tenv.registerFunction(name, java_function)
def register_function(self, name, function):
    """
    Registers a python user-defined function under a unique name. Replaces already existing
    user-defined function under this name.

    Example:
    ::

        >>> table_env.register_function(
        ...     "add_one", udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT()))

        >>> @udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()],
        ...      result_type=DataTypes.BIGINT())
        ... def add(i, j):
        ...     return i + j
        >>> table_env.register_function("add", add)

        >>> class SubtractOne(ScalarFunction):
        ...     def eval(self, i):
        ...         return i - 1
        >>> table_env.register_function(
        ...     "subtract_one", udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT()))

    :param name: The name under which the function is registered.
    :type name: str
    :param function: The python user-defined function to register.
    :type function: pyflink.table.udf.UserDefinedFunctionWrapper

    .. versionadded:: 1.10.0
    """
    java_function = function.java_user_defined_function()
    # Temporary special-casing for the blink batch planner until the old
    # TypeInformation-based type system is replaced by DataType.
    needs_table_function_path = (
        self._is_blink_planner
        and isinstance(self, BatchTableEnvironment)
        and self._is_table_function(java_function))
    if needs_table_function_path:
        self._register_table_function(name, java_function)
    else:
        self._j_tenv.registerFunction(name, java_function)
def create_temporary_view(self, view_path, table):
    """
    Registers a :class:`~pyflink.table.Table` API object as a temporary view similar to SQL
    temporary views.

    Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
    it will be inaccessible in the current session. To make the permanent object available
    again you can drop the corresponding temporary object.

    :param view_path: The path under which the view will be registered. See also the
                      :class:`~pyflink.table.TableEnvironment` class description for the format
                      of the path.
    :type view_path: str
    :param table: The view to register.
    :type table: pyflink.table.Table

    .. versionadded:: 1.10.0
    """
    j_table = table._j_table
    self._j_tenv.createTemporaryView(view_path, j_table)
def add_python_file(self, file_path):
    """
    Adds a python dependency which could be python files, python packages or
    local directories. They will be added to the PYTHONPATH of the python UDF worker.
    Please make sure that these dependencies can be imported.

    :param file_path: The path of the python dependency.
    :type file_path: str

    .. versionadded:: 1.10.0
    """
    jvm = get_gateway().jvm
    config = self.get_config().get_configuration()
    key = jvm.PythonOptions.PYTHON_FILES.key()
    # Append to any previously configured file list rather than replacing it.
    existing = config.get_string(key, None)
    if existing is None:
        merged = file_path
    else:
        merged = jvm.PythonDependencyUtils.FILE_DELIMITER.join([existing, file_path])
    config.set_string(key, merged)
def set_python_requirements(self, requirements_file_path, requirements_cache_dir=None):
    """
    Specifies a requirements.txt file which defines the third-party dependencies.
    These dependencies will be installed to a temporary directory and added to the
    PYTHONPATH of the python UDF worker.

    For the dependencies which could not be accessed in the cluster, a directory which contains
    the installation packages of these dependencies could be specified using the parameter
    "requirements_cached_dir". It will be uploaded to the cluster to support offline
    installation.

    Example:
    ::

        # commands executed in shell
        $ echo numpy==1.16.5 > requirements.txt
        $ pip download -d cached_dir -r requirements.txt --no-binary :all:

        # python code
        >>> table_env.set_python_requirements("requirements.txt", "cached_dir")

    .. note::

        Please make sure the installation packages matches the platform of the cluster
        and the python version used. These packages will be installed using pip,
        so also make sure the version of Pip (version >= 7.1.0) and the version of
        SetupTools (version >= 37.0.0).

    :param requirements_file_path: The path of "requirements.txt" file.
    :type requirements_file_path: str
    :param requirements_cache_dir: The path of the local directory which contains the
                                   installation packages.
    :type requirements_cache_dir: str

    .. versionadded:: 1.10.0
    """
    jvm = get_gateway().jvm
    requirements = requirements_file_path
    # The optional cache dir is encoded into the same option value,
    # separated by the parameter delimiter.
    if requirements_cache_dir is not None:
        requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
            [requirements, requirements_cache_dir])
    self.get_config().get_configuration().set_string(
        jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), requirements)
def add_python_archive(self, archive_path, target_dir=None):
    """
    Adds a python archive file. The file will be extracted to the working directory of
    python UDF worker.

    If the parameter "target_dir" is specified, the archive file will be extracted to a
    directory named ${target_dir}. Otherwise, the archive file will be extracted to a
    directory with the same name of the archive file.

    If python UDF depends on a specific python version which does not exist in the cluster,
    this method can be used to upload the virtual environment.
    Note that the path of the python interpreter contained in the uploaded environment
    should be specified via the method :func:`pyflink.table.TableConfig.set_python_executable`.

    The files uploaded via this method are also accessible in UDFs via relative path.

    Example:
    ::

        # command executed in shell
        # assert the relative path of python interpreter is py_env/bin/python
        $ zip -r py_env.zip py_env

        # python code
        >>> table_env.add_python_archive("py_env.zip")
        >>> table_env.get_config().set_python_executable("py_env.zip/py_env/bin/python")

        # or
        >>> table_env.add_python_archive("py_env.zip", "myenv")
        >>> table_env.get_config().set_python_executable("myenv/py_env/bin/python")

        # the files contained in the archive file can be accessed in UDF
        >>> def my_udf():
        ...     with open("myenv/py_env/data/data.txt") as f:
        ...         ...

    .. note::

        Please make sure the uploaded python environment matches the platform that the cluster
        is running on and that the python version must be 3.5 or higher.

    .. note::

        Currently only zip-format is supported. i.e. zip, jar, whl, egg, etc.
        The other archive formats such as tar, tar.gz, 7z, rar, etc are not supported.

    :param archive_path: The archive file path.
    :type archive_path: str
    :param target_dir: Optional, the target dir name that the archive file extracted to.
    :type target_dir: str

    .. versionadded:: 1.10.0
    """
    jvm = get_gateway().jvm
    # The optional target dir is encoded into the same entry, separated by
    # the parameter delimiter.
    if target_dir is not None:
        archive_path = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
            [archive_path, target_dir])
    config = self.get_config().get_configuration()
    key = jvm.PythonOptions.PYTHON_ARCHIVES.key()
    # Append to any previously configured archive list rather than replacing it.
    existing_archives = config.get_string(key, None)
    if existing_archives is None:
        merged_archives = archive_path
    else:
        merged_archives = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
            [existing_archives, archive_path])
    config.set_string(key, merged_archives)
def execute(self, job_name):
    """
    Triggers the program execution. The environment will execute all parts of
    the program.

    The program execution will be logged and displayed with the provided name.

    .. note::

        It is highly advised to set all parameters in the :class:`~pyflink.table.TableConfig`
        on the very beginning of the program. It is undefined what configurations values will
        be used for the execution if queries are mixed with config changes. It depends on
        the characteristic of the particular parameter. For some of them the value from the
        point in time of query construction (e.g. the current catalog) will be used. On the
        other hand some values might be evaluated according to the state from the time when
        this method is called (e.g. timezone).

    :param job_name: Desired name of the job.
    :type job_name: str
    :return: The result of the job execution, containing elapsed time and accumulators.

    .. note:: Deprecated in 1.11. Use :func:`execute_sql` for single sink,
              use :func:`create_statement_set` for multiple sinks.
    """
    warnings.warn("Deprecated in 1.11. Use execute_sql for single sink, "
                  "use create_statement_set for multiple sinks.", DeprecationWarning)
    self._before_execute()
    j_execution_result = self._j_tenv.execute(job_name)
    return JobExecutionResult(j_execution_result)
    def from_elements(self, elements, schema=None, verify_schema=True):
        """
        Creates a table from a collection of elements.

        The elements types must be acceptable atomic types or acceptable composite types.
        All elements must be of the same type.
        If the elements types are composite types, the composite types must be strictly equal,
        and its subtypes must also be acceptable types.
        e.g. if the elements are tuples, the length of the tuples must be equal, the element types
        of the tuples must be equal in order.

        The built-in acceptable atomic element types contains:
        **int**, **long**, **str**, **unicode**, **bool**,
        **float**, **bytearray**, **datetime.date**, **datetime.time**, **datetime.datetime**,
        **datetime.timedelta**, **decimal.Decimal**

        The built-in acceptable composite element types contains:
        **list**, **tuple**, **dict**, **array**, :class:`~pyflink.table.Row`

        If the element type is a composite type, it will be unboxed.
        e.g. table_env.from_elements([(1, 'Hi'), (2, 'Hello')]) will return a table like:

        +----+-------+
        | _1 | _2    |
        +====+=======+
        | 1  | Hi    |
        +----+-------+
        | 2  | Hello |
        +----+-------+

        "_1" and "_2" are generated field names.

        Example:
        ::

            # use the second parameter to specify custom field names
            >>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
            # use the second parameter to specify custom table schema
            >>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
            ...                         DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
            ...                                        DataTypes.FIELD("b", DataTypes.STRING())]))
            # use the third parameter to switch whether to verify the elements against the schema
            >>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
            ...                         DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
            ...                                        DataTypes.FIELD("b", DataTypes.STRING())]),
            ...                         False)

        :param elements: The elements to create a table from.
        :type elements: list
        :param schema: The schema of the table.
        :type schema: pyflink.table.types.DataType or list[str]
        :param verify_schema: Whether to verify the elements against the schema.
        :type verify_schema: bool
        :return: The result table.
        :rtype: pyflink.table.Table
        """
        # Build verify_obj, an identity function that (optionally) type-checks
        # each element on the way through. Three cases depending on the schema.
        if isinstance(schema, RowType):
            verify_func = _create_type_verifier(schema) if verify_schema else lambda _: True
            def verify_obj(obj):
                verify_func(obj)
                return obj
        elif isinstance(schema, DataType):
            # A single (non-row) data type: wrap it into a one-field row so the
            # rest of the pipeline always deals with rows.
            data_type = schema
            schema = RowType().add("value", schema)
            verify_func = _create_type_verifier(
                data_type, name="field value") if verify_schema else lambda _: True
            def verify_obj(obj):
                verify_func(obj)
                return obj
        else:
            # Schema is None or a list of field names: nothing to verify against.
            def verify_obj(obj):
                return obj
        # Materialize one-shot iterables; the data is traversed more than once below.
        if "__len__" not in dir(elements):
            elements = list(elements)
        # infers the schema if not specified
        if schema is None or isinstance(schema, (list, tuple)):
            schema = _infer_schema_from_data(elements, names=schema)
            converter = _create_converter(schema)
            elements = map(converter, elements)
            # NOTE(review): `schema` was just reassigned to the inferred type above,
            # so this list/tuple check looks like it can never fire here — confirm
            # against the return type of _infer_schema_from_data.
            if isinstance(schema, (list, tuple)):
                for i, name in enumerate(schema):
                    schema.fields[i].name = name
                    schema.names[i] = name
        elif not isinstance(schema, RowType):
            raise TypeError(
                "schema should be RowType, list, tuple or None, but got: %s" % schema)
        # verifies the elements against the specified schema
        elements = map(verify_obj, elements)
        # converts python data to sql data
        elements = [schema.to_sql_type(element) for element in elements]
        return self._from_elements(elements, schema)
    def _from_elements(self, elements, schema):
        """
        Creates a table from a collection of elements.

        The elements are pickled to a temporary file with the batched
        serializer; the Java side then reads that file back as an input format.

        :param elements: The elements to create a table from.
        :param schema: The row type describing the elements.
        :return: The result :class:`~pyflink.table.Table`.
        """
        # serializes to a file, and we read the file in java
        temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
        serializer = BatchedSerializer(self._serializer)
        try:
            with temp_file:
                serializer.dump_to_stream(elements, temp_file)
            row_type_info = _to_java_type(schema)
            execution_config = self._get_j_env().getConfig()
            gateway = get_gateway()
            j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, True)
            # The blink planner ships its own copies of these utility classes
            # under a different Java package.
            if self._is_blink_planner:
                PythonTableUtils = gateway.jvm \
                    .org.apache.flink.table.planner.utils.python.PythonTableUtils
                PythonInputFormatTableSource = gateway.jvm \
                    .org.apache.flink.table.planner.utils.python.PythonInputFormatTableSource
            else:
                PythonTableUtils = gateway.jvm.PythonTableUtils
                PythonInputFormatTableSource = gateway.jvm.PythonInputFormatTableSource
            j_input_format = PythonTableUtils.getInputFormat(
                j_objs, row_type_info, execution_config)
            j_table_source = PythonInputFormatTableSource(
                j_input_format, row_type_info)
            return Table(self._j_tenv.fromTableSource(j_table_source), self)
        finally:
            # The file is only needed while the JVM reads it; always clean up.
            os.unlink(temp_file.name)
    def from_pandas(self, pdf,
                    schema: Union[RowType, List[str], Tuple[str], List[DataType],
                                  Tuple[DataType]] = None,
                    splits_num: int = 1) -> Table:
        """
        Creates a table from a pandas DataFrame.

        Example:
        ::

            >>> pdf = pd.DataFrame(np.random.rand(1000, 2))
            # use the second parameter to specify custom field names
            >>> table_env.from_pandas(pdf, ["a", "b"])
            # use the second parameter to specify custom field types
            >>> table_env.from_pandas(pdf, [DataTypes.DOUBLE(), DataTypes.DOUBLE()]))
            # use the second parameter to specify custom table schema
            >>> table_env.from_pandas(pdf,
            ...                       DataTypes.ROW([DataTypes.FIELD("a", DataTypes.DOUBLE()),
            ...                                      DataTypes.FIELD("b", DataTypes.DOUBLE())]))

        :param pdf: The pandas DataFrame.
        :param schema: The schema of the converted table.
        :param splits_num: The number of splits the given Pandas DataFrame will be split into. It
                           determines the number of parallel source tasks.
                           If not specified, the default parallelism will be used.
        :return: The result table.

        .. versionadded:: 1.11.0
        """
        if not self._is_blink_planner and isinstance(self, BatchTableEnvironment):
            raise TypeError("It doesn't support to convert from Pandas DataFrame in the batch "
                            "mode of old planner")
        import pandas as pd
        if not isinstance(pdf, pd.DataFrame):
            raise TypeError("Unsupported type, expected pandas.DataFrame, got %s" % type(pdf))
        import pyarrow as pa
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        # Derive the Flink row type either from the user-supplied schema or from
        # the arrow schema inferred from the DataFrame.
        if schema is not None:
            if isinstance(schema, RowType):
                result_type = schema
            elif isinstance(schema, (list, tuple)) and isinstance(schema[0], str):
                # Only field names given: take the field types from arrow.
                result_type = RowType(
                    [RowField(field_name, from_arrow_type(field.type, field.nullable))
                     for field_name, field in zip(schema, arrow_schema)])
            elif isinstance(schema, (list, tuple)) and isinstance(schema[0], DataType):
                # Only field types given: take the field names from arrow.
                result_type = RowType(
                    [RowField(field_name, field_type) for field_name, field_type in zip(
                        arrow_schema.names, schema)])
            else:
                raise TypeError("Unsupported schema type, it could only be of RowType, a "
                                "list of str or a list of DataType, got %s" % schema)
        else:
            result_type = RowType([RowField(field.name, from_arrow_type(field.type, field.nullable))
                                   for field in arrow_schema])
        # serializes to a file, and we read the file in java
        temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
        import pytz
        serializer = ArrowSerializer(
            create_arrow_schema(result_type.field_names(), result_type.field_types()),
            result_type,
            pytz.timezone(self.get_config().get_local_timezone()))
        # Ceiling division: each split holds at most `step` rows.
        step = -(-len(pdf) // splits_num)
        pdf_slices = [pdf.iloc[start:start + step] for start in range(0, len(pdf), step)]
        # One list of column Series per slice, as expected by the arrow serializer.
        data = [[c for (_, c) in pdf_slice.iteritems()] for pdf_slice in pdf_slices]
        try:
            with temp_file:
                serializer.dump_to_stream(data, temp_file)
            jvm = get_gateway().jvm
            data_type = jvm.org.apache.flink.table.types.utils.TypeConversions\
                .fromLegacyInfoToDataType(_to_java_type(result_type))
            if self._is_blink_planner:
                data_type = data_type.bridgedTo(
                    load_java_class('org.apache.flink.table.data.RowData'))
            j_arrow_table_source = \
                jvm.org.apache.flink.table.runtime.arrow.ArrowUtils.createArrowTableSource(
                    data_type, temp_file.name)
            return Table(self._j_tenv.fromTableSource(j_arrow_table_source), self)
        finally:
            os.unlink(temp_file.name)
def _set_python_executable_for_local_executor(self):
jvm = get_gateway().jvm
j_config = get_j_env_configuration(self)
if not j_config.containsKey(jvm.PythonOptions.PYTHON_EXECUTABLE.key()) \
and is_local_deployment(j_config):
j_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), sys.executable)
def _add_jars_to_j_env_config(self, config_key):
jvm = get_gateway().jvm
jar_urls = self.get_config().get_configuration().get_string(config_key, None)
if jar_urls is not None:
# normalize and remove duplicates
jar_urls_set = set([jvm.java.net.URL(url).toString() for url in jar_urls.split(";")])
j_configuration = get_j_env_configuration(self)
if j_configuration.containsKey(config_key):
for url in j_configuration.getString(config_key, "").split(";"):
jar_urls_set.add(url)
j_configuration.setString(config_key, ";".join(jar_urls_set))
    @abstractmethod
    def _get_j_env(self):
        """Returns the Java execution environment backing this table environment;
        implemented by the stream/batch subclasses."""
        pass
@staticmethod
def _is_table_function(java_function):
java_function_class = java_function.getClass()
j_table_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.TableFunction)
return j_table_function_class.isAssignableFrom(java_function_class)
@staticmethod
def _is_aggregate_function(java_function):
java_function_class = java_function.getClass()
j_aggregate_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.UserDefinedAggregateFunction)
return j_aggregate_function_class.isAssignableFrom(java_function_class)
def _register_table_function(self, name, table_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfTableFunction(table_function)
function_catalog.registerTempSystemTableFunction(name, table_function, result_type)
def _register_aggregate_function(self, name, aggregate_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfAggregateFunction(aggregate_function)
acc_type = helper.getAccumulatorTypeOfAggregateFunction(aggregate_function)
function_catalog.registerTempSystemAggregateFunction(
name, aggregate_function, result_type, acc_type)
    def _get_function_catalog(self):
        """Returns the Java ``FunctionCatalog`` of the wrapped table environment.

        The field is private on the Java side and has no public accessor, so it
        is read via reflection.
        """
        function_catalog_field = self._j_tenv.getClass().getDeclaredField("functionCatalog")
        function_catalog_field.setAccessible(True)
        function_catalog = function_catalog_field.get(self._j_tenv)
        return function_catalog
def _before_execute(self):
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
self._add_jars_to_j_env_config(jars_key)
self._add_jars_to_j_env_config(classpaths_key)
class StreamTableEnvironment(TableEnvironment):
    """A :class:`TableEnvironment` bound to a streaming
    :class:`~pyflink.datastream.StreamExecutionEnvironment`."""
    def __init__(self, j_tenv):
        self._j_tenv = j_tenv
        super(StreamTableEnvironment, self).__init__(j_tenv)
    def _get_j_env(self):
        # The two planners expose the underlying Java execution environment
        # through differently named accessors.
        if self._is_blink_planner:
            return self._j_tenv.getPlanner().getExecEnv()
        else:
            return self._j_tenv.getPlanner().getExecutionEnvironment()
    def get_config(self):
        """
        Returns the table config to define the runtime behavior of the Table API.

        :return: Current table config.
        :rtype: pyflink.table.TableConfig
        """
        table_config = TableConfig()
        # Wrap the live Java TableConfig so changes are visible on the JVM side.
        table_config._j_table_config = self._j_tenv.getConfig()
        return table_config
    def connect(self, connector_descriptor):
        """
        Creates a temporary table from a descriptor.

        Descriptors allow for declaring the communication to external systems in an
        implementation-agnostic way. The classpath is scanned for suitable table factories that
        match the desired configuration.

        The following example shows how to read from a connector using a JSON format and
        registering a temporary table as "MyTable":
        ::

            >>> table_env \\
            ...     .connect(ExternalSystemXYZ()
            ...              .version("0.11")) \\
            ...     .with_format(Json()
            ...                  .json_schema("{...}")
            ...                  .fail_on_missing_field(False)) \\
            ...     .with_schema(Schema()
            ...                  .field("user-name", "VARCHAR")
            ...                  .from_origin_field("u_name")
            ...                  .field("count", "DECIMAL")) \\
            ...     .create_temporary_table("MyTable")

        :param connector_descriptor: Connector descriptor describing the external system.
        :type connector_descriptor: pyflink.table.descriptors.ConnectorDescriptor
        :return: A :class:`~pyflink.table.descriptors.StreamTableDescriptor` used to build the
                 temporary table.
        :rtype: pyflink.table.descriptors.StreamTableDescriptor

        .. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
        """
        warnings.warn("Deprecated in 1.11. Use execute_sql instead.", DeprecationWarning)
        return StreamTableDescriptor(
            self._j_tenv.connect(connector_descriptor._j_connector_descriptor))
    @staticmethod
    def create(stream_execution_environment=None, table_config=None, environment_settings=None):
        """
        Creates a :class:`~pyflink.table.StreamTableEnvironment`.

        Example:
        ::

            # create with StreamExecutionEnvironment.
            >>> env = StreamExecutionEnvironment.get_execution_environment()
            >>> table_env = StreamTableEnvironment.create(env)
            # create with StreamExecutionEnvironment and TableConfig.
            >>> table_config = TableConfig()
            >>> table_config.set_null_check(False)
            >>> table_env = StreamTableEnvironment.create(env, table_config)
            # create with StreamExecutionEnvironment and EnvironmentSettings.
            >>> environment_settings = EnvironmentSettings.new_instance().use_blink_planner() \\
            ...     .build()
            >>> table_env = StreamTableEnvironment.create(
            ...     env, environment_settings=environment_settings)
            # create with EnvironmentSettings.
            >>> table_env = StreamTableEnvironment.create(environment_settings=environment_settings)

        :param stream_execution_environment: The
                                             :class:`~pyflink.datastream.StreamExecutionEnvironment`
                                             of the TableEnvironment.
        :type stream_execution_environment: pyflink.datastream.StreamExecutionEnvironment
        :param table_config: The configuration of the TableEnvironment, optional.
        :type table_config: pyflink.table.TableConfig
        :param environment_settings: The environment settings used to instantiate the
                                     TableEnvironment. It provides the interfaces about planner
                                     selection(flink or blink), optional.
        :type environment_settings: pyflink.table.EnvironmentSettings
        :return: The StreamTableEnvironment created from given StreamExecutionEnvironment and
                 configuration.
        :rtype: pyflink.table.StreamTableEnvironment
        """
        # Validate the allowed argument combinations first: at least one of
        # stream_execution_environment / environment_settings must be present,
        # and table_config conflicts with environment_settings.
        if stream_execution_environment is None and \
                table_config is None and \
                environment_settings is None:
            raise ValueError("No argument found, the param 'stream_execution_environment' "
                             "or 'environment_settings' is required.")
        elif stream_execution_environment is None and \
                table_config is not None and \
                environment_settings is None:
            raise ValueError("Only the param 'table_config' is found, "
                             "the param 'stream_execution_environment' is also required.")
        if table_config is not None and \
                environment_settings is not None:
            raise ValueError("The param 'table_config' and "
                             "'environment_settings' cannot be used at the same time")
        gateway = get_gateway()
        if environment_settings is not None:
            if not environment_settings.is_streaming_mode():
                raise ValueError("The environment settings for StreamTableEnvironment must be "
                                 "set to streaming mode.")
            if stream_execution_environment is None:
                # Let the Java side create (and own) the execution environment.
                j_tenv = gateway.jvm.TableEnvironment.create(
                    environment_settings._j_environment_settings)
            else:
                j_tenv = gateway.jvm.StreamTableEnvironment.create(
                    stream_execution_environment._j_stream_execution_environment,
                    environment_settings._j_environment_settings)
        else:
            if table_config is not None:
                j_tenv = gateway.jvm.StreamTableEnvironment.create(
                    stream_execution_environment._j_stream_execution_environment,
                    table_config._j_table_config)
            else:
                j_tenv = gateway.jvm.StreamTableEnvironment.create(
                    stream_execution_environment._j_stream_execution_environment)
        return StreamTableEnvironment(j_tenv)
class BatchTableEnvironment(TableEnvironment):
    """A :class:`TableEnvironment` bound to a batch
    :class:`~pyflink.dataset.ExecutionEnvironment` (old planner) or running the
    blink planner in batch mode."""
    def __init__(self, j_tenv):
        self._j_tenv = j_tenv
        super(BatchTableEnvironment, self).__init__(j_tenv)
    def _get_j_env(self):
        # The two planners expose the underlying Java execution environment
        # through differently named accessors.
        if self._is_blink_planner:
            return self._j_tenv.getPlanner().getExecEnv()
        else:
            return self._j_tenv.execEnv()
    def get_config(self):
        """
        Returns the table config to define the runtime behavior of the Table API.

        :return: Current table config.
        :rtype: pyflink.table.TableConfig
        """
        table_config = TableConfig()
        # Wrap the live Java TableConfig so changes are visible on the JVM side.
        table_config._j_table_config = self._j_tenv.getConfig()
        return table_config
    def connect(self, connector_descriptor):
        """
        Creates a temporary table from a descriptor.

        Descriptors allow for declaring the communication to external systems in an
        implementation-agnostic way. The classpath is scanned for suitable table factories that
        match the desired configuration.

        The following example shows how to read from a connector using a JSON format and
        registering a temporary table as "MyTable":
        ::

            >>> table_env \\
            ...     .connect(ExternalSystemXYZ()
            ...              .version("0.11")) \\
            ...     .with_format(Json()
            ...                  .json_schema("{...}")
            ...                  .fail_on_missing_field(False)) \\
            ...     .with_schema(Schema()
            ...                  .field("user-name", "VARCHAR")
            ...                  .from_origin_field("u_name")
            ...                  .field("count", "DECIMAL")) \\
            ...     .create_temporary_table("MyTable")

        :param connector_descriptor: Connector descriptor describing the external system.
        :type connector_descriptor: pyflink.table.descriptors.ConnectorDescriptor
        :return: A :class:`~pyflink.table.descriptors.BatchTableDescriptor` or a
                 :class:`~pyflink.table.descriptors.StreamTableDescriptor` (for blink planner) used
                 to build the temporary table.
        :rtype: pyflink.table.descriptors.BatchTableDescriptor or
                pyflink.table.descriptors.StreamTableDescriptor

        .. note:: Deprecated in 1.11. Use :func:`execute_sql` to register a table instead.
        """
        warnings.warn("Deprecated in 1.11. Use execute_sql instead.", DeprecationWarning)
        gateway = get_gateway()
        # A blink-planner batch environment is a TableEnvironmentImpl on the
        # Java side and uses the stream descriptor; the old planner uses the
        # batch descriptor.
        blink_t_env_class = get_java_class(
            gateway.jvm.org.apache.flink.table.api.internal.TableEnvironmentImpl)
        if blink_t_env_class == self._j_tenv.getClass():
            return StreamTableDescriptor(
                self._j_tenv.connect(connector_descriptor._j_connector_descriptor))
        else:
            return BatchTableDescriptor(
                self._j_tenv.connect(connector_descriptor._j_connector_descriptor))
    @staticmethod
    def create(execution_environment=None, table_config=None, environment_settings=None):
        """
        Creates a :class:`~pyflink.table.BatchTableEnvironment`.

        Example:
        ::

            # create with ExecutionEnvironment.
            >>> env = ExecutionEnvironment.get_execution_environment()
            >>> table_env = BatchTableEnvironment.create(env)
            # create with ExecutionEnvironment and TableConfig.
            >>> table_config = TableConfig()
            >>> table_config.set_null_check(False)
            >>> table_env = BatchTableEnvironment.create(env, table_config)
            # create with EnvironmentSettings.
            >>> environment_settings = EnvironmentSettings.new_instance().in_batch_mode() \\
            ...     .use_blink_planner().build()
            >>> table_env = BatchTableEnvironment.create(environment_settings=environment_settings)

        :param execution_environment: The batch :class:`~pyflink.dataset.ExecutionEnvironment` of
                                      the TableEnvironment.
        :type execution_environment: pyflink.dataset.ExecutionEnvironment
        :param table_config: The configuration of the TableEnvironment, optional.
        :type table_config: pyflink.table.TableConfig
        :param environment_settings: The environment settings used to instantiate the
                                     TableEnvironment. It provides the interfaces about planner
                                     selection(flink or blink), optional.
        :type environment_settings: pyflink.table.EnvironmentSettings
        :return: The BatchTableEnvironment created from given ExecutionEnvironment and
                 configuration.
        :rtype: pyflink.table.BatchTableEnvironment
        """
        # Validate the allowed argument combinations: at least one of
        # execution_environment / environment_settings must be present, and
        # environment_settings conflicts with both other parameters.
        if execution_environment is None and \
                table_config is None and \
                environment_settings is None:
            raise ValueError("No argument found, the param 'execution_environment' "
                             "or 'environment_settings' is required.")
        elif execution_environment is None and \
                table_config is not None and \
                environment_settings is None:
            raise ValueError("Only the param 'table_config' is found, "
                             "the param 'execution_environment' is also required.")
        elif execution_environment is not None and \
                environment_settings is not None:
            raise ValueError("The param 'execution_environment' and "
                             "'environment_settings' cannot be used at the same time")
        elif table_config is not None and \
                environment_settings is not None:
            raise ValueError("The param 'table_config' and "
                             "'environment_settings' cannot be used at the same time")
        gateway = get_gateway()
        if environment_settings is not None:
            if environment_settings.is_streaming_mode():
                raise ValueError("The environment settings for BatchTableEnvironment must be "
                                 "set to batch mode.")
            JEnvironmentSettings = get_gateway().jvm.org.apache.flink.table.api.EnvironmentSettings
            # Detect whether the settings request the old planner by comparing
            # the configured planner class name against the old planner's.
            old_planner_class_name = EnvironmentSettings.new_instance().in_batch_mode() \
                .use_old_planner().build()._j_environment_settings \
                .toPlannerProperties()[JEnvironmentSettings.CLASS_NAME]
            planner_properties = environment_settings._j_environment_settings.toPlannerProperties()
            if JEnvironmentSettings.CLASS_NAME in planner_properties and \
                    planner_properties[JEnvironmentSettings.CLASS_NAME] == old_planner_class_name:
                # The Java EnvironmentSettings API does not support creating table environment with
                # old planner. Create it from other API.
                j_tenv = gateway.jvm.BatchTableEnvironment.create(
                    ExecutionEnvironment.get_execution_environment()._j_execution_environment)
            else:
                j_tenv = gateway.jvm.TableEnvironment.create(
                    environment_settings._j_environment_settings)
        else:
            if table_config is None:
                j_tenv = gateway.jvm.BatchTableEnvironment.create(
                    execution_environment._j_execution_environment)
            else:
                j_tenv = gateway.jvm.BatchTableEnvironment.create(
                    execution_environment._j_execution_environment,
                    table_config._j_table_config)
        return BatchTableEnvironment(j_tenv)
| 43.707721 | 100 | 0.61753 |
79568bc81cdce983135cb747e3ba5214fd659d35 | 2,104 | py | Python | pylol/env/base_env_wrapper.py | jjlee0802cu/pylol | d2fc3bd6eed7546ed06481dfe5b1e4a0ddabe8e5 | [
"MIT"
] | 7 | 2020-11-09T12:49:23.000Z | 2022-03-14T22:22:50.000Z | pylol/env/base_env_wrapper.py | jjlee0802cu/pylol | d2fc3bd6eed7546ed06481dfe5b1e4a0ddabe8e5 | [
"MIT"
] | null | null | null | pylol/env/base_env_wrapper.py | jjlee0802cu/pylol | d2fc3bd6eed7546ed06481dfe5b1e4a0ddabe8e5 | [
"MIT"
] | 1 | 2022-03-30T08:26:22.000Z | 2022-03-30T08:26:22.000Z | # MIT License
#
# Copyright (c) 2020 MiscellaneousStuff
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""A base env wrapper so we don't need to override everything every time."""
from pylol.env import environment
class BaseEnvWrapper(environment.Base):
    """A base env wrapper so we don't need to override everything every time.

    Every call is forwarded unchanged to the wrapped ``env``; subclasses
    override only the methods whose behavior they need to customize.
    """

    def __init__(self, env):
        # The wrapped environment all calls are delegated to.
        self.env = env

    def close(self, *args, **kwargs):
        """Forwards to the wrapped env's ``close``."""
        return self.env.close(*args, **kwargs)

    def action_spec(self, *args, **kwargs):
        """Forwards to the wrapped env's ``action_spec``."""
        return self.env.action_spec(*args, **kwargs)

    def observation_spec(self, *args, **kwargs):
        """Forwards to the wrapped env's ``observation_spec``."""
        return self.env.observation_spec(*args, **kwargs)

    def reset(self, *args, **kwargs):
        """Forwards to the wrapped env's ``reset``."""
        return self.env.reset(*args, **kwargs)

    def step(self, *args, **kwargs):
        """Forwards to the wrapped env's ``step``."""
        return self.env.step(*args, **kwargs)

    def save_replay(self, *args, **kwargs):
        """Forwards to the wrapped env's ``save_replay``."""
        return self.env.save_replay(*args, **kwargs)

    @property
    def state(self):
        """The wrapped env's current state."""
        return self.env.state
79568c9858c36468c3bf293de109e080a982004e | 34,858 | py | Python | src/main/python/smart/smartplots2_setup.py | cday97/beam | 7e1ab50eecaefafd04daab360f8b12bc7cab559b | [
"BSD-3-Clause-LBNL"
] | 123 | 2017-04-06T20:17:19.000Z | 2022-03-02T13:42:15.000Z | src/main/python/smart/smartplots2_setup.py | cday97/beam | 7e1ab50eecaefafd04daab360f8b12bc7cab559b | [
"BSD-3-Clause-LBNL"
] | 2,676 | 2017-04-26T20:27:27.000Z | 2022-03-31T16:39:53.000Z | src/main/python/smart/smartplots2_setup.py | cday97/beam | 7e1ab50eecaefafd04daab360f8b12bc7cab559b | [
"BSD-3-Clause-LBNL"
] | 60 | 2017-04-06T20:14:32.000Z | 2022-03-30T20:10:53.000Z | import matplotlib
import sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import os
plt.style.use('seaborn-colorblind')
# plt.style.use('ggplot')
plt.rcParams['axes.edgecolor'] = 'black'
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['savefig.facecolor'] = 'white'
plt.rcParams['savefig.edgecolor'] = 'black'
colors = {'blue': '#377eb8', 'green': '#227222', 'orange': '#C66200', 'purple': '#470467', 'red': '#B30C0C',
'yellow': '#C6A600', 'light.green': '#C0E0C0', 'magenta': '#D0339D', 'dark.blue': '#23128F',
'brown': '#542D06', 'grey': '#8A8A8A', 'dark.grey': '#2D2D2D', 'light.yellow': '#FFE664',
'light.purple': '#9C50C0', 'light.orange': '#FFB164', 'black': '#000000'}
mode_colors = {'RH': colors['red'],
'Car': colors['grey'],
'Walk': colors['green'],
'Transit': colors['blue'],
'RHT': colors['light.purple'],
'RHP': 'mediumorchid',
'CAV': colors['light.yellow'],
'Bike': colors['light.orange'],
'NM': colors['light.orange'],
'electricity': colors['blue'],
'gas': colors['purple'],
'diesel': colors['yellow']}
def getDfForPlt(_plt_setup2, _output_folder):
    """Loads the per-scenario metrics used by the plotting functions.

    Reads ``{year}.{iteration}.metrics-final.csv`` for every configured
    scenario, keeps the row whose ``Rank`` matches the scenario id, and
    computes the x positions of the grouped bars and of their bottom labels.
    A ``makeplots`` subfolder is created in ``_output_folder`` if missing.

    :param _plt_setup2: plot configuration dict with ``top_labels``,
        ``scenarios_year``, ``scenarios_id`` and ``scenarios_itr`` lists
        (the last three must have the same length).
    :param _output_folder: folder containing the metrics csv files.
    :return: tuple ``(df, top_labels_xpos, bottom_labels_xpos)`` where ``df``
        is the concatenation of the selected rows sorted by ``Rank``.
    """
    makeplots_dir = '{}/makeplots'.format(_output_folder)
    if not os.path.exists(makeplots_dir):
        os.makedirs(makeplots_dir)
    top_labels = _plt_setup2['top_labels']
    years = _plt_setup2['scenarios_year']
    ids = _plt_setup2['scenarios_id']
    iterations = _plt_setup2['scenarios_itr']
    # Bars come in pairs: add an extra unit of space after every second bar
    # and place the bottom (scenario) label in the middle of each pair.
    top_labels_xpos = [1]
    bottom_labels_xpos = [1]
    for i in range(1, len(top_labels)):
        top_labels_xpos.append(top_labels_xpos[i - 1] + 1 + i % 2)
        if i % 2 == 0:
            bottom_labels_xpos.append((top_labels_xpos[i] + top_labels_xpos[i - 1]) / 2)
    # Collect the selected row of every scenario and concatenate once at the
    # end (cheaper than concatenating inside the loop). `scenario_id` instead
    # of the former `id`, which shadowed the builtin.
    frames = []
    for scenario_id, year, iteration in zip(ids, years, iterations):
        metrics_file = "{}/{}.{}.metrics-final.csv".format(_output_folder, year, iteration)
        df_temp = pd.read_csv(metrics_file).fillna(0)
        frames.append(df_temp[df_temp['Rank'] == scenario_id])
    df = pd.concat(frames)
    return (df.sort_values(by=['Rank']), top_labels_xpos, bottom_labels_xpos)
# plots
def pltModeSplitByTrips(_plt_setup2, _output_folder):
    """Plots the mode split (share of trips per mode) as stacked bars, one bar
    per scenario, and writes both the png and the underlying csv to
    ``{_output_folder}/makeplots``."""
    plot_size = _plt_setup2['plot_size']
    top_labels = _plt_setup2['top_labels']
    bottom_labels = _plt_setup2['bottom_labels']
    nb_scenarios = len(_plt_setup2['scenarios_id'])
    # Rotation angle (degrees) of the bottom tick labels.
    angle = 12
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.modesplit_trips.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.modesplit_trips.csv'.format(_output_folder, _plt_setup2['name'])
    # Older runs may not report CAV trips at all.
    createColumnIfNotExist(df, 'cav_counts', 0)
    data = pd.DataFrame(
        {'transit': (df['drive_transit_counts'].values + df['ride_hail_transit_counts'].values + df['walk_transit_counts'].values),
         'car': df['car_counts'].values,
         'cav': df['cav_counts'].values,
         'rh': df['ride_hail_counts'].values,
         'rhp': df['ride_hail_pooled_counts'].values,
         'walk': df['walk_counts'].values,
         'bike': df['bike_counts'].values
         })
    # Normalize each row so the columns are trip shares summing to 1.
    data = data.div(data.sum(axis=1), axis=0)
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=plot_size)
    # Stacked bars: each layer's `bottom` is the cumulative sum of the layers
    # drawn below it, so the drawing order here fixes the stack order.
    plt_transit = plt.bar(x=top_labels_xpos, height=data['transit'], color=mode_colors['Transit'])
    plt_car = plt.bar(x=top_labels_xpos, height=data['car'], bottom=data['transit'], color=mode_colors['Car'])
    plt_cav = plt.bar(x=top_labels_xpos, height=data['cav'], bottom=data[['transit', 'car']].sum(axis=1), color=mode_colors['CAV'])
    plt_rh = plt.bar(x=top_labels_xpos, height=data['rh'], bottom=data[['transit', 'car', 'cav']].sum(axis=1), color=mode_colors['RH'])
    plt_rhp = plt.bar(x=top_labels_xpos, height=data['rhp'], bottom=data[['transit', 'car', 'cav', 'rh']].sum(axis=1), color=mode_colors['RHP'])
    plt_bike = plt.bar(x=top_labels_xpos, height=data['bike'], bottom=data[['transit', 'car', 'cav', 'rh', 'rhp']].sum(axis=1), color=mode_colors['Bike'])
    plt_walk = plt.bar(x=top_labels_xpos, height=data['walk'], bottom=data[['transit', 'car', 'cav', 'rh', 'rhp', 'bike']].sum(axis=1), color=mode_colors['Walk'])
    plt.xticks(bottom_labels_xpos, bottom_labels, rotation=angle)
    plt.legend((plt_transit, plt_car, plt_cav, plt_rh, plt_rhp, plt_bike, plt_walk),
               ('Transit', 'Car', 'CAV', 'Ridehail', 'Ridehail Pool', 'Bike', 'Walk'),
               labelspacing=-2.5, bbox_to_anchor=(1.05, 0.5), frameon=False)
    ax = plt.gca()
    ax.grid(False)
    # Per-bar technology labels just above the (full-height) bars.
    for ind in range(nb_scenarios):
        plt.text(top_labels_xpos[ind], 1.02, top_labels[ind], ha='center')
    ax.set_ylim((0, 1.0))
    plt.ylabel('Portion of Trips')
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def createColumnIfNotExist(df, name, value):
    """Ensures ``df`` has a column ``name`` with no missing values, in place.

    If the column is absent it is created filled with ``value``; if it exists,
    NaN entries are replaced with ``value``. (The previous implementation
    always filled existing columns with 0, silently ignoring ``value``; all
    current call sites pass 0, so their behavior is unchanged.)

    :param df: the DataFrame to mutate.
    :param name: column name to guarantee.
    :param value: fill value for the missing column / missing entries.
    """
    if name not in df.columns:
        df[name] = value
    else:
        df[name].fillna(value, inplace=True)
def tableSummary(_plt_setup2, _output_folder):
    """Writes a per-scenario summary csv (VMT, person hours, energy, speed) to
    ``{_output_folder}/makeplots``.

    Simulated (non-transit) quantities are scaled by the configured
    ``expansion_factor`` to full-population values.
    """
    factor = _plt_setup2['expansion_factor']
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    output_csv = '{}/makeplots/{}.summary.csv'.format(_output_folder, _plt_setup2['name'])
    # CAV columns may be absent in runs without CAVs.
    createColumnIfNotExist(df, 'VMT_car_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV', 0)
    tot_vmt_transit = (df['VMT_bus'].values+df['VMT_cable_car'].values+df['VMT_ferry'].values+df['VMT_rail'].values +
                       df['VMT_subway'].values+df['VMT_tram'].values)
    # NOTE(review): "non transit" here also includes walk and bike miles — confirm
    # that is intended for the VMT totals below.
    tot_vmt_non_transit = (df['VMT_car'].values+df['VMT_car_CAV'].values+df['VMT_car_RH'].values +
                           df['VMT_car_RH_CAV'].values + df['VMT_walk'].values+df['VMT_bike'].values)
    tot_ldv_vmt = (df['VMT_car'].values+df['VMT_car_CAV'].values+df['VMT_car_RH'].values + df['VMT_car_RH_CAV'].values)
    createColumnIfNotExist(df, 'personTravelTime_cav', 0)
    # Total person travel time (minutes before the /60 below), expanded.
    tot_pht = (df['personTravelTime_bike'].values+df['personTravelTime_car'].values+df['personTravelTime_cav'].values +
               df['personTravelTime_drive_transit'].values+df['personTravelTime_mixed_mode'].values +
               df['personTravelTime_onDemandRide'].values+df['personTravelTime_onDemandRide_pooled'].values +
               df['personTravelTime_onDemandRide_transit'].values+df['personTravelTime_walk'].values +
               df['personTravelTime_walk_transit'].values) * factor
    tot_energy = (df['totalEnergy_Biodiesel'].values+df['totalEnergy_Diesel'].values +
                  df['totalEnergy_Electricity'].values+df['totalEnergy_Gasoline'].values) * factor
    data = pd.DataFrame(
        # NOTE(review): the per-capita rows are not multiplied by the expansion
        # factor while the totals are — presumably population is the simulated
        # population so the factors cancel; confirm. 'PEV (%)' and 'MEP' are
        # placeholders, filled in elsewhere or left at 0.
        {'VMT Total (10^6)': (tot_vmt_transit + tot_vmt_non_transit * factor) / 1000000,
         'VMT per Capita': (tot_vmt_transit+tot_vmt_non_transit)/df['population'],
         'VMT Light Duty Total (10^6)': tot_ldv_vmt * factor / 1000000,
         'VMT Light Duty per Capita': tot_ldv_vmt/df['population'],
         'Driving Speed [miles/h]': tot_ldv_vmt/df['total_vehicleHoursTravelled_LightDutyVehicles'],
         'Person Hours (10^6)': tot_pht / 60 / 1000000,
         'PEV (%)': 0,
         'Vehicle Energy (GJ)': tot_energy / 1000000000,
         'MEP': 0
         })
    data['Scenario'] = df['Scenario'].values.copy()
    data['Technology'] = df['Technology'].values.copy()
    data['year'] = _plt_setup2['scenarios_year']
    data.to_csv(output_csv)
def pltLdvRhOccupancy(_plt_setup2, _output_folder):
    """Plot distance-based occupancy of light duty vehicles per scenario.

    Each bar stacks PMT shares divided by total LDV VMT: non-ridehail LDV,
    solo ridehail, and pooled ridehail; pooled miles are further broken down
    by passenger count (2/3/4+) using hatch overlays.

    Note: no expansion factor is applied here — occupancy is the ratio
    PMT / VMT, so any uniform sample-expansion factor cancels out.
    (The previously assigned ``factor`` local was unused and has been removed.)

    Outputs ``<name>.ldv_rh_occupancy.png`` and ``.csv`` under
    ``<_output_folder>/makeplots``.
    """
    plot_size = _plt_setup2['plot_size']
    top_labels = _plt_setup2['top_labels']
    bottom_labels = _plt_setup2['bottom_labels']
    nb_scenarios = len(_plt_setup2['scenarios_id'])
    angle = 12  # x tick label rotation (degrees)
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.ldv_rh_occupancy.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.ldv_rh_occupancy.csv'.format(_output_folder, _plt_setup2['name'])
    # CAV columns only exist in runs that include automated vehicles; default to 0.
    createColumnIfNotExist(df, 'PMT_car_CAV', 0)
    createColumnIfNotExist(df, 'PMT_car_RH_CAV', 0)
    createColumnIfNotExist(df, 'PMT_car_RH_CAV_shared', 0)
    createColumnIfNotExist(df, 'PMT_car_RH_CAV_shared_2p', 0)
    createColumnIfNotExist(df, 'PMT_car_RH_CAV_shared_3p', 0)
    createColumnIfNotExist(df, 'PMT_car_RH_CAV_shared_4p', 0)
    createColumnIfNotExist(df, 'VMT_car_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV', 0)
    data = pd.DataFrame(
        {
            'non_rh_ldv': df[['PMT_car', 'PMT_car_CAV']].sum(axis=1),
            # Solo ridehail = all ridehail PMT minus shared-ride PMT.
            'rh_1p': df[['PMT_car_RH', 'PMT_car_RH_CAV']].sum(axis=1)-df[['PMT_car_RH_shared', 'PMT_car_RH_CAV_shared']].sum(axis=1),
            'rh_2p': df[['PMT_car_RH_shared_2p', 'PMT_car_RH_CAV_shared_2p']].sum(axis=1),
            'rh_3p': df[['PMT_car_RH_shared_3p', 'PMT_car_RH_CAV_shared_3p']].sum(axis=1),
            'rh_4p': df[['PMT_car_RH_shared_4p', 'PMT_car_RH_CAV_shared_4p']].sum(axis=1)
        })
    # Convert PMT into distance-based occupancy by dividing by total LDV VMT.
    data = data.div(df[['VMT_car', 'VMT_car_CAV', 'VMT_car_RH', 'VMT_car_RH_CAV']].sum(axis=1), axis=0)
    # Captured before the scenario/technology columns are appended so the
    # y-limit reflects only the numeric stack heights.
    height_all = data.sum(axis=1)
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=plot_size)
    plt_non_rh_ldv = plt.bar(x=top_labels_xpos, height=data['non_rh_ldv'], color=colors['grey'])
    plt_rh_1p = plt.bar(x=top_labels_xpos, height=data['rh_1p'], bottom=data['non_rh_ldv'], color=mode_colors['RH'])
    plt_rh_shared = plt.bar(x=top_labels_xpos, height=data[['rh_2p', 'rh_3p', 'rh_4p']].sum(axis=1), bottom=data[['non_rh_ldv', 'rh_1p']].sum(axis=1), color=mode_colors['RHP'])
    # Hatch-only overlays (fill=False) distinguish the pooled sub-segments.
    plt.bar(x=top_labels_xpos, height=data['rh_2p'], bottom=data[['non_rh_ldv', 'rh_1p']].sum(axis=1), hatch='xxx', fill=False, linewidth=0)
    plt.bar(x=top_labels_xpos, height=data['rh_3p'], bottom=data[['non_rh_ldv', 'rh_1p', 'rh_2p']].sum(axis=1), hatch='|||', fill=False, linewidth=0)
    plt.bar(x=top_labels_xpos, height=data['rh_4p'], bottom=data[['non_rh_ldv', 'rh_1p', 'rh_2p', 'rh_3p']].sum(axis=1), hatch='....', fill=False, linewidth=0)
    # Proxy artists so the hatch styles appear in the legend.
    shared_2p = mpatches.Patch(facecolor='white', label='The white data', hatch='xxx')
    shared_3p = mpatches.Patch(facecolor='white', label='The white data', hatch='|||')
    shared_4p = mpatches.Patch(facecolor='white', label='The white data', hatch='....')
    plt.xticks(bottom_labels_xpos, bottom_labels, rotation=angle)
    plt.legend((plt_non_rh_ldv, plt_rh_1p, plt_rh_shared, shared_2p, shared_3p, shared_4p),
               ('non-Ridehail LDV', 'Ridehail', 'Ridehail Pool', '2 passengers', '3 passengers', '4+ passengers'),
               labelspacing=-2.5, bbox_to_anchor=(1.05, 0.5), frameon=False)
    # Reference line at occupancy 1.0 (one passenger per vehicle mile).
    plt.axhline(y=1.0, color='black', linestyle='dashed', lw=0.5, alpha=0.2)
    ax = plt.gca()
    ax.grid(False)
    max_value = max(height_all)
    ax.set_ylim((0, max_value))
    for ind in range(nb_scenarios):
        plt.text(top_labels_xpos[ind], max_value + 0.02*max_value, top_labels[ind], ha='center')
    plt.ylabel('Distance Based Occupancy')
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltLdvRhOccupancyByVMT(_plt_setup2, _output_folder):
    """Plot light duty VMT (millions) as stacked bars split by occupancy class.

    Segments bottom-to-top: personal car (excluding shared CAV miles), shared
    CAV, solo ridehail, and pooled ridehail broken down by passenger count
    (2/3/4+ as hatch overlays).  Empty (deadheading) miles are overlaid as a
    downward '///' hatch on the car and ridehail segments.  Writes
    ``<name>.ldv_rh_occupancy_vmt.png`` and ``.csv`` under
    ``<_output_folder>/makeplots``.
    """
    plot_size = _plt_setup2['plot_size']
    top_labels = _plt_setup2['top_labels']
    bottom_labels = _plt_setup2['bottom_labels']
    nb_scenarios = len(_plt_setup2['scenarios_id'])
    factor = _plt_setup2['expansion_factor']  # sample -> population expansion
    scale = 1 / 1000000  # miles -> millions of miles
    angle = 12  # x tick label rotation (degrees)
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.ldv_rh_occupancy_vmt.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.ldv_rh_occupancy_vmt.csv'.format(_output_folder, _plt_setup2['name'])
    # CAV columns only exist in runs that include automated vehicles; default to 0.
    createColumnIfNotExist(df, 'VMT_car_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_shared', 0)
    createColumnIfNotExist(df, 'VMT_car_CAV_empty', 0)
    createColumnIfNotExist(df, 'VMT_car_CAV_shared', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_empty', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_shared_2p', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_shared_3p', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_shared_4p', 0)
    data = pd.DataFrame(
        {
            # Shared miles are subtracted so segments do not double count.
            'car': (df[['VMT_car', 'VMT_car_CAV']].sum(axis=1)-df[['VMT_car_shared', 'VMT_car_CAV_shared']].sum(axis=1)) * factor * scale,
            'car_shared': df[['VMT_car_shared', 'VMT_car_CAV_shared']].sum(axis=1) * factor * scale,
            'rh': (df[['VMT_car_RH', 'VMT_car_RH_CAV']].sum(axis=1)-df[['VMT_car_RH_shared', 'VMT_car_RH_CAV_shared']].sum(axis=1)) * factor * scale,
            'rh_2p': df[['VMT_car_RH_shared_2p', 'VMT_car_RH_CAV_shared_2p']].sum(axis=1) * factor * scale,
            'rh_3p': df[['VMT_car_RH_shared_3p', 'VMT_car_RH_CAV_shared_3p']].sum(axis=1) * factor * scale,
            'rh_4p': df[['VMT_car_RH_shared_4p', 'VMT_car_RH_CAV_shared_4p']].sum(axis=1) * factor * scale
        })
    # Captured before the empty/scenario columns are appended so the y-limit
    # reflects only the stacked segment heights.
    height_all = data.sum(axis=1)
    data['car_empty'] = df[['VMT_car_empty', 'VMT_car_CAV_empty']].sum(axis=1) * factor * scale
    data['rh_empty'] = df[['VMT_car_RH_empty', 'VMT_car_RH_CAV_empty']].sum(axis=1) * factor * scale
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=plot_size)
    plt_car = plt.bar(x=top_labels_xpos, height=data['car'], color=mode_colors['Car'])
    # Negative height draws the deadheading hatch downward from the segment top.
    plt.bar(x=top_labels_xpos, height=-data['car_empty'], bottom=data['car'], hatch='///', fill=False, linewidth=0)
    plt_car_shared = plt.bar(x=top_labels_xpos, height=data['car_shared'], bottom=data['car'], color=mode_colors['CAV'])
    plt_rh = plt.bar(x=top_labels_xpos, height=data['rh'], bottom=data[['car', 'car_shared']].sum(axis=1), color=mode_colors['RH'])
    plt.bar(x=top_labels_xpos, height=-data['rh_empty'], bottom=data[['car', 'car_shared', 'rh']].sum(axis=1), hatch='///', fill=False, linewidth=0)
    plt_rh_shared = plt.bar(x=top_labels_xpos, height=data[['rh_2p', 'rh_3p', 'rh_4p']].sum(axis=1), bottom=data[['car', 'car_shared', 'rh']].sum(axis=1), color=mode_colors['RHP'])
    # Hatch-only overlays (fill=False) distinguish the pooled sub-segments.
    plt.bar(x=top_labels_xpos, height=data['rh_2p'], bottom=data[['car', 'car_shared', 'rh']].sum(axis=1), hatch='xxx', fill=False, linewidth=0)
    plt.bar(x=top_labels_xpos, height=data['rh_3p'], bottom=data[['car', 'car_shared', 'rh', 'rh_2p']].sum(axis=1), hatch='|||', fill=False, linewidth=0)
    plt.bar(x=top_labels_xpos, height=data['rh_4p'], bottom=data[['car', 'car_shared', 'rh', 'rh_2p', 'rh_3p']].sum(axis=1), hatch='....', fill=False, linewidth=0)
    # Proxy artists so the hatch styles appear in the legend.
    empty = mpatches.Patch(facecolor='white', label='The white data', hatch='///')
    shared_2p = mpatches.Patch(facecolor='white', label='The white data', hatch='xxx')
    shared_3p = mpatches.Patch(facecolor='white', label='The white data', hatch='|||')
    shared_4p = mpatches.Patch(facecolor='white', label='The white data', hatch='....')
    plt.xticks(bottom_labels_xpos, bottom_labels, rotation=angle)
    plt.legend((plt_car, plt_car_shared, plt_rh, plt_rh_shared, shared_2p, shared_3p, shared_4p, empty),
               ('Car/CAV', 'CAV Shared', 'Ridehail', 'Ridehail Pool', '2 passengers', '3 passengers', '4+ passengers', 'Deadheading'),
               labelspacing=-2.5, bbox_to_anchor=(1.05, 0.5), frameon=False)
    plt.axhline(y=1.0, color='black', linestyle='dashed', lw=0.5, alpha=0.2)
    ax = plt.gca()
    ax.grid(False)
    max_value = max(height_all)
    ax.set_ylim((0, max_value))
    for ind in range(nb_scenarios):
        plt.text(top_labels_xpos[ind], max_value + 0.02*max_value, top_labels[ind], ha='center')
    plt.ylabel('Light Duty Vehicle Miles Traveled (millions)')
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltLdvPersonHourTraveled(_plt_setup2, _output_folder):
    """Plot person hours traveled in light duty vehicles as stacked bars.

    One stacked bar per scenario, split by mode (Car, CAV, Ridehail,
    Ridehail Pool).  Minutes are expanded to the full population and
    converted into millions of hours.  Writes a PNG and the underlying CSV
    under ``<_output_folder>/makeplots``.
    """
    fig_size = _plt_setup2['plot_size']
    labels_top = _plt_setup2['top_labels']
    labels_bottom = _plt_setup2['bottom_labels']
    n_scenarios = len(_plt_setup2['scenarios_id'])
    expansion = _plt_setup2['expansion_factor']
    minutes_to_million_hours = 1 / 1000000 / 60
    tick_angle = 12
    (df, xpos_top, xpos_bottom) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.ldv_person_hours_traveled.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.ldv_person_hours_traveled.csv'.format(_output_folder, _plt_setup2['name'])
    createColumnIfNotExist(df, 'personTravelTime_cav', 0)
    # Output column name -> source column in the summary dataframe.
    source_columns = (
        ('car', 'personTravelTime_car'),
        ('cav', 'personTravelTime_cav'),
        ('rh', 'personTravelTime_onDemandRide'),
        ('rhp', 'personTravelTime_onDemandRide_pooled'),
    )
    data = pd.DataFrame(
        {name: df[column].values * expansion * minutes_to_million_hours
         for name, column in source_columns})
    # Total stack heights, captured before non-numeric columns are appended.
    height_all = data.sum(axis=1)
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=fig_size)
    # Stack the four mode segments, keeping the bar handles for the legend.
    handles = []
    stacked = []
    for name, color_key in (('car', 'Car'), ('cav', 'CAV'), ('rh', 'RH'), ('rhp', 'RHP')):
        if not stacked:
            handles.append(plt.bar(x=xpos_top, height=data[name], color=mode_colors[color_key]))
        else:
            handles.append(plt.bar(x=xpos_top, height=data[name],
                                   bottom=data[stacked].sum(axis=1),
                                   color=mode_colors[color_key]))
        stacked.append(name)
    plt.xticks(xpos_bottom, labels_bottom, rotation=tick_angle)
    plt.legend(handles,
               ('Car', 'CAV', 'Ridehail', 'Ridehail Pool'),
               labelspacing=-2.5, bbox_to_anchor=(1.05, 0.5), frameon=False)
    axes = plt.gca()
    axes.grid(False)
    peak = max(height_all)
    axes.set_ylim((0, peak))
    # Scenario captions sit 2% above the tallest stack.
    for idx in range(n_scenarios):
        plt.text(xpos_top[idx], peak + 0.02 * peak, labels_top[idx], ha='center')
    plt.ylabel('Person Hours Traveled in LDV (millions)')
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltModeSplitInPMT(_plt_setup2, _output_folder):
    """Plot the PMT mode split expanded to the full population, in millions."""
    expansion = _plt_setup2['expansion_factor']
    pltModeSplitInPMT_internal(
        _plt_setup2, _output_folder, expansion,
        'modesplit_pmt', 1 / 1000000, 'Person Miles Traveled (millions)')
def pltModeSplitInPMTPerCapita(_plt_setup2, _output_folder):
    """Plot the PMT mode split normalized by the population of each scenario."""
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    per_capita = 1 / df['population'].values
    pltModeSplitInPMT_internal(
        _plt_setup2, _output_folder, per_capita,
        'modesplit_pmt_per_capita', 1, 'Person Miles Traveled')
def pltModeSplitInPMT_internal(_plt_setup2, _output_folder,factor,fileNameLabel,scale,ylabel):
    """Shared implementation for the PMT mode-split plots.

    Stacks person miles traveled per scenario by mode (Transit, Car, CAV,
    Ridehail, Bike, Walk) and writes a PNG plus the underlying CSV under
    ``<_output_folder>/makeplots``.

    :param factor: per-row multiplier supplied by the caller — the expansion
        factor for the absolute plot, ``1/population`` for the per-capita one.
    :param fileNameLabel: basename fragment of the PNG/CSV output files.
    :param scale: unit conversion applied on top of ``factor``.
    :param ylabel: y axis caption.
    """
    plot_size = _plt_setup2['plot_size']
    top_labels = _plt_setup2['top_labels']
    bottom_labels = _plt_setup2['bottom_labels']
    nb_scenarios = len(_plt_setup2['scenarios_id'])
    # factor and scale are parameters here (see pltModeSplitInPMT and
    # pltModeSplitInPMTPerCapita) rather than being derived from _plt_setup2.
    angle = 12  # x tick label rotation (degrees)
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.{}.png'.format(_output_folder, _plt_setup2['name'],fileNameLabel)
    output_csv = '{}/makeplots/{}.{}.csv'.format(_output_folder, _plt_setup2['name'],fileNameLabel)
    # CAV columns only exist in runs that include automated vehicles; default to 0.
    createColumnIfNotExist(df, 'PMT_car_CAV', 0)
    createColumnIfNotExist(df, 'PMT_car_RH_CAV', 0)
    data = pd.DataFrame(
        {'transit': (df['PMT_bus'].values+df['PMT_ferry'].values+df['PMT_rail'].values+df['PMT_subway'].values+
                     df['PMT_tram'].values+df['PMT_cable_car'].values) * factor * scale,
         'car': df['PMT_car'].values * factor * scale,
         'cav': df['PMT_car_CAV'].values * factor * scale,
         'rh': (df['PMT_car_RH'].values+df['PMT_car_RH_CAV'].values) * factor * scale,
         'walk': df['PMT_walk'].values * factor * scale,
         'bike': df['PMT_bike'].values * factor * scale
         })
    # Captured before the scenario/technology columns are appended so the
    # y-limit reflects only the stacked segment heights.
    height_all = data.sum(axis=1)
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=plot_size)
    # Each bar's bottom is the running sum of the segments already drawn.
    plt_transit = plt.bar(x=top_labels_xpos, height=data['transit'], color=mode_colors['Transit'])
    plt_car = plt.bar(x=top_labels_xpos, height=data['car'], bottom=data['transit'], color=mode_colors['Car'])
    plt_cav = plt.bar(x=top_labels_xpos, height=data['cav'], bottom=data[['transit', 'car']].sum(axis=1), color=mode_colors['CAV'])
    plt_rh = plt.bar(x=top_labels_xpos, height=data['rh'], bottom=data[['transit', 'car', 'cav']].sum(axis=1), color=mode_colors['RH'])
    plt_bike = plt.bar(x=top_labels_xpos, height=data['bike'], bottom=data[['transit', 'car', 'cav', 'rh']].sum(axis=1), color=mode_colors['Bike'])
    plt_walk = plt.bar(x=top_labels_xpos, height=data['walk'], bottom=data[['transit', 'car', 'cav', 'rh', 'bike']].sum(axis=1), color=mode_colors['Walk'])
    plt.xticks(bottom_labels_xpos, bottom_labels, rotation=angle)
    plt.legend((plt_transit, plt_car, plt_cav, plt_rh, plt_bike, plt_walk),
               ('Transit', 'Car', 'CAV', 'Ridehail', 'Bike', 'Walk'),
               labelspacing=-2.5, bbox_to_anchor=(1.05, 0.5), frameon=False)
    ax = plt.gca()
    ax.grid(False)
    max_value = max(height_all)
    ax.set_ylim((0, max_value))
    for ind in range(nb_scenarios):
        plt.text(top_labels_xpos[ind], max_value + 0.02*max_value, top_labels[ind], ha='center')
    plt.ylabel(ylabel)
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltModeSplitInVMT(_plt_setup2, _output_folder):
    """Plot the VMT mode split expanded to the full population, in millions."""
    expansion = _plt_setup2['expansion_factor']
    pltModeSplitInVMT_internal(
        _plt_setup2, _output_folder, expansion,
        'modesplit_vmt', 1 / 1000000, 'Vehicle Miles Traveled (millions)', 1)
def pltModeSplitInVMTPerCapita(_plt_setup2, _output_folder):
    """Plot the VMT mode split normalized by the population of each scenario."""
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    per_capita = 1 / df['population'].values
    # Transit VMT is not expanded in the source data, so it gets its own
    # normalization that also divides out the expansion factor.
    transit_per_capita = 1 / _plt_setup2['expansion_factor'] / df['population'].values
    pltModeSplitInVMT_internal(
        _plt_setup2, _output_folder, per_capita,
        'modesplit_vmt_per_capita', 1, 'Vehicle Miles Traveled', transit_per_capita)
def pltModeSplitInVMT_internal(_plt_setup2, _output_folder,factor,fileNameLabel,scale,ylabel,transitFactor):
    """Shared implementation for the VMT mode-split plots.

    Stacks vehicle miles traveled per scenario by mode (Transit, Car, CAV,
    Ridehail, Ridehail Pool, NonMotorized) and overlays empty/deadheading
    miles as a downward '///' hatch.  Writes a PNG plus the underlying CSV
    under ``<_output_folder>/makeplots``.

    :param factor: per-row multiplier supplied by the caller — the expansion
        factor for the absolute plot, ``1/population`` for the per-capita one.
    :param fileNameLabel: basename fragment of the PNG/CSV output files.
    :param scale: unit conversion applied on top of ``factor``.
    :param ylabel: y axis caption.
    :param transitFactor: separate multiplier for the transit segment, which
        is scaled differently from the LDV columns (see the callers).
    """
    plot_size = _plt_setup2['plot_size']
    top_labels = _plt_setup2['top_labels']
    bottom_labels = _plt_setup2['bottom_labels']
    nb_scenarios = len(_plt_setup2['scenarios_id'])
    # factor and scale are parameters here (see pltModeSplitInVMT and
    # pltModeSplitInVMTPerCapita) rather than being derived from _plt_setup2.
    angle = 12  # x tick label rotation (degrees)
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.{}.png'.format(_output_folder, _plt_setup2['name'],fileNameLabel)
    output_csv = '{}/makeplots/{}.{}.csv'.format(_output_folder, _plt_setup2['name'],fileNameLabel)
    # CAV columns only exist in runs that include automated vehicles; default to 0.
    createColumnIfNotExist(df, 'VMT_car_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_shared', 0)
    createColumnIfNotExist(df, 'VMT_car_CAV_empty', 0)
    createColumnIfNotExist(df, 'VMT_car_CAV_shared', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_empty', 0)
    data = pd.DataFrame(
        {'transit': (df['VMT_bus'].values+df['VMT_ferry'].values+df['VMT_rail'].values+df['VMT_subway'].values+
                     df['VMT_tram'].values+df['VMT_cable_car'].values) * scale * transitFactor,
         'car': df['VMT_car'].values * factor * scale,
         'cav': df['VMT_car_CAV'].values * factor * scale,
         # Solo ridehail = all ridehail VMT minus shared-ride VMT.
         'rh': (df['VMT_car_RH'].values+df['VMT_car_RH_CAV'].values-df['VMT_car_RH_shared'].values-df['VMT_car_RH_CAV_shared'].values) * factor * scale,
         'rhp':(df['VMT_car_RH_shared'].values + df['VMT_car_RH_CAV_shared'].values) * factor * scale,
         'nm': (df['VMT_walk'].values+df['VMT_bike'].values) * factor * scale
         })
    # Captured before the empty/scenario columns are appended so the y-limit
    # reflects only the stacked segment heights.
    height_all = data.sum(axis=1)
    data['cav_empty'] = df['VMT_car_CAV_empty'].values * factor * scale
    data['cav_shared'] = df['VMT_car_CAV_shared'].values * factor * scale
    data['rh_empty'] = (df['VMT_car_RH_empty'].values + df['VMT_car_RH_CAV_empty'].values) * factor * scale
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=plot_size)
    # Each bar's bottom is the running sum of the segments already drawn.
    plt_transit = plt.bar(x=top_labels_xpos, height=data['transit'], color=mode_colors['Transit'])
    plt_car = plt.bar(x=top_labels_xpos, height=data['car'], bottom=data['transit'], color=mode_colors['Car'])
    plt_cav = plt.bar(x=top_labels_xpos, height=data['cav'], bottom=data[['transit', 'car']].sum(axis=1), color=mode_colors['CAV'])
    plt_rh = plt.bar(x=top_labels_xpos, height=data['rh'], bottom=data[['transit', 'car', 'cav']].sum(axis=1), color=mode_colors['RH'])
    plt_rhp = plt.bar(x=top_labels_xpos, height=data['rhp'], bottom=data[['transit', 'car', 'cav', 'rh']].sum(axis=1), color=mode_colors['RHP'])
    plt_nm = plt.bar(x=top_labels_xpos, height=data['nm'], bottom=data[['transit', 'car', 'cav', 'rh', 'rhp']].sum(axis=1), color=mode_colors['NM'])
    # Proxy artist so the deadheading hatch appears in the legend; negative
    # height draws the hatch downward from the top of its segment.
    empty = mpatches.Patch(facecolor='white', label='The white data', hatch='///')
    plt.bar(x=top_labels_xpos, height=-data['cav_empty'], bottom=data[['transit', 'car', 'cav']].sum(axis=1), hatch='///', fill=False, linewidth=0)
    plt.bar(x=top_labels_xpos, height=-data['rh_empty'], bottom=data[['transit', 'car', 'cav', 'rh']].sum(axis=1), hatch='///', fill=False, linewidth=0)
    plt.xticks(bottom_labels_xpos, bottom_labels, rotation=angle)
    plt.legend((plt_transit, plt_car, plt_cav, plt_rh, plt_rhp, plt_nm, empty),
               ('Transit', 'Car', 'CAV', 'Ridehail', 'Ridehail Pool', 'NonMotorized', 'Deadheading'),
               labelspacing=-2.5, bbox_to_anchor=(1.05, 0.5), frameon=False)
    ax = plt.gca()
    ax.grid(False)
    max_value = max(height_all)
    ax.set_ylim((0, max_value))
    for ind in range(nb_scenarios):
        plt.text(top_labels_xpos[ind], max_value + 0.02*max_value, top_labels[ind], ha='center')
    plt.ylabel(ylabel)
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltLdvTechnologySplitInVMT(_plt_setup2, _output_folder):
    """Plot light duty VMT split by automation level as stacked bars.

    Segments come from the ``VMT_L1`` / ``VMT_L3`` / ``VMT_L5`` columns,
    labelled "No", "Partial" and "Full Automation".  Writes a PNG and the
    underlying CSV under ``<_output_folder>/makeplots``.
    """
    fig_size = _plt_setup2['plot_size']
    labels_top = _plt_setup2['top_labels']
    labels_bottom = _plt_setup2['bottom_labels']
    n_scenarios = len(_plt_setup2['scenarios_id'])
    expansion = _plt_setup2['expansion_factor']
    to_millions = 1 / 1000000
    tick_angle = 12
    (df, xpos_top, xpos_bottom) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.ldv_technologysplit_vmt.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.ldv_technologysplit_vmt.csv'.format(_output_folder, _plt_setup2['name'])
    data = pd.DataFrame(
        {'L1': df['VMT_L1'].values * expansion * to_millions,
         'L3': df['VMT_L3'].values * expansion * to_millions,
         'L5': df['VMT_L5'].values * expansion * to_millions
         })
    # Total stack heights, captured before non-numeric columns are appended.
    height_all = data.sum(axis=1)
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=fig_size)
    handle_no_automation = plt.bar(x=xpos_top, height=data['L1'])
    handle_partial_automation = plt.bar(x=xpos_top, height=data['L3'], bottom=data['L1'])
    handle_full_automation = plt.bar(x=xpos_top, height=data['L5'], bottom=data[['L1', 'L3']].sum(axis=1))
    plt.xticks(xpos_bottom, labels_bottom, rotation=tick_angle)
    plt.legend((handle_no_automation, handle_partial_automation, handle_full_automation),
               ('No Automation', 'Partial Automation', 'Full Automation'),
               labelspacing=-2.5, bbox_to_anchor=(1.05, 0.5), frameon=False)
    axes = plt.gca()
    axes.grid(False)
    peak = max(height_all)
    axes.set_ylim((0, peak))
    # Scenario captions sit 2% above the tallest stack.
    for idx in range(n_scenarios):
        plt.text(xpos_top[idx], peak + 0.02 * peak, labels_top[idx], ha='center')
    plt.ylabel('Vehicle Miles Traveled (millions)')
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltRHWaitTime(_plt_setup2, _output_folder):
    """Plot the average on-demand ride hail wait time (minutes) per scenario.

    Single (non-stacked) bar per scenario; also writes the values to CSV
    under ``<_output_folder>/makeplots``.
    """
    fig_size = _plt_setup2['plot_size']
    labels_top = _plt_setup2['top_labels']
    labels_bottom = _plt_setup2['bottom_labels']
    n_scenarios = len(_plt_setup2['scenarios_id'])
    tick_angle = 12
    (df, xpos_top, xpos_bottom) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.rh_wait_time.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.rh_wait_time.csv'.format(_output_folder, _plt_setup2['name'])
    wait_minutes = df['averageOnDemandRideWaitTimeInMin'].values.copy()
    data = pd.DataFrame({'rh_wait_time': wait_minutes})
    # Bar heights, captured before non-numeric columns are appended.
    height_all = data.sum(axis=1)
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=fig_size)
    plt.bar(x=xpos_top, height=data['rh_wait_time'], color=mode_colors['RH'])
    plt.xticks(xpos_bottom, labels_bottom, rotation=tick_angle)
    axes = plt.gca()
    axes.grid(False)
    peak = max(height_all)
    axes.set_ylim((0, peak))
    # Scenario captions sit 2% above the tallest bar.
    for idx in range(n_scenarios):
        plt.text(xpos_top[idx], peak + 0.02 * peak, labels_top[idx], ha='center')
    plt.ylabel('Average Ride Hail Wait (min)')
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltRHEmptyPooled(_plt_setup2, _output_folder):
    """Plot ridehail VMT (millions) split into solo and pooled rides.

    Empty (deadheading) miles are overlaid as a downward '///' hatch on the
    solo ridehail segment.  Writes ``<name>.rh_empty_shared.png`` and
    ``.csv`` under ``<_output_folder>/makeplots``.
    """
    plot_size = _plt_setup2['plot_size']
    top_labels = _plt_setup2['top_labels']
    bottom_labels = _plt_setup2['bottom_labels']
    nb_scenarios = len(_plt_setup2['scenarios_id'])
    factor = _plt_setup2['expansion_factor']  # sample -> population expansion
    scale = 1 / 1000000  # miles -> millions of miles
    angle = 12  # x tick label rotation (degrees)
    (df, top_labels_xpos, bottom_labels_xpos) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.rh_empty_shared.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.rh_empty_shared.csv'.format(_output_folder, _plt_setup2['name'])
    # CAV columns only exist in runs that include automated vehicles; default to 0.
    createColumnIfNotExist(df, 'VMT_car_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_shared', 0)
    createColumnIfNotExist(df, 'VMT_car_CAV_empty', 0)
    createColumnIfNotExist(df, 'VMT_car_CAV_shared', 0)
    createColumnIfNotExist(df, 'VMT_car_RH_CAV_empty', 0)
    data = pd.DataFrame(
        # Solo ridehail = all ridehail VMT minus shared-ride VMT.
        {'rh': (df['VMT_car_RH'].values+df['VMT_car_RH_CAV'].values-df['VMT_car_RH_shared'].values-df['VMT_car_RH_CAV_shared'].values) * factor * scale,
         'rhp': (df['VMT_car_RH_shared'].values+df['VMT_car_RH_CAV_shared'].values) * factor * scale
         })
    # Captured before the empty/scenario columns are appended so the y-limit
    # reflects only the stacked segment heights.
    height_all = data.sum(axis=1)
    data['rh_empty'] = (df['VMT_car_RH_empty'].values+df['VMT_car_RH_CAV_empty'].values) * factor * scale
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=plot_size)
    plt_rh = plt.bar(x=top_labels_xpos, height=data['rh'], color=mode_colors['RH'])
    plt_rhp = plt.bar(x=top_labels_xpos, height=data['rhp'], bottom=data['rh'], color=mode_colors['RHP'])
    # Negative height draws the deadheading hatch downward from the segment top.
    plt.bar(x=top_labels_xpos, height=-data['rh_empty'], bottom=data['rh'], hatch='///', fill=False, lw=0)
    plt.xticks(bottom_labels_xpos, bottom_labels, rotation=angle)
    # Proxy artist so the deadheading hatch appears in the legend.
    empty = mpatches.Patch(facecolor='white', label='The white data', hatch='///')
    ax = plt.gca()
    ax.grid(False)
    max_value = max(height_all)
    ax.set_ylim((0, max_value))
    for ind in range(nb_scenarios):
        plt.text(top_labels_xpos[ind], max_value + 0.02*max_value, top_labels[ind], ha='center')
    plt.ylabel('Ridehail Vehicle Miles Traveled (millions)')
    plt.legend((plt_rh, plt_rhp, empty),
               ('Ridehail', 'Ridehail Pool', 'Deadheading'),
               bbox_to_anchor=(1.05, 0.5), frameon=False)
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
def pltEnergyPerCapita(_plt_setup2, _output_folder):
    """Plot per-capita light duty vehicle energy use by source as stacked bars.

    Segments are Gasoline, Diesel and Electricity, each divided by the
    scenario population and converted to gigajoules.
    NOTE(review): unlike other plots, no expansion factor is applied here —
    presumably it cancels in the per-capita ratio; confirm against the
    summary data definitions.
    """
    fig_size = _plt_setup2['plot_size']
    labels_top = _plt_setup2['top_labels']
    labels_bottom = _plt_setup2['bottom_labels']
    n_scenarios = len(_plt_setup2['scenarios_id'])
    to_gigajoules = 1 / 1000000000
    tick_angle = 12
    (df, xpos_top, xpos_bottom) = getDfForPlt(_plt_setup2, _output_folder)
    output_png = '{}/makeplots/{}.energy_source_percapita.png'.format(_output_folder, _plt_setup2['name'])
    output_csv = '{}/makeplots/{}.energy_source_percapita.csv'.format(_output_folder, _plt_setup2['name'])
    population = df['population'].values
    data = pd.DataFrame(
        {'gas': (df['totalEnergy_Gasoline'].values / population) * to_gigajoules,
         'diesel': (df['totalEnergy_Diesel'].values / population) * to_gigajoules,
         'electricity': (df['totalEnergy_Electricity'].values / population) * to_gigajoules
         })
    # Total stack heights, captured before non-numeric columns are appended.
    height_all = data.sum(axis=1)
    data['scenario'] = df['Scenario'].values.copy()
    data['technology'] = df['Technology'].values.copy()
    data.to_csv(output_csv)
    plt.figure(figsize=fig_size)
    handle_gas = plt.bar(x=xpos_top, height=data['gas'], color=mode_colors['gas'])
    handle_diesel = plt.bar(x=xpos_top, height=data['diesel'], bottom=data['gas'], color=mode_colors['diesel'])
    handle_electricity = plt.bar(x=xpos_top, height=data['electricity'], bottom=data[['gas', 'diesel']].sum(axis=1), color=mode_colors['electricity'])
    plt.xticks(xpos_bottom, labels_bottom, rotation=tick_angle)
    axes = plt.gca()
    axes.grid(False)
    peak = max(height_all)
    axes.set_ylim((0, peak))
    # Scenario captions sit 2% above the tallest stack.
    for idx in range(n_scenarios):
        plt.text(xpos_top[idx], peak + 0.02 * peak, labels_top[idx], ha='center')
    plt.ylabel('Light Duty Vehicle Energy per Capita (GJ)')
    plt.legend((handle_electricity, handle_diesel, handle_gas),
               ('Electricity', 'Diesel', 'Gasoline'), bbox_to_anchor=(1.05, 0.5), frameon=False)
    plt.savefig(output_png, transparent=True, bbox_inches='tight', dpi=200, facecolor='white')
    plt.clf()
    plt.close()
| 54.636364 | 197 | 0.676774 |
79568cd059780dfd11f62dc6bdd5fb83d72b9710 | 683 | py | Python | AC02/payRent.py | rafaxtd/URI-Judge | 37a5eae0debd87f6a74323a64e98d9fb8e18d976 | [
"MIT"
] | null | null | null | AC02/payRent.py | rafaxtd/URI-Judge | 37a5eae0debd87f6a74323a64e98d9fb8e18d976 | [
"MIT"
] | null | null | null | AC02/payRent.py | rafaxtd/URI-Judge | 37a5eae0debd87f6a74323a64e98d9fb8e18d976 | [
"MIT"
] | null | null | null | rent = int(input())
pay = int(input())
payment = 1
if rent > pay:
while rent != 0:
print(f'pagamento: {payment}')
print(f'antes = {rent}')
rent -= pay
print(f'depois = {rent}')
print('-----')
payment += 1
if rent == 0:
break
if rent < pay:
print(f'pagamento: {payment}')
print(f'antes = {rent}')
rent = 0
print(f'depois = {rent}')
print('-----')
else:
print(f'pagamento: {payment}')
print(f'antes = {rent}')
rent = 0
print(f'depois = {rent}')
print('-----')
| 16.261905 | 42 | 0.401171 |
79568d2cdac6268f8ea2755b8776750a55769bd1 | 2,115 | py | Python | pyiron_base/job/external.py | pmrv/pyiron_base | af1729708a8226575ca2c84f574e7cb046b7f7cd | [
"BSD-3-Clause"
] | null | null | null | pyiron_base/job/external.py | pmrv/pyiron_base | af1729708a8226575ca2c84f574e7cb046b7f7cd | [
"BSD-3-Clause"
] | null | null | null | pyiron_base/job/external.py | pmrv/pyiron_base | af1729708a8226575ca2c84f574e7cb046b7f7cd | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import print_function
import json
from pathlib2 import Path
import warnings
from pyiron_base.generic.hdfio import FileHDFio
from pyiron_base.generic.parameters import GenericParameters
"""
Load input parameters for jupyter notebooks from external HDF5 or JSON file
"""
__author__ = "Osamu Waseda"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "janssen@mpie.de"
__status__ = "production"
__date__ = "Sep 1, 2019"
class Notebook(object):
"""
class for pyiron notebook objects
"""
@staticmethod
def get_custom_dict():
folder = Path(".").cwd().parts[-1]
project_folder = Path(".").cwd().parents[1]
hdf_file = project_folder / folder
hdf_file = str(hdf_file) + ".h5"
if Path(hdf_file).exists():
hdf = FileHDFio(hdf_file)
custom_dict = GenericParameters()
for k, v in zip(
hdf[folder + "/input/custom_dict/data_dict"]["Parameter"],
hdf[folder + "/input/custom_dict/data_dict"]["Value"],
):
custom_dict[k] = v
custom_dict["project_dir"] = str(project_folder)
return custom_dict
elif Path("input.json").exists():
with open("input.json") as f:
return json.load(f)
else:
warnings.warn("{} not found".format(hdf_file))
return None
@staticmethod
def store_custom_output_dict(output_dict):
folder = Path(".").cwd().parts[-1]
hdf_file = Path(".").cwd().parents[1] / folder
hdf_file = str(hdf_file) + ".h5"
hdf = FileHDFio(hdf_file)
hdf[folder].create_group("output")
for k, v in output_dict.items():
hdf[folder + "/output"][k] = v
| 32.045455 | 108 | 0.629314 |
79568d93c8ad75366e34dc2c187606b40cb3dfda | 661 | py | Python | every_election/apps/organisations/migrations/0017_auto_20170113_1625.py | DemocracyClub/EveryElection | da8a561cb4a84d9b432b1508a68f8cfada3d9515 | [
"BSD-3-Clause"
] | 8 | 2017-06-29T10:11:33.000Z | 2019-12-16T16:17:51.000Z | every_election/apps/organisations/migrations/0017_auto_20170113_1625.py | DemocracyClub/EveryElection | da8a561cb4a84d9b432b1508a68f8cfada3d9515 | [
"BSD-3-Clause"
] | 1,300 | 2017-01-08T14:02:24.000Z | 2022-03-31T09:11:30.000Z | every_election/apps/organisations/migrations/0017_auto_20170113_1625.py | DemocracyClub/EveryElection | da8a561cb4a84d9b432b1508a68f8cfada3d9515 | [
"BSD-3-Clause"
] | 11 | 2017-02-04T10:48:04.000Z | 2021-01-27T15:07:55.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-13 16:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("organisations", "0016_auto_20170111_1741")]
operations = [
migrations.AlterField(
model_name="organisationdivision",
name="geography_curie",
field=models.CharField(blank=True, max_length=100),
),
migrations.AlterField(
model_name="organisationdivision",
name="seats_total",
field=models.IntegerField(blank=True, null=True),
),
]
| 27.541667 | 65 | 0.638427 |
79568e6cafa611a43bc208a9e4a9f8ea67965200 | 3,331 | py | Python | test/functional/wallet_zapwallettxes.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | test/functional/wallet_zapwallettxes.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | test/functional/wallet_zapwallettxes.py | bitcoin-money/bitcoinmoney | d208756366292fa7ab16b8f4b9f2dff9b8567bee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Money developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the zapwallettxes functionality.
- start two bitcoind nodes
- create two transactions on node 0 - one is confirmed and one is unconfirmed.
- restart node 0 and verify that both the confirmed and the unconfirmed
transactions are still available.
- restart node 0 with zapwallettxes and persistmempool, and verify that both
the confirmed and the unconfirmed transactions are still available.
- restart node 0 with just zapwallettxes and verify that the confirmed
transactions are still available, but that the unconfirmed transaction has
been zapped.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
wait_until,
)
class ZapWalletTXesTest(BitcoinTestFramework):
    """Exercise -zapwallettxes with and without -persistmempool."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(100)
        self.sync_all()

        # One transaction that gets buried in a block...
        confirmed_txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10)
        self.nodes[0].generate(1)
        self.sync_all()
        # ...and one that stays unconfirmed in the mempool.
        unconfirmed_txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)

        # Both transactions are visible in the wallet.
        assert_equal(self.nodes[0].gettransaction(confirmed_txid)['txid'], confirmed_txid)
        assert_equal(self.nodes[0].gettransaction(unconfirmed_txid)['txid'], unconfirmed_txid)

        # A plain restart keeps both in the wallet.
        self.stop_node(0)
        self.start_node(0)
        assert_equal(self.nodes[0].gettransaction(confirmed_txid)['txid'], confirmed_txid)
        assert_equal(self.nodes[0].gettransaction(unconfirmed_txid)['txid'], unconfirmed_txid)

        # zapwallettxes + persistmempool: the unconfirmed tx is zapped from
        # the wallet but re-added when the persisted mempool is reloaded.
        self.stop_node(0)
        self.start_node(0, ["-persistmempool=1", "-zapwallettxes=2"])
        wait_until(lambda: self.nodes[0].getmempoolinfo()['size'] == 1, timeout=3)
        self.nodes[0].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
        assert_equal(self.nodes[0].gettransaction(confirmed_txid)['txid'], confirmed_txid)
        assert_equal(self.nodes[0].gettransaction(unconfirmed_txid)['txid'], unconfirmed_txid)

        # zapwallettxes without persistmempool: the unconfirmed tx is gone
        # for good, while the confirmed one survives.
        self.stop_node(0)
        self.start_node(0, ["-zapwallettxes=2"])
        assert_equal(self.nodes[0].gettransaction(confirmed_txid)['txid'], confirmed_txid)
        assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id',
                                self.nodes[0].gettransaction, unconfirmed_txid)
# Entry point when executed directly by the functional-test runner.
if __name__ == '__main__':
    ZapWalletTXesTest().main()
| 41.6375 | 112 | 0.707896 |
79568e6e8a3deb70b36dc16dc90db96dff140dc5 | 551 | py | Python | breached.py | zenux-dev/Cyheck | b52309085be8735b8dad39e8316dd6e6b3cc2e03 | [
"MIT"
] | null | null | null | breached.py | zenux-dev/Cyheck | b52309085be8735b8dad39e8316dd6e6b3cc2e03 | [
"MIT"
] | null | null | null | breached.py | zenux-dev/Cyheck | b52309085be8735b8dad39e8316dd6e6b3cc2e03 | [
"MIT"
] | 1 | 2021-09-14T05:36:35.000Z | 2021-09-14T05:36:35.000Z | import requests
# Marker phrases that each service includes in its response when the
# address has been found in a breach.
word = 'However, someone else could have signed up using your email address. Read the explanation below.'
word2 = 'Oh no! Your email address has been leaked'

def check(email):
    """Query two breach-lookup services for ``email``.

    Results are stored on the function object (``check.breached`` and
    ``check.breached2``) for backward compatibility with existing callers,
    and are also returned as a ``(breached, breached2)`` tuple.

    Raises ``requests.RequestException`` on network failure or timeout.
    """
    url = 'https://ashley.cynic.al/'
    payload = {'email': email}
    # A timeout keeps the lookup from hanging forever on a dead service.
    x = requests.post(url, data=payload, timeout=30)
    check.breached = word in x.text
    url2 = 'https://check.cybernews.com/chk/'
    payload2 = {'e': email, 'lang': 'en_US'}
    z = requests.post(url2, data=payload2, timeout=30)
    check.breached2 = word2 in z.text
    return check.breached, check.breached2
| 29 | 105 | 0.624319 |
79568f0a7d39ee78389d8c70e1d8967af18f492d | 6,537 | py | Python | bagpy-sbg/sbg_genpy/_SbgMagStatus.py | sergiobellido/bagpy | ba855b585c96442c21226dd4aa8bd12900fdbcf4 | [
"MIT"
] | null | null | null | bagpy-sbg/sbg_genpy/_SbgMagStatus.py | sergiobellido/bagpy | ba855b585c96442c21226dd4aa8bd12900fdbcf4 | [
"MIT"
] | null | null | null | bagpy-sbg/sbg_genpy/_SbgMagStatus.py | sergiobellido/bagpy | ba855b585c96442c21226dd4aa8bd12900fdbcf4 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from sbg_driver/SbgMagStatus.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SbgMagStatus(genpy.Message):
    """Autogenerated ROS message class for ``sbg_driver/SbgMagStatus``.

    Nine boolean status flags, serialized on the wire as nine consecutive
    unsigned bytes (struct format ``<9B``).
    """
    _md5sum = "057cf294623d5a0b037fdcc47f99e3c4"
    _type = "sbg_driver/SbgMagStatus"
    _has_header = False  # flag to mark the presence of a Header object
    _full_text = """# SBG Ellipse Messages
# Submessage
# True if the magnetometer X has passed the self test.
bool mag_x
# True if the magnetometer Y has passed the self test.
bool mag_y
# True if the magnetometer Z has passed the self test.
bool mag_z
# True if the accelerometer X has passed the self test.
bool accel_x
# True if the accelerometer Y has passed the self test.
bool accel_y
# True if the accelerometer Z has passed the self test.
bool accel_z
# True if magnetometer is not saturated
bool mags_in_range
# True if accelerometer is not saturated
bool accels_in_range
# True if magnetometer seems to be calibrated
bool calibration
"""
    __slots__ = ['mag_x','mag_y','mag_z','accel_x','accel_y','accel_z','mags_in_range','accels_in_range','calibration']
    _slot_types = ['bool','bool','bool','bool','bool','bool','bool','bool','bool']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.
        The available fields are:
        mag_x,mag_y,mag_z,accel_x,accel_y,accel_z,mags_in_range,accels_in_range,calibration
        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(SbgMagStatus, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.mag_x is None:
                self.mag_x = False
            if self.mag_y is None:
                self.mag_y = False
            if self.mag_z is None:
                self.mag_z = False
            if self.accel_x is None:
                self.accel_x = False
            if self.accel_y is None:
                self.accel_y = False
            if self.accel_z is None:
                self.accel_z = False
            if self.mags_in_range is None:
                self.mags_in_range = False
            if self.accels_in_range is None:
                self.accels_in_range = False
            if self.calibration is None:
                self.calibration = False
        else:
            # No arguments: every field gets its default value.
            self.mag_x = False
            self.mag_y = False
            self.mag_z = False
            self.accel_x = False
            self.accel_y = False
            self.accel_z = False
            self.mags_in_range = False
            self.accels_in_range = False
            self.calibration = False

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self
            # All nine booleans are packed as nine consecutive unsigned bytes.
            buff.write(_get_struct_9B().pack(_x.mag_x, _x.mag_y, _x.mag_z, _x.accel_x, _x.accel_y, _x.accel_z, _x.mags_in_range, _x.accels_in_range, _x.calibration))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        if python3:
            codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            end = 0
            _x = self
            start = end
            end += 9  # nine single-byte boolean fields
            (_x.mag_x, _x.mag_y, _x.mag_z, _x.accel_x, _x.accel_y, _x.accel_z, _x.mags_in_range, _x.accels_in_range, _x.calibration,) = _get_struct_9B().unpack(str[start:end])
            # struct unpacks to ints; normalize back to Python booleans.
            self.mag_x = bool(self.mag_x)
            self.mag_y = bool(self.mag_y)
            self.mag_z = bool(self.mag_z)
            self.accel_x = bool(self.accel_x)
            self.accel_y = bool(self.accel_y)
            self.accel_z = bool(self.accel_z)
            self.mags_in_range = bool(self.mags_in_range)
            self.accels_in_range = bool(self.accels_in_range)
            self.calibration = bool(self.calibration)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            _x = self
            buff.write(_get_struct_9B().pack(_x.mag_x, _x.mag_y, _x.mag_z, _x.accel_x, _x.accel_y, _x.accel_z, _x.mags_in_range, _x.accels_in_range, _x.calibration))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        if python3:
            codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            end = 0
            _x = self
            start = end
            end += 9  # nine single-byte boolean fields
            (_x.mag_x, _x.mag_y, _x.mag_z, _x.accel_x, _x.accel_y, _x.accel_z, _x.mags_in_range, _x.accels_in_range, _x.calibration,) = _get_struct_9B().unpack(str[start:end])
            self.mag_x = bool(self.mag_x)
            self.mag_y = bool(self.mag_y)
            self.mag_z = bool(self.mag_z)
            self.accel_x = bool(self.accel_x)
            self.accel_y = bool(self.accel_y)
            self.accel_z = bool(self.accel_z)
            self.mags_in_range = bool(self.mags_in_range)
            self.accels_in_range = bool(self.accels_in_range)
            self.calibration = bool(self.calibration)
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e)  # most likely buffer underfill
_struct_I = genpy.struct_I  # shared pre-compiled Struct for uint32 lengths ("<I")
def _get_struct_I():
    # Accessor used by the generated (de)serialization code.
    global _struct_I
    return _struct_I
_struct_9B = None
def _get_struct_9B():
    # Lazily compile and cache the Struct packing the nine boolean bytes.
    global _struct_9B
    if _struct_9B is None:
        _struct_9B = struct.Struct("<9B")
    return _struct_9B
| 35.145161 | 169 | 0.679058 |
79568f741f8a8896bfd1bec89b17e0a06218aa15 | 521 | py | Python | pdi/pdi.py | rodriguesfas/Vision-Computer | 9621f1020ea5f2f4dcabbb48e7fe2a4ef69c0ba5 | [
"MIT"
] | null | null | null | pdi/pdi.py | rodriguesfas/Vision-Computer | 9621f1020ea5f2f4dcabbb48e7fe2a4ef69c0ba5 | [
"MIT"
] | null | null | null | pdi/pdi.py | rodriguesfas/Vision-Computer | 9621f1020ea5f2f4dcabbb48e7fe2a4ef69c0ba5 | [
"MIT"
] | 1 | 2018-09-21T18:49:44.000Z | 2018-09-21T18:49:44.000Z | #coding: utf-8
#!/usr/bin/python
'''
REFERÊNCIAS:
<http://acodigo.blogspot.com.br/2013/05/procesamiento-de-imagenes-en-opencv.html>
@Author RodriguesFAS
@Date 26/09/2017
@Email <fasr@cin.ufpe.br> || <franciscosouzaacer@gmail.com>
@site <htpp://rodriguesfas.com.br>
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import numpy as np
src = 'src/lena.jpg'
img_org = cv2.imread(src)

# Fix: the original call used the C++ signature
# ``GaussianBlur(img, dst, Size(13, 7), 8)`` with the undefined names
# ``img``, ``dst`` and ``Size``.  The Python binding takes
# (src, ksize, sigmaX) and returns the blurred image.  Kernel sides
# must be odd, which (13, 7) satisfies.
dst1 = cv2.GaussianBlur(img_org, (13, 7), 8)

cv2.imshow('GaussianBlur', dst1)
cv2.waitKey(0)
795692b10bfcc8f4d21886b5beeb1ddb37b91738 | 24,534 | py | Python | src/sage/geometry/newton_polygon.py | vbraun/sage | 07d6c37d18811e2b377a9689790a7c5e24da16ba | [
"BSL-1.0"
] | 3 | 2016-06-19T14:48:31.000Z | 2022-01-28T08:46:01.000Z | src/sage/geometry/newton_polygon.py | vbraun/sage | 07d6c37d18811e2b377a9689790a7c5e24da16ba | [
"BSL-1.0"
] | null | null | null | src/sage/geometry/newton_polygon.py | vbraun/sage | 07d6c37d18811e2b377a9689790a7c5e24da16ba | [
"BSL-1.0"
] | 7 | 2021-11-08T10:01:59.000Z | 2022-03-03T11:25:52.000Z | """
Newton Polygons
This module implements finite Newton polygons and
infinite Newton polygons having a finite number of
slopes (and hence a last infinite slope).
"""
#############################################################################
# Copyright (C) 2013 Xavier Caruso <xavier.caruso@normalesup.org>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#############################################################################
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.structure.element import Element
from sage.structure.richcmp import op_EQ, op_NE, op_LE, op_GE, op_LT
from sage.misc.cachefunc import cached_method
from sage.rings.infinity import Infinity
from sage.geometry.polyhedron.constructor import Polyhedron
from sage.geometry.polyhedron.base import is_Polyhedron
class NewtonPolygon_element(Element):
    """
    Class for infinite Newton polygons with last slope.
    """
def __init__(self, polyhedron, parent):
    """
    Initialize a Newton polygon.
    INPUT:
    - polyhedron -- a polyhedron defining the Newton polygon
    TESTS::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NewtonPolygon([ (0,0), (1,1), (3,5) ])
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (3, 5)
        sage: NewtonPolygon([ (0,0), (1,1), (2,8), (3,5) ], last_slope=3)
        Infinite Newton polygon with 3 vertices: (0, 0), (1, 1), (3, 5) ending by an infinite line of slope 3
    ::
        sage: TestSuite(NewtonPolygon).run()
    """
    Element.__init__(self, parent)
    # Underlying (possibly unbounded) polyhedron in QQ^2.
    self._polyhedron = polyhedron
    # Sorted vertex list, computed lazily by :meth:`vertices`.
    self._vertices = None
def _repr_(self):
    """
    Return a string representation of this Newton polygon.
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (2,5) ]); NP
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 5)
        sage: NP._repr_()
        'Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 5)'
    """
    vertices = self.vertices()
    length = len(vertices)
    if self.last_slope() is Infinity:
        # Finite polygon: no final infinite edge.
        if length == 0:
            return "Empty Newton polygon"
        elif length == 1:
            return "Finite Newton polygon with 1 vertex: %s" % str(vertices[0])
        else:
            # str(vertices)[1:-1] strips the surrounding list brackets.
            return "Finite Newton polygon with %s vertices: %s" % (length, str(vertices)[1:-1])
    else:
        # Infinite polygon: ends with an edge of finite slope.
        if length == 1:
            return "Newton Polygon consisting of a unique infinite line of slope %s starting at %s" % (self.last_slope(), str(vertices[0]))
        else:
            return "Infinite Newton polygon with %s vertices: %s ending by an infinite line of slope %s" % (length, str(vertices)[1:-1], self.last_slope())
def vertices(self, copy=True):
    """
    Returns the list of vertices of this Newton polygon
    INPUT:
    - ``copy`` -- a boolean (default: ``True``)
    OUTPUT:
    The list of vertices of this Newton polygon (or a copy of it
    if ``copy`` is set to True)
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (2,5) ]); NP
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 5)
        sage: NP.vertices()
        [(0, 0), (1, 1), (2, 5)]
    """
    if self._vertices is None:
        # Compute and cache the sorted vertex list on first access.
        self._vertices = sorted(tuple(v) for v in self._polyhedron.vertices())
    return list(self._vertices) if copy else self._vertices
@cached_method
def last_slope(self):
    """
    Returns the last (infinite) slope of this Newton polygon
    if it is infinite and ``+Infinity`` otherwise.
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP1 = NewtonPolygon([ (0,0), (1,1), (2,8), (3,5) ], last_slope=3)
        sage: NP1.last_slope()
        3
        sage: NP2 = NewtonPolygon([ (0,0), (1,1), (2,5) ])
        sage: NP2.last_slope()
        +Infinity
    """
    # A ray (dx, dy) with dx > 0 encodes the final infinite edge of
    # slope dy/dx; the only other ray a Newton polygon can carry is the
    # vertical one (0, 1), which means the polygon is finite.
    for ray in self._polyhedron.rays():
        if ray[0] > 0:
            return ray[1] / ray[0]
    return Infinity
def slopes(self, repetition=True):
    """
    Returns the slopes of this Newton polygon
    INPUT:
    - ``repetition`` -- a boolean (default: ``True``)
    OUTPUT:
    The consecutive slopes (not including the last slope
    if the polygon is infinity) of this Newton polygon.
    If ``repetition`` is True, each slope is repeated a number of
    times equal to its length. Otherwise, it appears only one time.
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (3,6) ]); NP
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (3, 6)
        sage: NP.slopes()
        [1, 5/2, 5/2]
        sage: NP.slopes(repetition=False)
        [1, 5/2]
    """
    result = []
    vertices = self.vertices(copy=False)
    # Walk the vertices pairwise; each consecutive pair is one edge.
    for (x0, y0), (x1, y1) in zip(vertices, vertices[1:]):
        dx = x1 - x0
        slope = (y1 - y0) / dx
        if repetition:
            # Repeat the slope once per unit of horizontal length.
            result.extend([slope] * dx)
        else:
            result.append(slope)
    return result
def _add_(self, other):
    """
    Returns the convex hull of ``self`` and ``other``
    INPUT:
    - ``other`` -- a Newton polygon
    OUTPUT:
    The Newton polygon, which is the convex hull of this Newton polygon and ``other``
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP1 = NewtonPolygon([ (0,0), (1,1), (2,6) ]); NP1
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 6)
        sage: NP2 = NewtonPolygon([ (0,0), (1,3/2) ], last_slope=2); NP2
        Infinite Newton polygon with 2 vertices: (0, 0), (1, 3/2) ending by an infinite line of slope 2
        sage: NP1 + NP2
        Infinite Newton polygon with 2 vertices: (0, 0), (1, 1) ending by an infinite line of slope 2
    """
    hull = self._polyhedron.convex_hull(other._polyhedron)
    return self.parent()(hull)
def _mul_(self, other):
    """
    Returns the Minkowski sum of ``self`` and ``other``
    INPUT:
    - ``other`` -- a Newton polygon
    OUTPUT:
    The Newton polygon, which is the Minkowski sum of this Newton polygon and ``other``.
    NOTE::
    If ``self`` and ``other`` are respective Newton polygons of some polynomials
    `f` and `g` the self*other is the Newton polygon of the product `fg`
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP1 = NewtonPolygon([ (0,0), (1,1), (2,6) ]); NP1
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 6)
        sage: NP2 = NewtonPolygon([ (0,0), (1,3/2) ], last_slope=2); NP2
        Infinite Newton polygon with 2 vertices: (0, 0), (1, 3/2) ending by an infinite line of slope 2
        sage: NP = NP1 * NP2; NP
        Infinite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 5/2) ending by an infinite line of slope 2
        sage: NP.slopes()
        [1, 3/2]
    """
    minkowski_sum = self._polyhedron.Minkowski_sum(other._polyhedron)
    return self.parent()(minkowski_sum)
def __pow__(self, exp, ignored=None):
    """
    Returns ``self`` dilated by ``exp``
    INPUT:
    - ``exp`` -- a positive integer
    OUTPUT:
    This Newton polygon scaled by a factor ``exp``.
    NOTE::
    If ``self`` is the Newton polygon of a polynomial `f`, then
    ``self^exp`` is the Newton polygon of `f^{exp}`.
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (2,6) ]); NP
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 6)
        sage: NP^10
        Finite Newton polygon with 3 vertices: (0, 0), (10, 10), (20, 60)
    """
    # Dilation scales every vertex (and ray) by the factor ``exp``.
    scaled = self._polyhedron.dilation(exp)
    return self.parent()(scaled)
def __lshift__(self, i):
    """
    Returns ``self`` shifted by `(0,i)`
    INPUT:
    - ``i`` -- a rational number
    OUTPUT:
    This Newton polygon shifted by the vector `(0,i)`
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (2,6) ]); NP
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 6)
        sage: NP << 2
        Finite Newton polygon with 3 vertices: (0, 2), (1, 3), (2, 8)
    """
    shifted_up = self._polyhedron.translation((0, i))
    return self.parent()(shifted_up)
def __rshift__(self, i):
    """
    Returns ``self`` shifted by `(0,-i)`
    INPUT:
    - ``i`` -- a rational number
    OUTPUT:
    This Newton polygon shifted by the vector `(0,-i)`
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (2,6) ]); NP
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (2, 6)
        sage: NP >> 2
        Finite Newton polygon with 3 vertices: (0, -2), (1, -1), (2, 4)
    """
    shifted_down = self._polyhedron.translation((0, -i))
    return self.parent()(shifted_down)
def __call__(self, x):
    """
    Returns `self(x)`
    INPUT:
    - ``x`` -- a real number
    OUTPUT:
    The value of this Newton polygon at abscissa `x`
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (3,6) ]); NP
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (3, 6)
        sage: [ NP(i) for i in range(4) ]
        [0, 1, 7/2, 6]
    """
    # complexity: O(log(n))
    from sage.functions.other import floor
    vertices = self.vertices()
    lastslope = self.last_slope()
    # Left of the first vertex the polygon is +Infinity (by convention it
    # always contains the point (0, +Infinity)).
    if len(vertices) == 0 or x < vertices[0][0]:
        return Infinity
    if x == vertices[0][0]:
        return vertices[0][1]
    if x == vertices[-1][0]:
        return vertices[-1][1]
    # Beyond the last vertex, follow the last (possibly +Infinity) slope.
    if x > vertices[-1][0]:
        return vertices[-1][1] + lastslope * (x - vertices[-1][0])
    # Binary search for the edge [vertices[a], vertices[b]] containing x.
    a = 0; b = len(vertices)
    while b - a > 1:
        c = floor((a+b)/2)
        if vertices[c][0] < x:
            a = c
        else:
            b = c
    # Linear interpolation along that edge.
    (xg,yg) = vertices[a]
    (xd,yd) = vertices[b]
    return ((x-xg)*yd + (xd-x)*yg) / (xd-xg)
def _richcmp_(self, other, op):
    r"""
    Comparisons of two Newton polygons.
    The partial order is inclusion of the underlying polyhedra:
    ``NP1 <= NP2`` means the polygon of ``NP2`` lies inside that of
    ``NP1`` (so ``NP1`` is "below" ``NP2``).
    TESTS::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP1 = NewtonPolygon([ (0,0), (1,1), (3,6) ])
        sage: NP2 = NewtonPolygon([ (0,0), (1,1), (2,6), (3,6) ])
        sage: NP1 == NP2
        True
        sage: NP1 != NP2
        False
        sage: NP1 >= NP1 and NP2 >= NP2
        True
        sage: NP1 > NP1 or NP2 > NP2
        False
        sage: NP1 = NewtonPolygon([ (0,0), (1,1), (2,6) ])
        sage: NP2 = NewtonPolygon([ (0,0), (1,3/2) ], last_slope=2)
        sage: NP3 = NP1 + NP2
        sage: NP1 <= NP2
        False
        sage: NP3 <= NP1
        True
        sage: NP3 <= NP2
        True
        sage: NP1 < NP1
        False
        sage: NP1 < NP2
        False
        sage: NP1 >= NP2
        False
        sage: NP1 >= NP3
        True
        sage: NP1 > NP1
        False
        sage: NP1 > NP2
        False
        sage: NP1 >= NP3 and NP2 >= NP3 and NP3 <= NP1 and NP3 <= NP2
        True
        sage: NP1 > NP3 and NP2 > NP3
        True
        sage: NP3 < NP2 and NP3 < NP1
        True
    """
    # Identical polyhedra: equality (hence also <= and >=) holds.
    if self._polyhedron == other._polyhedron:
        return op == op_EQ or op == op_LE or op == op_GE
    elif op == op_NE:
        return True
    elif op == op_EQ:
        return False
    if op == op_LT or op == op_LE:
        # self <= other requires other's last slope to dominate and every
        # vertex of other to lie inside self's polyhedron.
        if self.last_slope() > other.last_slope():
            return False
        return all(v in self._polyhedron for v in other.vertices())
    else:
        # Symmetric test for >= / >.
        if self.last_slope() < other.last_slope():
            return False
        return all(v in other._polyhedron for v in self.vertices())
def plot(self, **kwargs):
    """
    Plot this Newton polygon.
    .. NOTE::
        All usual rendering options (color, thickness, etc.) are available.
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (2,6) ])
        sage: polygon = NP.plot()
    """
    vertices = self.vertices()
    if len(vertices) == 0:
        # Nothing to draw for the empty polygon.
        from sage.plot.graphics import Graphics
        return Graphics()
    else:
        from sage.plot.line import line
        (xstart,ystart) = vertices[0]
        (xend,yend) = vertices[-1]
        if self.last_slope() is Infinity:
            # Dashed vertical stubs at both ends suggest the polygon
            # continues upward to infinity; the solid part joins the
            # vertices themselves.
            return line([(xstart, ystart+1), (xstart,ystart+0.5)], linestyle="--", **kwargs) \
                + line([(xstart, ystart+0.5)] + vertices + [(xend, yend+0.5)], **kwargs) \
                + line([(xend, yend+0.5), (xend, yend+1)], linestyle="--", **kwargs)
        else:
            # Same, but the right-hand end follows the last (finite)
            # slope instead of going straight up.
            return line([(xstart, ystart+1), (xstart,ystart+0.5)], linestyle="--", **kwargs) \
                + line([(xstart, ystart+0.5)] + vertices + [(xend+0.5, yend + 0.5*self.last_slope())], **kwargs) \
                + line([(xend+0.5, yend + 0.5*self.last_slope()), (xend+1, yend+self.last_slope())], linestyle="--", **kwargs)
def reverse(self, degree=None):
    r"""
    Returns the symmetric of ``self``
    INPUT:
    - ``degree`` -- an integer (default: the top right abscissa of
      this Newton polygon)
    OUTPUT:
    The image this Newton polygon under the symmetry
    `(x,y) \mapsto (degree-x, y)`
    EXAMPLES::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NP = NewtonPolygon([ (0,0), (1,1), (2,5) ])
        sage: NP2 = NP.reverse(); NP2
        Finite Newton polygon with 3 vertices: (0, 5), (1, 1), (2, 0)
    We check that the slopes of the symmetric Newton polygon are
    the opposites of the slopes of the original Newton polygon::
        sage: NP.slopes()
        [1, 4]
        sage: NP2.slopes()
        [-4, -1]
    """
    # The symmetry only makes sense for finite polygons: reflecting an
    # infinite final edge would produce an edge of slope -infinity.
    if self.last_slope() is not Infinity:
        raise ValueError("Can only reverse *finite* Newton polygons")
    if degree is None:
        degree = self.vertices()[-1][0]
    vertices = [ (degree-x,y) for (x,y) in self.vertices() ]
    vertices.reverse()
    parent = self.parent()
    polyhedron = Polyhedron(base_ring=parent.base_ring(), vertices=vertices, rays=[(0,1)])
    return parent(polyhedron)
class ParentNewtonPolygon(Parent, UniqueRepresentation):
    r"""
    Construct a Newton polygon.
    INPUT:
    - ``arg`` -- a list/tuple/iterable of vertices or of
      slopes. Currently, slopes must be rational numbers.
    - ``sort_slopes`` -- boolean (default: ``True``). Specifying
      whether slopes must be first sorted
    - ``last_slope`` -- rational or infinity (default:
      ``Infinity``). The last slope of the Newton polygon
    OUTPUT:
    The corresponding Newton polygon.
    .. note::
        By convention, a Newton polygon always contains the point
        at infinity `(0, \infty)`. These polygons are attached to
        polynomials or series over discrete valuation rings (e.g. padics).
    EXAMPLES:
    We specify here a Newton polygon by its vertices::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NewtonPolygon([ (0,0), (1,1), (3,5) ])
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (3, 5)
    We note that the convex hull of the vertices is automatically
    computed::
        sage: NewtonPolygon([ (0,0), (1,1), (2,8), (3,5) ])
        Finite Newton polygon with 3 vertices: (0, 0), (1, 1), (3, 5)
    Note that the value ``+Infinity`` is allowed as the second coordinate
    of a vertex::
        sage: NewtonPolygon([ (0,0), (1,Infinity), (2,8), (3,5) ])
        Finite Newton polygon with 2 vertices: (0, 0), (3, 5)
    If last_slope is set, the returned Newton polygon is infinite
    and ends with an infinite line having the specified slope::
        sage: NewtonPolygon([ (0,0), (1,1), (2,8), (3,5) ], last_slope=3)
        Infinite Newton polygon with 3 vertices: (0, 0), (1, 1), (3, 5) ending by an infinite line of slope 3
    Specifying a last slope may discard some vertices::
        sage: NewtonPolygon([ (0,0), (1,1), (2,8), (3,5) ], last_slope=3/2)
        Infinite Newton polygon with 2 vertices: (0, 0), (1, 1) ending by an infinite line of slope 3/2
    Next, we define a Newton polygon by its slopes::
        sage: NP = NewtonPolygon([0, 1/2, 1/2, 2/3, 2/3, 2/3, 1, 1])
        sage: NP
        Finite Newton polygon with 5 vertices: (0, 0), (1, 0), (3, 1), (6, 3), (8, 5)
        sage: NP.slopes()
        [0, 1/2, 1/2, 2/3, 2/3, 2/3, 1, 1]
    By default, slopes are automatically sorted::
        sage: NP2 = NewtonPolygon([0, 1, 1/2, 2/3, 1/2, 2/3, 1, 2/3])
        sage: NP2
        Finite Newton polygon with 5 vertices: (0, 0), (1, 0), (3, 1), (6, 3), (8, 5)
        sage: NP == NP2
        True
    except if the contrary is explicitely mentioned::
        sage: NewtonPolygon([0, 1, 1/2, 2/3, 1/2, 2/3, 1, 2/3], sort_slopes=False)
        Finite Newton polygon with 4 vertices: (0, 0), (1, 0), (6, 10/3), (8, 5)
    Slopes greater that or equal last_slope (if specified) are discarded::
        sage: NP = NewtonPolygon([0, 1/2, 1/2, 2/3, 2/3, 2/3, 1, 1], last_slope=2/3)
        sage: NP
        Infinite Newton polygon with 3 vertices: (0, 0), (1, 0), (3, 1) ending by an infinite line of slope 2/3
        sage: NP.slopes()
        [0, 1/2, 1/2]
    Be careful, do not confuse Newton polygons provided by this class
    with Newton polytopes. Compare::
        sage: NP = NewtonPolygon([ (0,0), (1,45), (3,6) ]); NP
        Finite Newton polygon with 2 vertices: (0, 0), (3, 6)
        sage: x, y = polygen(QQ,'x, y')
        sage: p = 1 + x*y**45 + x**3*y**6
        sage: p.newton_polytope()
        A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
        sage: p.newton_polytope().vertices()
        (A vertex at (0, 0), A vertex at (1, 45), A vertex at (3, 6))
    """
    # Element class used by the parent/element coercion framework.
    Element = NewtonPolygon_element
def __init__(self):
    """
    Parent class for all Newton polygons.
        sage: from sage.geometry.newton_polygon import ParentNewtonPolygon
        sage: ParentNewtonPolygon()
        Parent for Newton polygons
    TESTS:
    This class is a singleton.
        sage: ParentNewtonPolygon() is ParentNewtonPolygon()
        True
    ::
        sage: TestSuite(ParentNewtonPolygon()).run()
    """
    # Imported here (not at module level) to avoid circular imports at
    # sage start-up.
    from sage.categories.semirings import Semirings
    from sage.rings.rational_field import QQ
    Parent.__init__(self, category=Semirings(), base=QQ)
def _repr_(self):
    """
    Returns the string representation of this parent,
    which is ``Parent for Newton polygons``
    TESTS::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NewtonPolygon
        Parent for Newton polygons
        sage: NewtonPolygon._repr_()
        'Parent for Newton polygons'
    """
    return "Parent for Newton polygons"
def _an_element_(self):
    """
    Returns a Newton polygon (which is the empty one)
    TESTS::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NewtonPolygon._an_element_()
        Empty Newton polygon
    """
    # The empty polyhedron in QQ^2 corresponds to the empty Newton polygon.
    return self(Polyhedron(base_ring=self.base_ring(), ambient_dim=2))
def _element_constructor_(self, arg, sort_slopes=True, last_slope=Infinity):
    r"""
    INPUT:
    - ``arg`` -- an argument describing the Newton polygon
    - ``sort_slopes`` -- boolean (default: ``True``). Specifying
      whether slopes must be first sorted
    - ``last_slope`` -- rational or infinity (default:
      ``Infinity``). The last slope of the Newton polygon
    The first argument ``arg`` can be either:
    - a polyhedron in `\QQ^2`
    - the element ``0`` (corresponding to the empty Newton polygon)
    - the element ``1`` (corresponding to the Newton polygon of the
      constant polynomial equal to 1)
    - a list/tuple/iterable of vertices
    - a list/tuple/iterable of slopes
    OUTPUT:
    The corresponding Newton polygon.
    For more informations, see :class:`ParentNewtonPolygon`.
    TESTS::
        sage: from sage.geometry.newton_polygon import NewtonPolygon
        sage: NewtonPolygon(0)
        Empty Newton polygon
        sage: NewtonPolygon(1)
        Finite Newton polygon with 1 vertex: (0, 0)
    """
    if is_Polyhedron(arg):
        return self.element_class(arg, parent=self)
    if arg == 0:
        # 0 is the neutral element for +: the empty Newton polygon.
        polyhedron = Polyhedron(base_ring=self.base_ring(), ambient_dim=2)
        return self.element_class(polyhedron, parent=self)
    if arg == 1:
        # 1 is the neutral element for *: the polygon of the constant 1.
        polyhedron = Polyhedron(base_ring=self.base_ring(),
                                vertices=[(0, 0)], rays=[(0, 1)])
        return self.element_class(polyhedron, parent=self)
    if not isinstance(arg, list):
        try:
            arg = list(arg)
        except TypeError:
            raise TypeError("argument must be a list of coordinates or a list of (rational) slopes")
    if len(arg) > 0 and arg[0] in self.base_ring():
        # ``arg`` is a list of slopes.  Sort a *copy* so the caller's
        # list is never mutated (the previous in-place ``arg.sort()``
        # silently reordered user-supplied lists).
        if sort_slopes:
            arg = sorted(arg)
        x = y = 0
        vertices = [(x, y)]
        for slope in arg:
            if slope not in self.base_ring():
                raise TypeError("argument must be a list of coordinates or a list of (rational) slopes")
            x += 1
            y += slope
            vertices.append((x, y))
    else:
        # ``arg`` is a list of vertices; drop points with infinite
        # ordinate (they are contained in every Newton polygon).
        vertices = [(x, y) for (x, y) in arg if y is not Infinity]
    if len(vertices) == 0:
        polyhedron = Polyhedron(base_ring=self.base_ring(), ambient_dim=2)
    else:
        rays = [(0, 1)]
        if last_slope is not Infinity:
            # A finite last slope adds the final infinite edge as a ray.
            rays.append((1, last_slope))
        polyhedron = Polyhedron(base_ring=self.base_ring(), vertices=vertices, rays=rays)
    return self.element_class(polyhedron, parent=self)
# The unique parent instance; call it to build polygons, e.g.
# ``NewtonPolygon([(0, 0), (1, 1)])``.
NewtonPolygon = ParentNewtonPolygon()
| 32.495364 | 159 | 0.54076 |
795692bfefc55904b1ab140a046f29f1fc1baf65 | 3,113 | py | Python | src/oscar/views/decorators.py | highbiza/django-oscar | 7eba207a77e5dd56b04a63b9283a9d76da2f64ac | [
"BSD-3-Clause"
] | 1 | 2019-10-22T01:10:20.000Z | 2019-10-22T01:10:20.000Z | src/oscar/views/decorators.py | highbiza/django-oscar | 7eba207a77e5dd56b04a63b9283a9d76da2f64ac | [
"BSD-3-Clause"
] | 10 | 2020-05-11T20:33:31.000Z | 2022-03-12T00:24:28.000Z | src/oscar/views/decorators.py | highbiza/django-oscar | 7eba207a77e5dd56b04a63b9283a9d76da2f64ac | [
"BSD-3-Clause"
] | 3 | 2019-03-20T16:17:58.000Z | 2022-02-25T09:38:38.000Z | import collections
from functools import wraps
from django.contrib.auth.decorators import user_passes_test
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.urls import reverse_lazy
def check_permissions(user, permissions):
    """
    Permissions can be a list or a tuple of lists. If it is a tuple,
    every permission list will be evaluated and the outcome will be checked
    for truthiness.
    Each item of the list(s) must be either a valid Django permission name
    (model.codename) or a property or method on the User model
    (e.g. 'is_active', 'is_superuser').
    Example usage:
    - permissions_required(['is_anonymous', ])
      would replace login_forbidden
    - permissions_required((['is_staff',], ['partner.dashboard_access']))
      allows both staff users and users with the above permission
    """
    def _check_one_permission_list(perms):
        # Dotted names are regular Django permissions; bare names are
        # attributes/methods on the user model.
        regular_permissions = [perm for perm in perms if '.' in perm]
        conditions = [perm for perm in perms if '.' not in perm]
        # always check for is_active if not checking for is_anonymous
        if (conditions and
                'is_anonymous' not in conditions and
                'is_active' not in conditions):
            conditions.append('is_active')
        attributes = [getattr(user, perm) for perm in conditions]
        # Evaluates methods, explicitly casts properties to booleans.
        # ``collections.Callable`` was removed in Python 3.10; the builtin
        # ``callable`` performs the same check on every supported version.
        passes_conditions = all(
            attr() if callable(attr) else bool(attr) for attr in attributes)
        return passes_conditions and user.has_perms(regular_permissions)

    if not permissions:
        return True
    elif isinstance(permissions, list):
        return _check_one_permission_list(permissions)
    else:
        # A tuple (or other iterable) of lists: any passing list suffices.
        return any(_check_one_permission_list(perm) for perm in permissions)
def permissions_required(permissions, login_url=None):
    """
    Decorator that checks if a user has the given permissions.
    Accepts a list or tuple of lists of permissions (see check_permissions
    documentation).

    Anonymous users failing the check are redirected to the login page;
    authenticated users failing it get an HTTP 403 Permission Denied
    response, analogous to Django's permission_required decorator.
    """
    target_url = reverse_lazy('customer:login') if login_url is None else login_url

    def _gatekeeper(user):
        # Raise (-> 403) only for logged-in users; anonymous users fall
        # through so user_passes_test redirects them to the login page.
        allowed = check_permissions(user, permissions)
        if not allowed and user.is_authenticated:
            raise PermissionDenied
        return allowed

    return user_passes_test(_gatekeeper, login_url=target_url)
def login_forbidden(view_func, template_name='login_forbidden.html',
                    status=403):
    """
    Only allow anonymous users to access this view.
    """
    @wraps(view_func)
    def _wrapped(request, *args, **kwargs):
        # Authenticated users are shown the "forbidden" template instead
        # of the wrapped view.
        if request.user.is_authenticated:
            return render(request, template_name, status=status)
        return view_func(request, *args, **kwargs)

    return _wrapped
| 37.963415 | 101 | 0.700289 |
795692eb59ae57c456578a5baa59e1cc8f279c2e | 12,554 | py | Python | 015-word2vec/skip-gram.py | MuhamedEssam/deeplearning | e6004da4df5d8d066f637dc471f4a0f590f3af1e | [
"Apache-2.0"
] | 22 | 2018-03-05T11:17:48.000Z | 2021-06-15T02:10:36.000Z | 015-word2vec/skip-gram.py | MuhamedEssam/deeplearning | e6004da4df5d8d066f637dc471f4a0f590f3af1e | [
"Apache-2.0"
] | 7 | 2018-03-10T10:17:30.000Z | 2018-04-23T00:57:39.000Z | 015-word2vec/skip-gram.py | deepcollege/deeplearning | e6004da4df5d8d066f637dc471f4a0f590f3af1e | [
"Apache-2.0"
] | 8 | 2018-03-06T01:21:31.000Z | 2021-06-15T02:10:37.000Z | # code is from https://github.com/tensorflow/tensorflow/blob/r1.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
# current_path is the directory containing this script.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))

parser = argparse.ArgumentParser()
parser.add_argument(
    '--log_dir',
    type=str,
    default=os.path.join(current_path, 'log'),
    help='The log directory for TensorBoard summaries.')
# parse_known_args ignores unrecognised flags instead of erroring out.
FLAGS, unparsed = parser.parse_known_args()

# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(FLAGS.log_dir):
    os.makedirs(FLAGS.log_dir)

# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename, local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename + '. Can you get to it with a browser?')
return local_filename
# text8 is ~31 MB of cleaned Wikipedia text; the byte count pins the
# exact expected file.
filename = maybe_download('text8.zip', 31344016)
print('Retrieved a file', filename)

# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words."""
    with zipfile.ZipFile(filename) as archive:
        first_member = archive.namelist()[0]
        words = tf.compat.as_str(archive.read(first_member)).split()
    return words
vocabulary = read_data(filename)
print('Data size', len(vocabulary))

# Step 2: Build the dictionary and replace rare words with UNK token.
# Only the 50k most frequent words get their own code.
vocabulary_size = 50000
def build_dataset(words, n_words):
    """Process raw inputs into a dataset.

    Keeps the ``n_words - 1`` most frequent words; every other word is
    mapped to the reserved 'UNK' code 0.
    """
    counts = [['UNK', -1]]
    counts.extend(collections.Counter(words).most_common(n_words - 1))
    # Codes are assigned by frequency rank, with UNK first at 0.
    word_to_code = {word: code for code, (word, _) in enumerate(counts)}
    codes = []
    unknown_seen = 0
    for word in words:
        code = word_to_code.get(word, 0)
        if code == 0:  # dictionary['UNK']
            unknown_seen += 1
        codes.append(code)
    counts[0][1] = unknown_seen
    code_to_word = {code: word for word, code in word_to_code.items()}
    return codes, counts, word_to_code, code_to_word
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
#   This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary, vocabulary_size)
del vocabulary  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

# Module-level cursor into `data`, advanced by generate_batch below.
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
# Smoke-test the batcher: print 8 (center -> context) pairs.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])

# Step 4: Build and train a skip-gram model.

batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1  # How many words to consider left and right.
num_skips = 2  # How many times to reuse an input to generate a label.
num_sampled = 64  # Number of negative examples to sample.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16  # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()

with graph.as_default():

    # Input data.
    with tf.name_scope('inputs'):
        train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    with tf.device('/cpu:0'):
        # Look up embeddings for inputs.
        with tf.name_scope('embeddings'):
            # Embedding matrix initialised uniformly in [-1, 1).
            embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, train_inputs)

        # Construct the variables for the NCE loss
        with tf.name_scope('weights'):
            nce_weights = tf.Variable(
                tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
        with tf.name_scope('biases'):
            nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    # Explanation of the meaning of NCE loss:
    #   http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(
            tf.nn.nce_loss(
                weights=nce_weights,
                biases=nce_biases,
                labels=train_labels,
                inputs=embed,
                num_sampled=num_sampled,
                num_classes=vocabulary_size))

    # Add the loss value as a scalar to summary.
    tf.summary.scalar('loss', loss)

    # Construct the SGD optimizer using a learning rate of 1.0.
    with tf.name_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # Compute the cosine similarity between minibatch examples and all embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
    similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)

    # Merge all summaries.
    merged = tf.summary.merge_all()

    # Add variable initializer.
    init = tf.global_variables_initializer()

    # Create a saver.
    saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001

with tf.Session(graph=graph) as session:
    # Open a writer to write summaries.
    writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)

    # We must initialize all variables before we use them.
    init.run()
    print('Initialized')

    average_loss = 0
    for step in xrange(num_steps):
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

        # Define metadata variable.
        run_metadata = tf.RunMetadata()

        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run()
        # Also, evaluate the merged op to get all summaries from the returned "summary" variable.
        # Feed metadata variable to session for visualizing the graph in TensorBoard.
        _, summary, loss_val = session.run([optimizer, merged, loss], feed_dict=feed_dict, run_metadata=run_metadata)
        average_loss += loss_val

        # Add returned summaries to writer in each step.
        writer.add_summary(summary, step)
        # Add metadata to visualize the graph for the last run.
        if step == (num_steps - 1):
            writer.add_run_metadata(run_metadata, 'step%d' % step)

        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            # The average loss is an estimate of the loss over the last 2000 batches.
            print('Average loss at step ', step, ': ', average_loss)
            average_loss = 0

        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % 10000 == 0:
            sim = similarity.eval()
            for i in xrange(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                # argsort on negated similarities; index 0 is the word itself.
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = 'Nearest to %s:' % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = '%s %s,' % (log_str, close_word)
                print(log_str)
    final_embeddings = normalized_embeddings.eval()

    # Write corresponding labels for the embeddings.
    with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
        for i in xrange(vocabulary_size):
            f.write(reverse_dictionary[i] + '\n')

    # Save the model for checkpoints.
    saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))

    # Create a configuration for visualizing embeddings with the labels in TensorBoard.
    config = projector.ProjectorConfig()
    embedding_conf = config.embeddings.add()
    embedding_conf.tensor_name = embeddings.name
    embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
    projector.visualize_embeddings(writer, config)

writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
    """Scatter-plot 2-D embeddings with one text annotation per point."""
    assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
    plt.figure(figsize=(18, 18))  # in inches
    # Iterate labels alongside their (x, y) rows; the assert above
    # guarantees there is a row for every label.
    for label, (x, y) in zip(labels, low_dim_embs):
        plt.scatter(x, y)
        plt.annotate(
            label,
            xy=(x, y),
            xytext=(5, 2),
            textcoords='offset points',
            ha='right',
            va='bottom')
    plt.savefig(filename)
# Project the learned embeddings to 2-D with t-SNE and save a picture;
# skipped gracefully when the plotting stack is not installed.
try:
    # pylint: disable=g-import-not-at-top
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
    plot_only = 500  # only plot the most frequent words
    low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
    plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))

except ImportError as ex:
    print('Please install sklearn, matplotlib, and scipy to show embeddings.')
    print(ex)
| 39.109034 | 122 | 0.685519 |
795693317f2f3b7de6f16769627947881070a31a | 3,926 | py | Python | mockups/godlike/Ui_aranduka.py | webmedic/booker | e4d0e8c7b7c3d8c664c760ec9458ec306a0e1116 | [
"MIT"
] | null | null | null | mockups/godlike/Ui_aranduka.py | webmedic/booker | e4d0e8c7b7c3d8c664c760ec9458ec306a0e1116 | [
"MIT"
] | null | null | null | mockups/godlike/Ui_aranduka.py | webmedic/booker | e4d0e8c7b7c3d8c664c760ec9458ec306a0e1116 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/godlike/eric/aranduka/aranduka.ui'
#
# Created: Wed Sep 1 00:32:14 2010
# by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
    """pyuic4-generated builder for the Aranduka main window UI.

    Generated code — do not edit by hand; regenerate from the .ui file.
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets, menus, toolbars and actions."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1032, 700)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.widget = QtGui.QWidget(self.centralwidget)
        self.widget.setGeometry(QtCore.QRect(0, 10, 951, 581))
        self.widget.setObjectName("widget")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1032, 21))
        self.menubar.setObjectName("menubar")
        self.menuSi_aca_van_los_menuses = QtGui.QMenu(self.menubar)
        self.menuSi_aca_van_los_menuses.setObjectName("menuSi_aca_van_los_menuses")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.toolBar = QtGui.QToolBar(MainWindow)
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.ToolBarArea(QtCore.Qt.TopToolBarArea), self.toolBar)
        # Actions shown in the menu and toolbar.
        self.actionPero_no_puse_nada_todavia = QtGui.QAction(MainWindow)
        self.actionPero_no_puse_nada_todavia.setObjectName("actionPero_no_puse_nada_todavia")
        self.actionMain = QtGui.QAction(MainWindow)
        self.actionMain.setObjectName("actionMain")
        self.actionView_Queue = QtGui.QAction(MainWindow)
        self.actionView_Queue.setObjectName("actionView_Queue")
        self.actionShelves = QtGui.QAction(MainWindow)
        self.actionShelves.setObjectName("actionShelves")
        self.actionTags = QtGui.QAction(MainWindow)
        self.actionTags.setObjectName("actionTags")
        self.menuSi_aca_van_los_menuses.addAction(self.actionPero_no_puse_nada_todavia)
        self.menubar.addAction(self.menuSi_aca_van_los_menuses.menuAction())
        self.toolBar.addAction(self.actionMain)
        self.toolBar.addAction(self.actionView_Queue)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionShelves)
        self.toolBar.addAction(self.actionTags)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings, wrapped for Qt translation."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.menuSi_aca_van_los_menuses.setTitle(QtGui.QApplication.translate("MainWindow", "Si, aca van los menuses", None, QtGui.QApplication.UnicodeUTF8))
        self.toolBar.setWindowTitle(QtGui.QApplication.translate("MainWindow", "toolBar", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPero_no_puse_nada_todavia.setText(QtGui.QApplication.translate("MainWindow", "Pero no puse nada todavia", None, QtGui.QApplication.UnicodeUTF8))
        self.actionMain.setText(QtGui.QApplication.translate("MainWindow", "Main", None, QtGui.QApplication.UnicodeUTF8))
        self.actionView_Queue.setText(QtGui.QApplication.translate("MainWindow", "View Queue", None, QtGui.QApplication.UnicodeUTF8))
        self.actionShelves.setText(QtGui.QApplication.translate("MainWindow", "Shelves", None, QtGui.QApplication.UnicodeUTF8))
        self.actionTags.setText(QtGui.QApplication.translate("MainWindow", "Tags", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
    # Manual smoke test: instantiate the generated UI in a bare QMainWindow.
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
795693cc5e3df178d55e80d4940be1b7f5210570 | 55 | py | Python | enthought/pyface/ui/qt4/timer/timer.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/pyface/ui/qt4/timer/timer.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/pyface/ui/qt4/timer/timer.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from pyface.ui.qt4.timer.timer import *
| 18.333333 | 39 | 0.763636 |
79569405ffdf30f7ceabd03457ff43bf5030abaf | 3,109 | py | Python | tf2onnx/optimizer/__init__.py | gerbenvv/tensorflow-onnx | 4cfc02520ab21b129ee025c5f5066c02ff850ca2 | [
"Apache-2.0"
] | 1 | 2021-04-30T15:26:06.000Z | 2021-04-30T15:26:06.000Z | tf2onnx/optimizer/__init__.py | hxcai/tensorflow-onnx | 97d38f12651d2bfeca3be274c1c654d2344d4cdf | [
"Apache-2.0"
] | null | null | null | tf2onnx/optimizer/__init__.py | hxcai/tensorflow-onnx | 97d38f12651d2bfeca3be274c1c654d2344d4cdf | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
"""tf2onnx.optimizer module"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import copy
from .const_fold_optimizer import ConstFoldOptimizer
from .identity_optimizer import IdentityOptimizer
from .merge_duplicated_nodes_optimizer import MergeDuplicatedNodesOptimizer
from .transpose_optimizer import TransposeOptimizer
from .loop_optimizer import LoopOptimizer
from .back_to_back_optimizer import BackToBackOptimizer
from .upsample_optimizer import UpsampleOptimizer
from .const_dequantize_optimizer import ConstDequantizeOptimizer
from .reshape_optimizer import ReshapeOptimizer
from .. import logging
# optimizer sequence need to be considered carefully
_optimizers = OrderedDict([
("optimize_transpose", TransposeOptimizer),
("remove_redundant_upsample", UpsampleOptimizer),
("fold_constants", ConstFoldOptimizer),
("const_dequantize_optimizer", ConstDequantizeOptimizer),
("loop_optimizer", LoopOptimizer),
# merge_duplication should be used after optimize_transpose
# for optimize_transpose may have some trans nodes that can be merge
("merge_duplication", MergeDuplicatedNodesOptimizer),
("reshape_optimizer", ReshapeOptimizer),
("remove_identity", IdentityOptimizer),
("remove_back_to_back", BackToBackOptimizer),
])
def _get_optimizers():
    """Return the ordered pass-name -> optimizer-class registry."""
    return _optimizers
def optimize_graph(graph, catch_errors=True):
    """ Optimize graph, return optimized graph. No throw if catch_errors is true"""
    logger = logging.getLogger(__name__)
    logger.info("Optimizing ONNX model")

    before = graph.dump_node_statistics()
    opts = _get_optimizers()
    continue_flag = True
    # Keep sweeping all passes until a full sweep changes nothing.
    while continue_flag:
        continue_flag = False
        for name, factory in opts.items():
            logger.verbose("Apply %s", name)
            if catch_errors:
                try:
                    # Work on a deep copy so a failing optimizer cannot
                    # leave `graph` half-modified.
                    current = copy.deepcopy(graph)
                    opt = factory()
                    graph = opt.optimize(current) or graph
                    continue_flag = continue_flag or opt.graph_been_opt
                except Exception:  # pylint: disable=broad-except
                    # if current optimizer fails, continue with other optimizers
                    logger.warning("Failed to apply %s", name, exc_info=1)
            else:
                # No copy here: failures propagate to the caller.
                opt = factory()
                graph = opt.optimize(graph)
                continue_flag = continue_flag or opt.graph_been_opt

    try:
        graph.topological_sort(graph.get_nodes())
    except Exception:  # pylint: disable=broad-except
        logger.warning("Failed topological_sort", exc_info=1)

    # Log per-op-type node-count deltas, e.g. "Transpose -3 (5->2)".
    after = graph.dump_node_statistics()
    diff = copy.deepcopy(after)
    diff.subtract(before)
    diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0))
            for k, v in sorted(diff.items()) if v != 0]
    logger.info("After optimization: %s", ', '.join(diff) if diff else "no change")
    return graph
| 37.914634 | 108 | 0.698617 |
795694e97b48b87f2686bb723392cbb0f37088f7 | 9,574 | py | Python | docs/conf.py | hasadi-ha/minemeld-core | eb135597ce895b78f4c2ec272ffc8a45a12962bd | [
"Apache-2.0"
] | null | null | null | docs/conf.py | hasadi-ha/minemeld-core | eb135597ce895b78f4c2ec272ffc8a45a12962bd | [
"Apache-2.0"
] | null | null | null | docs/conf.py | hasadi-ha/minemeld-core | eb135597ce895b78f4c2ec272ffc8a45a12962bd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# minemeld-core documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 6 14:14:33 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# flake8: noqa
import sys
import os
import shlex

# from http://blog.rtwilson.com/how-to-make-your-sphinx-documentation-compile-with-readthedocs-when-youre-using-numpy-and-scipy/
# Stub out heavy/binary dependencies so autodoc can import the package on
# Read the Docs. Prefer the stdlib mock (Python >= 3.3) and fall back to
# the third-party backport only on legacy interpreters.
try:
    from unittest import mock
except ImportError:
    import mock

MOCK_MODULES = ['plyvel']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'minemeld-core'
copyright = u'2015, Palo Alto Networks'
author = u'Palo Alto Networks'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9.44.post1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'minemeld-coredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    # Title fixed: it previously read 'modindex_common_prefixer-wagon
    # Documentation' (a copy/paste artifact); use the project name, matching
    # the man_pages and texinfo_documents entries below.
    (master_doc, 'minemeld-core.tex', u'minemeld-core Documentation',
     u'Palo Alto Networks', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'minemeld-core', u'minemeld-core Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'minemeld-core', u'minemeld-core Documentation',
author, 'minemeld-core', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.23569 | 128 | 0.719657 |
795694ebee51e393607abcf401497175c10f3acd | 63,512 | py | Python | test/python/dagcircuit/test_dagcircuit.py | biblio-techers/Qiskit-Fall-Fest-2021 | 17bb4f21d4c7f4723c43b87ebae21196e8aa53c9 | [
"Apache-2.0"
] | 1 | 2021-09-25T18:49:52.000Z | 2021-09-25T18:49:52.000Z | test/python/dagcircuit/test_dagcircuit.py | biblio-techers/Qiskit-Fall-Fest-2021 | 17bb4f21d4c7f4723c43b87ebae21196e8aa53c9 | [
"Apache-2.0"
] | null | null | null | test/python/dagcircuit/test_dagcircuit.py | biblio-techers/Qiskit-Fall-Fest-2021 | 17bb4f21d4c7f4723c43b87ebae21196e8aa53c9 | [
"Apache-2.0"
] | 1 | 2020-04-15T07:23:23.000Z | 2020-04-15T07:23:23.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for the DAGCircuit object"""
from collections import Counter
import unittest
from ddt import ddt, data
import retworkx as rx
from numpy import pi
from qiskit.dagcircuit import DAGCircuit, DAGOpNode, DAGInNode, DAGOutNode
from qiskit.circuit import QuantumRegister
from qiskit.circuit import ClassicalRegister, Clbit
from qiskit.circuit import QuantumCircuit, Qubit
from qiskit.circuit import Measure
from qiskit.circuit import Reset
from qiskit.circuit import Delay
from qiskit.circuit import Gate, Instruction
from qiskit.circuit import Parameter
from qiskit.circuit.library.standard_gates.i import IGate
from qiskit.circuit.library.standard_gates.h import HGate
from qiskit.circuit.library.standard_gates.x import CXGate
from qiskit.circuit.library.standard_gates.z import CZGate
from qiskit.circuit.library.standard_gates.x import XGate
from qiskit.circuit.library.standard_gates.y import YGate
from qiskit.circuit.library.standard_gates.u1 import U1Gate
from qiskit.circuit.barrier import Barrier
from qiskit.dagcircuit.exceptions import DAGCircuitError
from qiskit.converters import circuit_to_dag
from qiskit.test import QiskitTestCase
def raise_if_dagcircuit_invalid(dag):
    """Validate the internal consistency of a DAGCircuit._multi_graph.

    Walks the retworkx multigraph backing ``dag`` and asserts every structural
    invariant the DAGCircuit implementation relies on: acyclicity, node typing,
    wire continuity, balanced in/out degrees and bookkeeping caches.
    Intended for use in testing.

    Raises:
        DAGCircuitError: if DAGCircuit._multi_graph is inconsistent.
    """
    multi_graph = dag._multi_graph
    # The graph must be acyclic; everything below assumes a well-formed DAG.
    if not rx.is_directed_acyclic_graph(multi_graph):
        raise DAGCircuitError("multi_graph is not a DAG.")
    # Every node should be of type in, out, or op.
    # All input/output nodes should be present in input_map/output_map.
    for node in dag._multi_graph.nodes():
        if isinstance(node, DAGInNode):
            # identity check: the map must hold this exact object, not a copy
            assert node is dag.input_map[node.wire]
        elif isinstance(node, DAGOutNode):
            assert node is dag.output_map[node.wire]
        elif isinstance(node, DAGOpNode):
            continue
        else:
            raise DAGCircuitError(f"Found node of unexpected type: {type(node)}")
    # Shape of node.op should match shape of node.
    for node in dag.op_nodes():
        assert len(node.qargs) == node.op.num_qubits
        assert len(node.cargs) == node.op.num_clbits
    # Every edge should be labeled with a known wire.
    edges_outside_wires = [
        edge_data for edge_data in dag._multi_graph.edges() if edge_data not in dag.wires
    ]
    if edges_outside_wires:
        raise DAGCircuitError(
            "multi_graph contains one or more edges ({}) "
            "not found in DAGCircuit.wires ({}).".format(edges_outside_wires, dag.wires)
        )
    # Every wire should have exactly one input node and one output node.
    for wire in dag.wires:
        in_node = dag.input_map[wire]
        out_node = dag.output_map[wire]
        assert in_node.wire == wire
        assert out_node.wire == wire
        assert isinstance(in_node, DAGInNode)
        assert isinstance(out_node, DAGOutNode)
    # Every wire should be propagated by exactly one edge between nodes.
    # Follow each wire from its input node and require exactly one outgoing
    # edge labeled with that wire at every hop until the output node.
    for wire in dag.wires:
        cur_node_id = dag.input_map[wire]._node_id
        out_node_id = dag.output_map[wire]._node_id
        while cur_node_id != out_node_id:
            out_edges = dag._multi_graph.out_edges(cur_node_id)
            edges_to_follow = [(src, dest, data) for (src, dest, data) in out_edges if data == wire]
            assert len(edges_to_follow) == 1
            cur_node_id = edges_to_follow[0][1]
    # Wires can only terminate at input/output nodes.
    # (An op node passes every wire through, so in-degree == out-degree.)
    op_counts = Counter()
    for op_node in dag.op_nodes():
        assert multi_graph.in_degree(op_node._node_id) == multi_graph.out_degree(op_node._node_id)
        op_counts[op_node.name] += 1
    # The _op_names attribute should match the counted op names
    assert op_counts == dag._op_names
    # Node input/output edges should match node qarg/carg/condition.
    for node in dag.op_nodes():
        in_edges = dag._multi_graph.in_edges(node._node_id)
        out_edges = dag._multi_graph.out_edges(node._node_id)
        in_wires = {data for src, dest, data in in_edges}
        out_wires = {data for src, dest, data in out_edges}
        # condition[0] is the conditioning register; slicing yields its bits
        node_cond_bits = set(node.op.condition[0][:] if node.op.condition is not None else [])
        node_qubits = set(node.qargs)
        node_clbits = set(node.cargs)
        all_bits = node_qubits | node_clbits | node_cond_bits
        assert in_wires == all_bits, f"In-edge wires {in_wires} != node bits {all_bits}"
        assert out_wires == all_bits, "Out-edge wires {} != node bits {}".format(
            out_wires, all_bits
        )
class TestDagRegisters(QiskitTestCase):
    """Tests for register and loose-bit handling on DAGCircuit."""

    def test_add_qreg_creg(self):
        """add_qreg()/add_creg() record registers in qregs/cregs keyed by name."""
        circuit_dag = DAGCircuit()
        circuit_dag.add_qreg(QuantumRegister(2, "qr"))
        circuit_dag.add_creg(ClassicalRegister(1, "cr"))
        self.assertDictEqual(circuit_dag.qregs, {"qr": QuantumRegister(2, "qr")})
        self.assertDictEqual(circuit_dag.cregs, {"cr": ClassicalRegister(1, "cr")})

    def test_dag_get_qubits(self):
        """dag.qubits lists qubits in register-insertion order, not name order."""
        circuit_dag = DAGCircuit()
        registers = [
            QuantumRegister(1, name) for name in ("qr1", "qr10", "qr0", "qr3", "qr4", "qr6")
        ]
        for register in registers:
            circuit_dag.add_qreg(register)
        self.assertListEqual(circuit_dag.qubits, [register[0] for register in registers])

    def test_add_reg_duplicate(self):
        """Adding the very same quantum register twice raises."""
        circuit_dag = DAGCircuit()
        register = QuantumRegister(2)
        circuit_dag.add_qreg(register)
        self.assertRaises(DAGCircuitError, circuit_dag.add_qreg, register)

    def test_add_reg_duplicate_name(self):
        """Two distinct quantum registers may not share a name."""
        circuit_dag = DAGCircuit()
        circuit_dag.add_qreg(QuantumRegister(3, "qr"))
        clashing = QuantumRegister(2, "qr")
        self.assertRaises(DAGCircuitError, circuit_dag.add_qreg, clashing)

    def test_add_reg_bad_type(self):
        """add_qreg() rejects a classical register."""
        circuit_dag = DAGCircuit()
        wrong_kind = ClassicalRegister(2)
        self.assertRaises(DAGCircuitError, circuit_dag.add_qreg, wrong_kind)

    def test_add_qubits_invalid_qubits(self):
        """add_qubits() rejects any element that is not a Qubit."""
        circuit_dag = DAGCircuit()
        with self.assertRaisesRegex(DAGCircuitError, "not a Qubit instance"):
            circuit_dag.add_qubits([Clbit()])
        # A single bad element amid valid ones is still rejected.
        with self.assertRaisesRegex(DAGCircuitError, "not a Qubit instance"):
            circuit_dag.add_qubits([Qubit(), Clbit(), Qubit()])

    def test_add_qubits_invalid_clbits(self):
        """add_clbits() rejects any element that is not a Clbit."""
        circuit_dag = DAGCircuit()
        with self.assertRaisesRegex(DAGCircuitError, "not a Clbit instance"):
            circuit_dag.add_clbits([Qubit()])
        with self.assertRaisesRegex(DAGCircuitError, "not a Clbit instance"):
            circuit_dag.add_clbits([Clbit(), Qubit(), Clbit()])

    def test_raise_if_bits_already_present(self):
        """Re-adding loose bits already held by the DAG raises."""
        circuit_dag = DAGCircuit()
        loose_qubits = [Qubit(), Qubit()]
        loose_clbits = [Clbit(), Clbit()]
        circuit_dag.add_qubits(loose_qubits)
        circuit_dag.add_clbits(loose_clbits)
        with self.assertRaisesRegex(DAGCircuitError, "duplicate qubits"):
            circuit_dag.add_qubits(loose_qubits)
        with self.assertRaisesRegex(DAGCircuitError, "duplicate clbits "):
            circuit_dag.add_clbits(loose_clbits)

    def test_raise_if_bits_already_present_from_register(self):
        """Re-adding bits that arrived via a register also raises."""
        circuit_dag = DAGCircuit()
        qreg = QuantumRegister(2, "q")
        creg = ClassicalRegister(2, "c")
        circuit_dag.add_creg(creg)
        circuit_dag.add_qreg(qreg)
        with self.assertRaisesRegex(DAGCircuitError, "duplicate qubits"):
            circuit_dag.add_qubits(qreg[:])
        with self.assertRaisesRegex(DAGCircuitError, "duplicate clbits "):
            circuit_dag.add_clbits(creg[:])

    def test_addding_individual_bit(self):
        """A loose qubit can be appended after a register without disturbing it."""
        existing = QuantumRegister(3, "qr")
        circuit_dag = DAGCircuit()
        circuit_dag.add_qreg(existing)
        loose_bit = Qubit()
        circuit_dag.add_qubits([loose_bit])
        self.assertEqual(circuit_dag.qubits, list(existing) + [loose_bit])
        self.assertEqual(list(circuit_dag.qregs.values()), [existing])
class TestDagApplyOperation(QiskitTestCase):
    """Test adding an op node to a dag."""

    def setUp(self):
        """Build a 3-qubit / 2-clbit DAG and keep handles to its bits."""
        super().setUp()
        self.dag = DAGCircuit()
        qreg = QuantumRegister(3, "qr")
        creg = ClassicalRegister(2, "cr")
        self.dag.add_qreg(qreg)
        self.dag.add_creg(creg)
        self.qubit0 = qreg[0]
        self.qubit1 = qreg[1]
        self.qubit2 = qreg[2]
        self.clbit0 = creg[0]
        self.clbit1 = creg[1]
        # Condition tuple meaning "the 2-bit register cr equals value 3".
        self.condition = (creg, 3)

    def test_apply_operation_back(self):
        """The apply_operation_back() method."""
        x_gate = XGate()
        x_gate.condition = self.condition
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        self.dag.apply_operation_back(x_gate, [self.qubit1], [])
        self.dag.apply_operation_back(Measure(), [self.qubit0, self.clbit0], [])
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        # 5 wires give 10 in/out nodes; plus the 6 op nodes above -> 16 nodes.
        self.assertEqual(len(list(self.dag.nodes())), 16)
        self.assertEqual(len(list(self.dag.edges())), 17)

    def test_edges(self):
        """Test that DAGCircuit.edges() behaves as expected with ops."""
        x_gate = XGate()
        x_gate.condition = self.condition
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        self.dag.apply_operation_back(x_gate, [self.qubit1], [])
        self.dag.apply_operation_back(Measure(), [self.qubit0, self.clbit0], [])
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        # Output nodes are sinks: no outgoing edges at all.
        out_edges = self.dag.edges(self.dag.output_map.values())
        self.assertEqual(list(out_edges), [])
        in_edges = self.dag.edges(self.dag.input_map.values())
        # number of edges for input nodes should be the same as number of wires
        self.assertEqual(len(list(in_edges)), 5)

    def test_apply_operation_back_conditional(self):
        """Test consistency of apply_operation_back with condition set."""
        # Single qubit gate conditional: qc.h(qr[2]).c_if(cr, 3)
        h_gate = HGate()
        h_gate.condition = self.condition
        h_node = self.dag.apply_operation_back(h_gate, [self.qubit2], [])
        self.assertEqual(h_node.qargs, [self.qubit2])
        self.assertEqual(h_node.cargs, [])
        self.assertEqual(h_node.op.condition, h_gate.condition)
        # The condition bits (clbit0, clbit1) must be wired through the node
        # in addition to its quantum argument.
        self.assertEqual(
            sorted(self.dag._multi_graph.in_edges(h_node._node_id)),
            sorted(
                [
                    (self.dag.input_map[self.qubit2]._node_id, h_node._node_id, self.qubit2),
                    (self.dag.input_map[self.clbit0]._node_id, h_node._node_id, self.clbit0),
                    (self.dag.input_map[self.clbit1]._node_id, h_node._node_id, self.clbit1),
                ]
            ),
        )
        self.assertEqual(
            sorted(self.dag._multi_graph.out_edges(h_node._node_id)),
            sorted(
                [
                    (h_node._node_id, self.dag.output_map[self.qubit2]._node_id, self.qubit2),
                    (h_node._node_id, self.dag.output_map[self.clbit0]._node_id, self.clbit0),
                    (h_node._node_id, self.dag.output_map[self.clbit1]._node_id, self.clbit1),
                ]
            ),
        )
        self.assertTrue(rx.is_directed_acyclic_graph(self.dag._multi_graph))

    def test_apply_operation_back_conditional_measure(self):
        """Test consistency of apply_operation_back for conditional measure."""
        # Measure targeting a clbit which is not a member of the conditional
        # register. qc.measure(qr[0], cr[0]).c_if(cr2, 0)
        new_creg = ClassicalRegister(1, "cr2")
        self.dag.add_creg(new_creg)
        meas_gate = Measure()
        meas_gate.condition = (new_creg, 0)
        meas_node = self.dag.apply_operation_back(meas_gate, [self.qubit0], [self.clbit0])
        self.assertEqual(meas_node.qargs, [self.qubit0])
        self.assertEqual(meas_node.cargs, [self.clbit0])
        self.assertEqual(meas_node.op.condition, meas_gate.condition)
        # Edges: the measured qubit, the target clbit, and the condition bit.
        self.assertEqual(
            sorted(self.dag._multi_graph.in_edges(meas_node._node_id)),
            sorted(
                [
                    (self.dag.input_map[self.qubit0]._node_id, meas_node._node_id, self.qubit0),
                    (self.dag.input_map[self.clbit0]._node_id, meas_node._node_id, self.clbit0),
                    (
                        self.dag.input_map[new_creg[0]]._node_id,
                        meas_node._node_id,
                        Clbit(new_creg, 0),
                    ),
                ]
            ),
        )
        self.assertEqual(
            sorted(self.dag._multi_graph.out_edges(meas_node._node_id)),
            sorted(
                [
                    (meas_node._node_id, self.dag.output_map[self.qubit0]._node_id, self.qubit0),
                    (meas_node._node_id, self.dag.output_map[self.clbit0]._node_id, self.clbit0),
                    (
                        meas_node._node_id,
                        self.dag.output_map[new_creg[0]]._node_id,
                        Clbit(new_creg, 0),
                    ),
                ]
            ),
        )
        self.assertTrue(rx.is_directed_acyclic_graph(self.dag._multi_graph))

    def test_apply_operation_back_conditional_measure_to_self(self):
        """Test consistency of apply_operation_back for measure onto conditioning bit."""
        # Measure targeting a clbit which _is_ a member of the conditional
        # register. qc.measure(qr[0], cr[0]).c_if(cr, 3)
        meas_gate = Measure()
        meas_gate.condition = self.condition
        meas_node = self.dag.apply_operation_back(meas_gate, [self.qubit1], [self.clbit1])
        self.assertEqual(meas_node.qargs, [self.qubit1])
        self.assertEqual(meas_node.cargs, [self.clbit1])
        self.assertEqual(meas_node.op.condition, meas_gate.condition)
        # clbit1 appears only once even though it is both carg and condition bit.
        self.assertEqual(
            sorted(self.dag._multi_graph.in_edges(meas_node._node_id)),
            sorted(
                [
                    (self.dag.input_map[self.qubit1]._node_id, meas_node._node_id, self.qubit1),
                    (self.dag.input_map[self.clbit0]._node_id, meas_node._node_id, self.clbit0),
                    (self.dag.input_map[self.clbit1]._node_id, meas_node._node_id, self.clbit1),
                ]
            ),
        )
        self.assertEqual(
            sorted(self.dag._multi_graph.out_edges(meas_node._node_id)),
            sorted(
                [
                    (meas_node._node_id, self.dag.output_map[self.qubit1]._node_id, self.qubit1),
                    (meas_node._node_id, self.dag.output_map[self.clbit0]._node_id, self.clbit0),
                    (meas_node._node_id, self.dag.output_map[self.clbit1]._node_id, self.clbit1),
                ]
            ),
        )
        self.assertTrue(rx.is_directed_acyclic_graph(self.dag._multi_graph))

    def test_apply_operation_front(self):
        """The apply_operation_front() method"""
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_front(Reset(), [self.qubit0], [])
        h_node = self.dag.op_nodes(op=HGate).pop()
        reset_node = self.dag.op_nodes(op=Reset).pop()
        # Prepending must place the Reset before the already-applied H.
        self.assertIn(reset_node, set(self.dag.predecessors(h_node)))
class TestDagNodeSelection(QiskitTestCase):
    """Test methods that select certain dag nodes"""

    def setUp(self):
        """Build a 3-qubit / 2-clbit DAG and keep handles to its bits."""
        super().setUp()
        self.dag = DAGCircuit()
        qreg = QuantumRegister(3, "qr")
        creg = ClassicalRegister(2, "cr")
        self.dag.add_qreg(qreg)
        self.dag.add_creg(creg)
        self.qubit0 = qreg[0]
        self.qubit1 = qreg[1]
        self.qubit2 = qreg[2]
        self.clbit0 = creg[0]
        self.clbit1 = creg[1]
        # Condition tuple meaning "the 2-bit register cr equals value 3".
        self.condition = (creg, 3)

    def test_front_layer(self):
        """The method dag.front_layer() returns first layer"""
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        op_nodes = self.dag.front_layer()
        # Only the H has no op predecessors, so it is the whole front layer.
        self.assertEqual(len(op_nodes), 1)
        self.assertIsInstance(op_nodes[0].op, HGate)

    def test_get_op_nodes_all(self):
        """The method dag.op_nodes() returns all op nodes"""
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        op_nodes = self.dag.op_nodes()
        self.assertEqual(len(op_nodes), 3)
        for node in op_nodes:
            self.assertIsInstance(node.op, Instruction)

    def test_get_op_nodes_particular(self):
        """The method dag.gates_nodes(op=AGate) returns all the AGate nodes"""
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(HGate(), [self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        # Filtering by op type must exclude the Reset and the CX.
        op_nodes = self.dag.op_nodes(op=HGate)
        self.assertEqual(len(op_nodes), 2)
        op_node_1 = op_nodes.pop()
        op_node_2 = op_nodes.pop()
        self.assertIsInstance(op_node_1.op, HGate)
        self.assertIsInstance(op_node_2.op, HGate)

    def test_quantum_successors(self):
        """The method dag.quantum_successors() returns successors connected by quantum edges"""
        # q_0: |0>─────■───|0>─
        #           ┌─┴─┐
        # q_1: |0>┤M├┤ X ├─────
        #         └╥┘└───┘
        # c_0: 0  ═╬═══════════
        #          ║
        # c_1: 0  ═╩═══════════
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        # The measure's only quantum successor is the CX; its classical
        # successor (the clbit out-node) must be skipped.
        successor_measure = self.dag.quantum_successors(self.dag.named_nodes("measure").pop())
        cnot_node = next(successor_measure)
        with self.assertRaises(StopIteration):
            next(successor_measure)
        self.assertIsInstance(cnot_node.op, CXGate)
        successor_cnot = self.dag.quantum_successors(cnot_node)
        # Ordering between Reset and out[q1] is indeterminate.
        successor1 = next(successor_cnot)
        successor2 = next(successor_cnot)
        with self.assertRaises(StopIteration):
            next(successor_cnot)
        self.assertTrue(
            (isinstance(successor1, DAGOutNode) and isinstance(successor2.op, Reset))
            or (isinstance(successor2, DAGOutNode) and isinstance(successor1.op, Reset))
        )

    def test_is_successor(self):
        """The method dag.is_successor(A, B) checks if node B is a successor of A"""
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        measure_node = self.dag.named_nodes("measure")[0]
        cx_node = self.dag.named_nodes("cx")[0]
        reset_node = self.dag.named_nodes("reset")[0]
        # is_successor is direct (one edge), not transitive: measure->cx->reset.
        self.assertTrue(self.dag.is_successor(measure_node, cx_node))
        self.assertFalse(self.dag.is_successor(measure_node, reset_node))
        self.assertTrue(self.dag.is_successor(cx_node, reset_node))

    def test_quantum_predecessors(self):
        """The method dag.quantum_predecessors() returns predecessors connected by quantum edges"""
        # q_0: |0>─|0>───■─────
        #             ┌─┴─┐┌─┐
        # q_1: |0>────┤ X ├┤M├
        #             └───┘└╥┘
        # c_0: 0  ══════════╬═
        #                   ║
        # c_1: 0  ══════════╩═
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        predecessor_measure = self.dag.quantum_predecessors(self.dag.named_nodes("measure").pop())
        cnot_node = next(predecessor_measure)
        with self.assertRaises(StopIteration):
            next(predecessor_measure)
        self.assertIsInstance(cnot_node.op, CXGate)
        predecessor_cnot = self.dag.quantum_predecessors(cnot_node)
        # Ordering between Reset and in[q1] is indeterminate.
        predecessor1 = next(predecessor_cnot)
        predecessor2 = next(predecessor_cnot)
        with self.assertRaises(StopIteration):
            next(predecessor_cnot)
        self.assertTrue(
            (isinstance(predecessor1, DAGInNode) and isinstance(predecessor2.op, Reset))
            or (isinstance(predecessor2, DAGInNode) and isinstance(predecessor1.op, Reset))
        )

    def test_is_predecessor(self):
        """The method dag.is_predecessor(A, B) checks if node B is a predecessor of A"""
        self.dag.apply_operation_back(Measure(), [self.qubit1, self.clbit1], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        measure_node = self.dag.named_nodes("measure")[0]
        cx_node = self.dag.named_nodes("cx")[0]
        reset_node = self.dag.named_nodes("reset")[0]
        # Mirror of test_is_successor: direct predecessors only.
        self.assertTrue(self.dag.is_predecessor(cx_node, measure_node))
        self.assertFalse(self.dag.is_predecessor(reset_node, measure_node))
        self.assertTrue(self.dag.is_predecessor(reset_node, cx_node))

    def test_get_gates_nodes(self):
        """The method dag.gate_nodes() returns all gate nodes"""
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        # Reset is an Instruction but not a Gate, so only H and CX count.
        op_nodes = self.dag.gate_nodes()
        self.assertEqual(len(op_nodes), 2)
        op_node_1 = op_nodes.pop()
        op_node_2 = op_nodes.pop()
        self.assertIsInstance(op_node_1.op, Gate)
        self.assertIsInstance(op_node_2.op, Gate)

    def test_two_q_gates(self):
        """The method dag.two_qubit_ops() returns all 2Q gate operation nodes"""
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Barrier(2), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(Reset(), [self.qubit0], [])
        # The 2-qubit barrier is a directive, not a gate, and must be excluded.
        op_nodes = self.dag.two_qubit_ops()
        self.assertEqual(len(op_nodes), 1)
        op_node = op_nodes.pop()
        self.assertIsInstance(op_node.op, Gate)
        self.assertEqual(len(op_node.qargs), 2)

    def test_get_named_nodes(self):
        """The get_named_nodes(AName) method returns all the nodes with name AName"""
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit2, self.qubit1], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit2], [])
        self.dag.apply_operation_back(HGate(), [self.qubit2], [])
        # The ordering is not assured, so we only compare the output (unordered) sets.
        # We use tuples because lists aren't hashable.
        named_nodes = self.dag.named_nodes("cx")
        node_qargs = {tuple(node.qargs) for node in named_nodes}
        expected_qargs = {
            (self.qubit0, self.qubit1),
            (self.qubit2, self.qubit1),
            (self.qubit0, self.qubit2),
        }
        self.assertEqual(expected_qargs, node_qargs)

    def test_topological_nodes(self):
        """The topological_nodes() method"""
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit2, self.qubit1], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit2], [])
        self.dag.apply_operation_back(HGate(), [self.qubit2], [])
        named_nodes = self.dag.topological_nodes()
        qr = self.dag.qregs["qr"]
        cr = self.dag.cregs["cr"]
        # Expected order interleaves wire in-nodes, ops, then all out-nodes;
        # each bit appears twice because it has both an in- and an out-node.
        expected = [
            qr[0],
            qr[1],
            ("cx", [self.qubit0, self.qubit1]),
            ("h", [self.qubit0]),
            qr[2],
            ("cx", [self.qubit2, self.qubit1]),
            ("cx", [self.qubit0, self.qubit2]),
            ("h", [self.qubit2]),
            qr[0],
            qr[1],
            qr[2],
            cr[0],
            cr[0],
            cr[1],
            cr[1],
        ]
        self.assertEqual(
            [((i.op.name, i.qargs) if isinstance(i, DAGOpNode) else i.wire) for i in named_nodes],
            expected,
        )

    def test_topological_op_nodes(self):
        """The topological_op_nodes() method"""
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit2, self.qubit1], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit2], [])
        self.dag.apply_operation_back(HGate(), [self.qubit2], [])
        named_nodes = self.dag.topological_op_nodes()
        expected = [
            ("cx", [self.qubit0, self.qubit1]),
            ("h", [self.qubit0]),
            ("cx", [self.qubit2, self.qubit1]),
            ("cx", [self.qubit0, self.qubit2]),
            ("h", [self.qubit2]),
        ]
        self.assertEqual(expected, [(i.op.name, i.qargs) for i in named_nodes])

    def test_dag_nodes_on_wire(self):
        """Test that listing the gates on a qubit/classical bit gets the correct gates"""
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        qbit = self.dag.qubits[0]
        # Node ids 0-9 belong to the in/out nodes of the 5 wires (as created
        # by setUp); ids 10 and 11 are the two ops appended above.
        self.assertEqual([0, 10, 11, 1], [i._node_id for i in self.dag.nodes_on_wire(qbit)])
        self.assertEqual(
            [10, 11], [i._node_id for i in self.dag.nodes_on_wire(qbit, only_ops=True)]
        )
        cbit = self.dag.clbits[0]
        # The classical wire carries no ops: just its in-node (6) and out-node (7).
        self.assertEqual([6, 7], [i._node_id for i in self.dag.nodes_on_wire(cbit)])
        self.assertEqual([], [i._node_id for i in self.dag.nodes_on_wire(cbit, only_ops=True)])
        # Asking for a wire that is not in the DAG must raise.
        with self.assertRaises(DAGCircuitError):
            next(self.dag.nodes_on_wire(QuantumRegister(5, "qr")[4]))

    def test_dag_nodes_on_wire_multiple_successors(self):
        """
        Test that if an DAGOpNode has multiple successors in the DAG along one wire, they are all
        retrieved in order. This could be the case for a circuit such as
        q0_0: |0>──■─────────■──
                 ┌─┴─┐┌───┐┌─┴─┐
        q0_1: |0>┤ X ├┤ H ├┤ X ├
                 └───┘└───┘└───┘
        Both the 2nd CX gate and the H gate follow the first CX gate in the DAG, so they
        both must be returned but in the correct order.
        """
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(HGate(), [self.qubit1], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        nodes = self.dag.nodes_on_wire(self.dag.qubits[1], only_ops=True)
        node_names = [nd.op.name for nd in nodes]
        self.assertEqual(node_names, ["cx", "h", "cx"])

    def test_remove_op_node(self):
        """Test remove_op_node method."""
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        op_nodes = self.dag.gate_nodes()
        h_gate = op_nodes.pop()
        self.dag.remove_op_node(h_gate)
        self.assertEqual(len(self.dag.gate_nodes()), 0)

    def test_remove_op_node_longer(self):
        """Test remove_op_node method in a "longer" dag"""
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(CXGate(), [self.qubit2, self.qubit1])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit2])
        self.dag.apply_operation_back(HGate(), [self.qubit2])
        # Removing the first CX must leave the remaining ops' order intact.
        op_nodes = list(self.dag.topological_op_nodes())
        self.dag.remove_op_node(op_nodes[0])
        expected = [
            ("h", [self.qubit0]),
            ("cx", [self.qubit2, self.qubit1]),
            ("cx", [self.qubit0, self.qubit2]),
            ("h", [self.qubit2]),
        ]
        self.assertEqual(expected, [(i.op.name, i.qargs) for i in self.dag.topological_op_nodes()])

    def test_remove_non_op_node(self):
        """Try to remove a non-op node with remove_op_node method."""
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        # The first topological node is a wire in-node, not an op node.
        in_node = next(self.dag.topological_nodes())
        self.assertRaises(DAGCircuitError, self.dag.remove_op_node, in_node)

    def test_dag_collect_runs(self):
        """Test the collect_runs method with 3 different gates."""
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit0])
        self.dag.apply_operation_back(CXGate(), [self.qubit2, self.qubit1])
        self.dag.apply_operation_back(CXGate(), [self.qubit1, self.qubit2])
        self.dag.apply_operation_back(HGate(), [self.qubit2])
        # Expect three runs: [u1,u1,u1] on q0, [cx,cx] across q1/q2, [h] on q2.
        collected_runs = self.dag.collect_runs(["u1", "cx", "h"])
        self.assertEqual(len(collected_runs), 3)
        for run in collected_runs:
            if run[0].op.name == "cx":
                self.assertEqual(len(run), 2)
                self.assertEqual(["cx"] * 2, [x.op.name for x in run])
                self.assertEqual(
                    [[self.qubit2, self.qubit1], [self.qubit1, self.qubit2]], [x.qargs for x in run]
                )
            elif run[0].op.name == "h":
                self.assertEqual(len(run), 1)
                self.assertEqual(["h"], [x.op.name for x in run])
                self.assertEqual([[self.qubit2]], [x.qargs for x in run])
            elif run[0].op.name == "u1":
                self.assertEqual(len(run), 3)
                self.assertEqual(["u1"] * 3, [x.op.name for x in run])
                self.assertEqual(
                    [[self.qubit0], [self.qubit0], [self.qubit0]], [x.qargs for x in run]
                )
            else:
                self.fail("Unknown run encountered")

    def test_dag_collect_runs_start_with_conditional(self):
        """Test collect runs with a conditional at the start of the run."""
        h_gate = HGate()
        h_gate.condition = self.condition
        self.dag.apply_operation_back(h_gate, [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        # The conditional H breaks the run, leaving only the trailing pair.
        collected_runs = self.dag.collect_runs(["h"])
        self.assertEqual(len(collected_runs), 1)
        run = collected_runs.pop()
        self.assertEqual(len(run), 2)
        self.assertEqual(["h", "h"], [x.op.name for x in run])
        self.assertEqual([[self.qubit0], [self.qubit0]], [x.qargs for x in run])

    def test_dag_collect_runs_conditional_in_middle(self):
        """Test collect_runs with a conditional in the middle of a run."""
        h_gate = HGate()
        h_gate.condition = self.condition
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(h_gate, [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        collected_runs = self.dag.collect_runs(["h"])
        # Should return 2 single h gate runs (1 before condition, 1 after)
        self.assertEqual(len(collected_runs), 2)
        for run in collected_runs:
            self.assertEqual(len(run), 1)
            self.assertEqual(["h"], [x.op.name for x in run])
            self.assertEqual([[self.qubit0]], [x.qargs for x in run])

    def test_dag_collect_1q_runs(self):
        """Test the collect_1q_runs method with 3 different gates."""
        self.dag.apply_operation_back(Reset(), [self.qubit0])
        self.dag.apply_operation_back(Delay(100), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit0])
        self.dag.apply_operation_back(CXGate(), [self.qubit2, self.qubit1])
        self.dag.apply_operation_back(CXGate(), [self.qubit1, self.qubit2])
        self.dag.apply_operation_back(HGate(), [self.qubit2])
        # Reset/Delay/CX are not 1q gates, so only the u1 and h runs remain.
        collected_runs = self.dag.collect_1q_runs()
        self.assertEqual(len(collected_runs), 2)
        for run in collected_runs:
            if run[0].op.name == "h":
                self.assertEqual(len(run), 1)
                self.assertEqual(["h"], [x.op.name for x in run])
                self.assertEqual([[self.qubit2]], [x.qargs for x in run])
            elif run[0].op.name == "u1":
                self.assertEqual(len(run), 3)
                self.assertEqual(["u1"] * 3, [x.op.name for x in run])
                self.assertEqual(
                    [[self.qubit0], [self.qubit0], [self.qubit0]], [x.qargs for x in run]
                )
            else:
                self.fail("Unknown run encountered")

    def test_dag_collect_1q_runs_start_with_conditional(self):
        """Test collect 1q runs with a conditional at the start of the run."""
        self.dag.apply_operation_back(Reset(), [self.qubit0])
        self.dag.apply_operation_back(Delay(100), [self.qubit0])
        h_gate = HGate()
        h_gate.condition = self.condition
        self.dag.apply_operation_back(h_gate, [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        # Conditional gates are excluded, so only the trailing pair survives.
        collected_runs = self.dag.collect_1q_runs()
        self.assertEqual(len(collected_runs), 1)
        run = collected_runs.pop()
        self.assertEqual(len(run), 2)
        self.assertEqual(["h", "h"], [x.op.name for x in run])
        self.assertEqual([[self.qubit0], [self.qubit0]], [x.qargs for x in run])

    def test_dag_collect_1q_runs_conditional_in_middle(self):
        """Test collect_1q_runs with a conditional in the middle of a run."""
        self.dag.apply_operation_back(Reset(), [self.qubit0])
        self.dag.apply_operation_back(Delay(100), [self.qubit0])
        h_gate = HGate()
        h_gate.condition = self.condition
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(h_gate, [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        collected_runs = self.dag.collect_1q_runs()
        # Should return 2 single h gate runs (1 before condition, 1 after)
        self.assertEqual(len(collected_runs), 2)
        for run in collected_runs:
            self.assertEqual(len(run), 1)
            self.assertEqual(["h"], [x.op.name for x in run])
            self.assertEqual([[self.qubit0]], [x.qargs for x in run])

    def test_dag_collect_1q_runs_with_parameterized_gate(self):
        """Test collect 1q splits on parameterized gates."""
        theta = Parameter("theta")
        self.dag.apply_operation_back(Reset(), [self.qubit0])
        self.dag.apply_operation_back(Delay(100), [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(theta), [self.qubit0])
        self.dag.apply_operation_back(XGate(), [self.qubit0])
        self.dag.apply_operation_back(XGate(), [self.qubit0])
        # The unbound-parameter u1 splits the run and is itself excluded.
        collected_runs = self.dag.collect_1q_runs()
        self.assertEqual(len(collected_runs), 2)
        run_gates = [[x.op.name for x in run] for run in collected_runs]
        self.assertIn(["h", "h"], run_gates)
        self.assertIn(["x", "x"], run_gates)
        self.assertNotIn("u1", [x.op.name for run in collected_runs for x in run])

    def test_dag_collect_1q_runs_with_cx_in_middle(self):
        """Test collect_1q_runs_with a cx in the middle of the run."""
        self.dag.apply_operation_back(Reset(), [self.qubit0])
        self.dag.apply_operation_back(Delay(100), [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(HGate(), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit0])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit1])
        self.dag.apply_operation_back(U1Gate(3.14), [self.qubit1])
        self.dag.apply_operation_back(HGate(), [self.qubit1])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1])
        self.dag.apply_operation_back(YGate(), [self.qubit0])
        self.dag.apply_operation_back(YGate(), [self.qubit0])
        self.dag.apply_operation_back(XGate(), [self.qubit1])
        self.dag.apply_operation_back(XGate(), [self.qubit1])
        # The CX splits each qubit's sequence into a pre-CX and post-CX run.
        collected_runs = self.dag.collect_1q_runs()
        self.assertEqual(len(collected_runs), 4)
        for run in collected_runs:
            if run[0].op.name == "h":
                self.assertEqual(len(run), 3)
                self.assertEqual(["h", "h", "u1"], [x.op.name for x in run])
                self.assertEqual([[self.qubit0]] * 3, [x.qargs for x in run])
            elif run[0].op.name == "u1":
                self.assertEqual(len(run), 3)
                self.assertEqual(["u1", "u1", "h"], [x.op.name for x in run])
                self.assertEqual([[self.qubit1]] * 3, [x.qargs for x in run])
            elif run[0].op.name == "x":
                self.assertEqual(len(run), 2)
                self.assertEqual(["x", "x"], [x.op.name for x in run])
                self.assertEqual([[self.qubit1]] * 2, [x.qargs for x in run])
            elif run[0].op.name == "y":
                self.assertEqual(len(run), 2)
                self.assertEqual(["y", "y"], [x.op.name for x in run])
                self.assertEqual([[self.qubit0]] * 2, [x.qargs for x in run])
            else:
                self.fail("Unknown run encountered")
class TestDagLayers(QiskitTestCase):
    """Test finding layers on the dag"""
    def test_layers_basic(self):
        """The layers() method returns a list of layers, each of them with a list of nodes."""
        qreg = QuantumRegister(2, "qr")
        creg = ClassicalRegister(2, "cr")
        qubit0 = qreg[0]
        qubit1 = qreg[1]
        clbit0 = creg[0]
        clbit1 = creg[1]
        # A conditioned X gate: the classical-register condition introduces
        # extra wire dependencies, forcing it into its own layer.
        x_gate = XGate()
        x_gate.condition = (creg, 3)
        dag = DAGCircuit()
        dag.add_qreg(qreg)
        dag.add_creg(creg)
        dag.apply_operation_back(HGate(), [qubit0], [])
        dag.apply_operation_back(CXGate(), [qubit0, qubit1], [])
        dag.apply_operation_back(Measure(), [qubit1, clbit1], [])
        dag.apply_operation_back(x_gate, [qubit1], [])
        dag.apply_operation_back(Measure(), [qubit0, clbit0], [])
        dag.apply_operation_back(Measure(), [qubit1, clbit1], [])
        layers = list(dag.layers())
        self.assertEqual(5, len(layers))
        # Collect only op-node names per layer (layers also contain I/O nodes).
        name_layers = [
            [node.op.name for node in layer["graph"].nodes() if isinstance(node, DAGOpNode)]
            for layer in layers
        ]
        self.assertEqual([["h"], ["cx"], ["measure"], ["x"], ["measure", "measure"]], name_layers)
    def test_layers_maintains_order(self):
        """Test that the layers method doesn't mess up the order of the DAG as
        reported in #2698"""
        qr = QuantumRegister(1, "q0")
        # the order the nodes should be in
        truth = [
            (DAGInNode, qr[0], 0),
            (DAGOpNode, "x", 2),
            (DAGOpNode, "id", 3),
            (DAGOutNode, qr[0], 1),
        ]
        # this only occurred sometimes so has to be run more than once
        # (10 times seemed to always be enough for this bug to show at least once)
        for _ in range(10):
            qc = QuantumCircuit(qr)
            qc.x(0)
            dag = circuit_to_dag(qc)
            dag1 = list(dag.layers())[0]["graph"]
            dag1.apply_operation_back(IGate(), [qr[0]], [])
            # Wires carry either an op (DAGOpNode) or the wire itself (I/O nodes).
            comp = [
                (type(nd), nd.op.name if isinstance(nd, DAGOpNode) else nd.wire, nd._node_id)
                for nd in dag1.topological_nodes()
            ]
            self.assertEqual(comp, truth)
class TestCircuitProperties(QiskitTestCase):
    """DAGCircuit properties test."""
    def setUp(self):
        # Build a fixed 6-qubit circuit (4 + 2 registers, 7 operations)
        # shared by every property test below.
        super().setUp()
        qr1 = QuantumRegister(4)
        qr2 = QuantumRegister(2)
        circ = QuantumCircuit(qr1, qr2)
        circ.h(qr1[0])
        circ.cx(qr1[2], qr1[3])
        circ.h(qr1[2])
        circ.t(qr1[2])
        circ.ch(qr1[2], qr1[1])
        circ.u(0.0, 0.1, 0.2, qr1[3])
        circ.ccx(qr2[0], qr2[1], qr1[0])
        self.dag = circuit_to_dag(circ)
    def test_circuit_size(self):
        """Test total number of operations in circuit."""
        self.assertEqual(self.dag.size(), 7)
    def test_circuit_depth(self):
        """Test circuit depth."""
        self.assertEqual(self.dag.depth(), 4)
    def test_circuit_width(self):
        """Test number of qubits + clbits in circuit."""
        self.assertEqual(self.dag.width(), 6)
    def test_circuit_num_qubits(self):
        """Test number of qubits in circuit."""
        self.assertEqual(self.dag.num_qubits(), 6)
    def test_circuit_operations(self):
        """Test circuit operations breakdown by kind of op."""
        operations = {"h": 2, "t": 1, "u": 1, "cx": 1, "ch": 1, "ccx": 1}
        self.assertDictEqual(self.dag.count_ops(), operations)
    def test_circuit_factors(self):
        """Test number of separable factors in circuit."""
        # The ccx entangles qr2 with qr1[0]; qubits qr1[1..3] form a second
        # connected component, giving two tensor factors.
        self.assertEqual(self.dag.num_tensor_factors(), 2)
class TestCircuitSpecialCases(QiskitTestCase):
    """DAGCircuit test for special cases, usually for regression."""
    def test_circuit_depth_with_repetition(self):
        """When cx repeat, they are not "the same".
        See https://github.com/Qiskit/qiskit-terra/issues/1994
        """
        qr1 = QuantumRegister(2)
        qr2 = QuantumRegister(2)
        circ = QuantumCircuit(qr1, qr2)
        circ.h(qr1[0])
        # Two identical, back-to-back cx gates must still count as two
        # distinct DAG nodes (regression for depth de-duplication bug).
        circ.cx(qr1[1], qr2[1])
        circ.cx(qr1[1], qr2[1])
        circ.h(qr2[0])
        dag = circuit_to_dag(circ)
        self.assertEqual(dag.depth(), 2)
class TestDagEquivalence(QiskitTestCase):
    """DAGCircuit equivalence check."""
    def setUp(self):
        # Reference circuit/DAG that the equality tests compare against.
        super().setUp()
        self.qr1 = QuantumRegister(4, "qr1")
        self.qr2 = QuantumRegister(2, "qr2")
        circ1 = QuantumCircuit(self.qr1, self.qr2)
        circ1.h(self.qr1[0])
        circ1.cx(self.qr1[2], self.qr1[3])
        circ1.h(self.qr1[2])
        circ1.t(self.qr1[2])
        circ1.ch(self.qr1[2], self.qr1[1])
        circ1.u(0.0, 0.1, 0.2, self.qr1[3])
        circ1.ccx(self.qr2[0], self.qr2[1], self.qr1[0])
        self.dag1 = circuit_to_dag(circ1)
    def test_dag_eq(self):
        """DAG equivalence check: True."""
        # Same gates applied in a different (but commuting-compatible)
        # insertion order must produce an equal DAG.
        circ2 = QuantumCircuit(self.qr1, self.qr2)
        circ2.cx(self.qr1[2], self.qr1[3])
        circ2.u(0.0, 0.1, 0.2, self.qr1[3])
        circ2.h(self.qr1[0])
        circ2.h(self.qr1[2])
        circ2.t(self.qr1[2])
        circ2.ch(self.qr1[2], self.qr1[1])
        circ2.ccx(self.qr2[0], self.qr2[1], self.qr1[0])
        dag2 = circuit_to_dag(circ2)
        self.assertEqual(self.dag1, dag2)
    def test_dag_neq_topology(self):
        """DAG equivalence check: False. Different topology."""
        circ2 = QuantumCircuit(self.qr1, self.qr2)
        circ2.cx(self.qr1[2], self.qr1[3])
        circ2.u(0.0, 0.1, 0.2, self.qr1[3])
        circ2.h(self.qr1[0])
        circ2.h(self.qr1[2])
        circ2.t(self.qr1[2])
        circ2.ch(self.qr1[0], self.qr1[1])  # <--- The difference: ch(qr1[2], qr1[1])
        circ2.ccx(self.qr2[0], self.qr2[1], self.qr1[0])
        dag2 = circuit_to_dag(circ2)
        self.assertNotEqual(self.dag1, dag2)
    def test_dag_neq_same_topology(self):
        """DAG equivalence check: False. Same topology."""
        # Same wiring/topology but a different gate (cx instead of ch) on
        # one edge must compare unequal.
        circ2 = QuantumCircuit(self.qr1, self.qr2)
        circ2.cx(self.qr1[2], self.qr1[3])
        circ2.u(0.0, 0.1, 0.2, self.qr1[3])
        circ2.h(self.qr1[0])
        circ2.h(self.qr1[2])
        circ2.t(self.qr1[2])
        circ2.cx(self.qr1[2], self.qr1[1])  # <--- The difference: ch(qr1[2], qr1[1])
        circ2.ccx(self.qr2[0], self.qr2[1], self.qr1[0])
        dag2 = circuit_to_dag(circ2)
        self.assertNotEqual(self.dag1, dag2)
    def test_dag_from_networkx(self):
        """Test DAG from networkx creates an expected DAGCircuit object."""
        from copy import deepcopy
        from collections import OrderedDict
        nx_graph = self.dag1.to_networkx()
        from_nx_dag = DAGCircuit.from_networkx(nx_graph)
        # to_/from_networkx does not preserve Registers or bit indexing,
        # so remove them from reference DAG.
        dag = deepcopy(self.dag1)
        dag.qregs = OrderedDict()
        dag.cregs = OrderedDict()
        dag.qubits = from_nx_dag.qubits
        dag.clbits = from_nx_dag.clbits
        self.assertEqual(dag, from_nx_dag)
    def test_node_params_equal_unequal(self):
        """Test node params are equal or unequal."""
        # Equal gate parameters -> equal DAGs; different parameter -> unequal.
        qc1 = QuantumCircuit(1)
        qc2 = QuantumCircuit(1)
        qc3 = QuantumCircuit(1)
        qc1.p(pi / 4, 0)
        dag1 = circuit_to_dag(qc1)
        qc2.p(pi / 4, 0)
        dag2 = circuit_to_dag(qc2)
        qc3.p(pi / 2, 0)
        dag3 = circuit_to_dag(qc3)
        self.assertEqual(dag1, dag2)
        self.assertNotEqual(dag2, dag3)
class TestDagSubstitute(QiskitTestCase):
    """Test substituting a dag node with a sub-dag"""
    def setUp(self):
        # Base DAG used by every substitution test: h(q0); cx(q0, q1); x(q1).
        super().setUp()
        self.dag = DAGCircuit()
        qreg = QuantumRegister(3, "qr")
        creg = ClassicalRegister(2, "cr")
        self.dag.add_qreg(qreg)
        self.dag.add_creg(creg)
        self.qubit0 = qreg[0]
        self.qubit1 = qreg[1]
        self.qubit2 = qreg[2]
        self.clbit0 = creg[0]
        self.clbit1 = creg[1]
        self.condition = (creg, 3)
        self.dag.apply_operation_back(HGate(), [self.qubit0], [])
        self.dag.apply_operation_back(CXGate(), [self.qubit0, self.qubit1], [])
        self.dag.apply_operation_back(XGate(), [self.qubit1], [])
    def test_substitute_circuit_one_middle(self):
        """The method substitute_node_with_dag() replaces a in-the-middle node with a DAG."""
        cx_node = self.dag.op_nodes(op=CXGate).pop()
        # Replacement: cx(a, b) == h(a); h(b); cx(b, a); h(a); h(b).
        flipped_cx_circuit = DAGCircuit()
        v = QuantumRegister(2, "v")
        flipped_cx_circuit.add_qreg(v)
        flipped_cx_circuit.apply_operation_back(HGate(), [v[0]], [])
        flipped_cx_circuit.apply_operation_back(HGate(), [v[1]], [])
        flipped_cx_circuit.apply_operation_back(CXGate(), [v[1], v[0]], [])
        flipped_cx_circuit.apply_operation_back(HGate(), [v[0]], [])
        flipped_cx_circuit.apply_operation_back(HGate(), [v[1]], [])
        self.dag.substitute_node_with_dag(cx_node, flipped_cx_circuit, wires=[v[0], v[1]])
        self.assertEqual(self.dag.count_ops()["h"], 5)
        expected = DAGCircuit()
        qreg = QuantumRegister(3, "qr")
        creg = ClassicalRegister(2, "cr")
        expected.add_qreg(qreg)
        expected.add_creg(creg)
        expected.apply_operation_back(HGate(), [qreg[0]], [])
        expected.apply_operation_back(HGate(), [qreg[0]], [])
        expected.apply_operation_back(HGate(), [qreg[1]], [])
        expected.apply_operation_back(CXGate(), [qreg[1], qreg[0]], [])
        expected.apply_operation_back(HGate(), [qreg[0]], [])
        expected.apply_operation_back(HGate(), [qreg[1]], [])
        expected.apply_operation_back(XGate(), [qreg[1]], [])
        self.assertEqual(self.dag, expected)
    def test_substitute_circuit_one_front(self):
        """The method substitute_node_with_dag() replaces a leaf-in-the-front node with a DAG."""
        circuit = DAGCircuit()
        v = QuantumRegister(1, "v")
        circuit.add_qreg(v)
        circuit.apply_operation_back(HGate(), [v[0]], [])
        circuit.apply_operation_back(XGate(), [v[0]], [])
        # The first topological op node is the initial HGate on qubit0.
        self.dag.substitute_node_with_dag(next(self.dag.topological_op_nodes()), circuit)
        expected = DAGCircuit()
        qreg = QuantumRegister(3, "qr")
        creg = ClassicalRegister(2, "cr")
        expected.add_qreg(qreg)
        expected.add_creg(creg)
        expected.apply_operation_back(HGate(), [qreg[0]], [])
        expected.apply_operation_back(XGate(), [qreg[0]], [])
        expected.apply_operation_back(CXGate(), [qreg[0], qreg[1]], [])
        expected.apply_operation_back(XGate(), [qreg[1]], [])
        self.assertEqual(self.dag, expected)
    def test_substitute_circuit_one_back(self):
        """The method substitute_node_with_dag() replaces a leaf-in-the-back node with a DAG."""
        circuit = DAGCircuit()
        v = QuantumRegister(1, "v")
        circuit.add_qreg(v)
        circuit.apply_operation_back(HGate(), [v[0]], [])
        circuit.apply_operation_back(XGate(), [v[0]], [])
        # Index 2 in topological order is the trailing XGate on qubit1.
        self.dag.substitute_node_with_dag(list(self.dag.topological_op_nodes())[2], circuit)
        expected = DAGCircuit()
        qreg = QuantumRegister(3, "qr")
        creg = ClassicalRegister(2, "cr")
        expected.add_qreg(qreg)
        expected.add_creg(creg)
        expected.apply_operation_back(HGate(), [qreg[0]], [])
        expected.apply_operation_back(CXGate(), [qreg[0], qreg[1]], [])
        expected.apply_operation_back(HGate(), [qreg[1]], [])
        expected.apply_operation_back(XGate(), [qreg[1]], [])
        self.assertEqual(self.dag, expected)
    def test_raise_if_substituting_dag_modifies_its_conditional(self):
        """Verify that we raise if the input dag modifies any of the bits in node.op.condition."""
        # Our unroller's rely on substitute_node_with_dag carrying the condition
        # from the node over to the input dag, which it does by making every
        # node in the input dag conditioned over the same bits. However, if the
        # input dag e.g. measures to one of those bits, the behavior of the
        # remainder of the DAG would be different, so detect and raise in that
        # case.
        instr = Instruction("opaque", 1, 1, [])
        instr.condition = self.condition
        instr_node = self.dag.apply_operation_back(instr, [self.qubit0], [self.clbit1])
        sub_dag = DAGCircuit()
        sub_qr = QuantumRegister(1, "sqr")
        sub_cr = ClassicalRegister(1, "scr")
        sub_dag.add_qreg(sub_qr)
        sub_dag.add_creg(sub_cr)
        # The measure writes into a clbit that the condition reads from.
        sub_dag.apply_operation_back(Measure(), [sub_qr[0]], [sub_cr[0]])
        with self.assertRaises(DAGCircuitError):
            self.dag.substitute_node_with_dag(instr_node, sub_dag)
@ddt
class TestDagSubstituteNode(QiskitTestCase):
    """Test substituting a dagnode with a node."""
    def test_substituting_node_with_wrong_width_node_raises(self):
        """Verify replacing a node with one of a different shape raises."""
        dag = DAGCircuit()
        qr = QuantumRegister(2)
        dag.add_qreg(qr)
        node_to_be_replaced = dag.apply_operation_back(CXGate(), [qr[0], qr[1]])
        # Measure is 1 qubit + 1 clbit, CX is 2 qubits: widths differ.
        with self.assertRaises(DAGCircuitError) as _:
            dag.substitute_node(node_to_be_replaced, Measure())
    @data(True, False)
    def test_substituting_io_node_raises(self, inplace):
        """Verify replacing an io node raises."""
        dag = DAGCircuit()
        qr = QuantumRegister(1)
        dag.add_qreg(qr)
        # With no ops applied, every node is an input/output node.
        io_node = next(dag.nodes())
        with self.assertRaises(DAGCircuitError) as _:
            dag.substitute_node(io_node, HGate(), inplace=inplace)
    @data(True, False)
    def test_substituting_node_preserves_args_condition(self, inplace):
        """Verify args and condition are preserved by a substitution."""
        dag = DAGCircuit()
        qr = QuantumRegister(2)
        cr = ClassicalRegister(1)
        dag.add_qreg(qr)
        dag.add_creg(cr)
        dag.apply_operation_back(HGate(), [qr[1]])
        cx_gate = CXGate()
        cx_gate.condition = (cr, 1)
        node_to_be_replaced = dag.apply_operation_back(cx_gate, [qr[1], qr[0]])
        dag.apply_operation_back(HGate(), [qr[1]])
        replacement_node = dag.substitute_node(node_to_be_replaced, CZGate(), inplace=inplace)
        raise_if_dagcircuit_invalid(dag)
        self.assertEqual(replacement_node.op.name, "cz")
        self.assertEqual(replacement_node.qargs, [qr[1], qr[0]])
        self.assertEqual(replacement_node.cargs, [])
        self.assertEqual(replacement_node.op.condition, (cr, 1))
        # inplace=True mutates the original node; inplace=False returns a copy.
        self.assertEqual(replacement_node is node_to_be_replaced, inplace)
    @data(True, False)
    def test_substituting_node_preserves_parents_children(self, inplace):
        """Verify parents and children are preserved by a substitution."""
        qc = QuantumCircuit(3, 2)
        qc.cx(0, 1)
        qc.cx(1, 2)
        qc.rz(0.1, 2)
        qc.cx(1, 2)
        qc.cx(0, 1)
        dag = circuit_to_dag(qc)
        node_to_be_replaced = dag.named_nodes("rz")[0]
        # Snapshot graph neighborhood before the substitution.
        predecessors = set(dag.predecessors(node_to_be_replaced))
        successors = set(dag.successors(node_to_be_replaced))
        ancestors = dag.ancestors(node_to_be_replaced)
        descendants = dag.descendants(node_to_be_replaced)
        replacement_node = dag.substitute_node(node_to_be_replaced, U1Gate(0.1), inplace=inplace)
        raise_if_dagcircuit_invalid(dag)
        self.assertEqual(set(dag.predecessors(replacement_node)), predecessors)
        self.assertEqual(set(dag.successors(replacement_node)), successors)
        self.assertEqual(dag.ancestors(replacement_node), ancestors)
        self.assertEqual(dag.descendants(replacement_node), descendants)
        self.assertEqual(replacement_node is node_to_be_replaced, inplace)
class TestDagProperties(QiskitTestCase):
    """Test the DAG properties."""
    def setUp(self):
        # Fixed 6-qubit, 7-op DAG shared by the simple property tests.
        super().setUp()
        qr1 = QuantumRegister(4)
        qr2 = QuantumRegister(2)
        circ = QuantumCircuit(qr1, qr2)
        circ.h(qr1[0])
        circ.cx(qr1[2], qr1[3])
        circ.h(qr1[2])
        circ.t(qr1[2])
        circ.ch(qr1[2], qr1[1])
        circ.u(0.0, 0.1, 0.2, qr1[3])
        circ.ccx(qr2[0], qr2[1], qr1[0])
        self.dag = circuit_to_dag(circ)
    def test_dag_size(self):
        """Test total number of operations in dag."""
        self.assertEqual(self.dag.size(), 7)
    def test_dag_depth(self):
        """Test dag depth."""
        self.assertEqual(self.dag.depth(), 4)
    def test_dag_width(self):
        """Test number of qubits + clbits in dag."""
        self.assertEqual(self.dag.width(), 6)
    def test_dag_num_qubits(self):
        """Test number of qubits in dag."""
        self.assertEqual(self.dag.num_qubits(), 6)
    def test_dag_operations(self):
        """Test dag operations breakdown by kind of op."""
        operations = {"h": 2, "t": 1, "u": 1, "cx": 1, "ch": 1, "ccx": 1}
        self.assertDictEqual(self.dag.count_ops(), operations)
    def test_dag_factors(self):
        """Test number of separable factors in circuit."""
        self.assertEqual(self.dag.num_tensor_factors(), 2)
    def test_dag_depth_empty(self):
        """Empty circuit DAG is zero depth"""
        q = QuantumRegister(5, "q")
        qc = QuantumCircuit(q)
        dag = circuit_to_dag(qc)
        self.assertEqual(dag.depth(), 0)
    def test_dag_idle_wires(self):
        """Test dag idle_wires."""
        # Every wire is used by some op in the fixture DAG...
        wires = list(self.dag.idle_wires())
        self.assertEqual(len(wires), 0)
        # ...but ignoring "u" and "cx" ops leaves qr1[3] with no operations.
        wires = list(self.dag.idle_wires(["u", "cx"]))
        self.assertEqual(len(wires), 1)
    def test_dag_depth1(self):
        """Test DAG depth #1"""
        qr1 = QuantumRegister(3, "q1")
        qr2 = QuantumRegister(2, "q2")
        c = ClassicalRegister(5, "c")
        qc = QuantumCircuit(qr1, qr2, c)
        qc.h(qr1[0])
        qc.h(qr1[1])
        qc.h(qr1[2])
        qc.h(qr2[0])
        qc.h(qr2[1])
        qc.ccx(qr2[1], qr1[0], qr2[0])
        qc.cx(qr1[0], qr1[1])
        qc.cx(qr1[1], qr2[1])
        qc.cx(qr2[1], qr1[2])
        qc.cx(qr1[2], qr2[0])
        dag = circuit_to_dag(qc)
        self.assertEqual(dag.depth(), 6)
    def test_dag_depth2(self):
        """Test barrier increases DAG depth"""
        q = QuantumRegister(5, "q")
        c = ClassicalRegister(1, "c")
        qc = QuantumCircuit(q, c)
        qc.h(q[0])
        qc.cx(q[0], q[4])
        qc.x(q[2])
        qc.x(q[2])
        qc.x(q[2])
        qc.x(q[4])
        qc.cx(q[4], q[1])
        # The full-width barrier forces a synchronization layer before measure.
        qc.barrier(q)
        qc.measure(q[1], c[0])
        dag = circuit_to_dag(qc)
        self.assertEqual(dag.depth(), 6)
    def test_dag_depth3(self):
        """Test DAG depth for silly circuit."""
        q = QuantumRegister(6, "q")
        c = ClassicalRegister(1, "c")
        qc = QuantumCircuit(q, c)
        qc.h(q[0])
        qc.cx(q[0], q[1])
        qc.cx(q[1], q[2])
        qc.cx(q[2], q[3])
        qc.cx(q[3], q[4])
        qc.cx(q[4], q[5])
        # Single-qubit barriers each add a layer on q[0] only.
        qc.barrier(q[0])
        qc.barrier(q[0])
        qc.measure(q[0], c[0])
        dag = circuit_to_dag(qc)
        self.assertEqual(dag.depth(), 6)
class TestConditional(QiskitTestCase):
    """Test the classical conditional gates."""
    def setUp(self):
        super().setUp()
        self.qreg = QuantumRegister(3, "q")
        self.creg = ClassicalRegister(2, "c")
        self.creg2 = ClassicalRegister(2, "c2")
        self.qubit0 = self.qreg[0]
        self.circuit = QuantumCircuit(self.qreg, self.creg, self.creg2)
        self.dag = None
    def test_creg_conditional(self):
        """Test consistency of conditional on classical register."""
        self.circuit.h(self.qreg[0]).c_if(self.creg, 1)
        self.dag = circuit_to_dag(self.circuit)
        gate_node = self.dag.gate_nodes()[0]
        self.assertEqual(gate_node.op, HGate())
        self.assertEqual(gate_node.qargs, [self.qreg[0]])
        self.assertEqual(gate_node.cargs, [])
        self.assertEqual(gate_node.op.condition, (self.creg, 1))
        # Conditioning on the whole register wires BOTH creg bits through
        # the gate node, in addition to its qubit wire.
        self.assertEqual(
            sorted(self.dag._multi_graph.in_edges(gate_node._node_id)),
            sorted(
                [
                    (self.dag.input_map[self.qreg[0]]._node_id, gate_node._node_id, self.qreg[0]),
                    (self.dag.input_map[self.creg[0]]._node_id, gate_node._node_id, self.creg[0]),
                    (self.dag.input_map[self.creg[1]]._node_id, gate_node._node_id, self.creg[1]),
                ]
            ),
        )
        self.assertEqual(
            sorted(self.dag._multi_graph.out_edges(gate_node._node_id)),
            sorted(
                [
                    (gate_node._node_id, self.dag.output_map[self.qreg[0]]._node_id, self.qreg[0]),
                    (gate_node._node_id, self.dag.output_map[self.creg[0]]._node_id, self.creg[0]),
                    (gate_node._node_id, self.dag.output_map[self.creg[1]]._node_id, self.creg[1]),
                ]
            ),
        )
    def test_clbit_conditional(self):
        """Test consistency of conditional on single classical bit."""
        self.circuit.h(self.qreg[0]).c_if(self.creg[0], 1)
        self.dag = circuit_to_dag(self.circuit)
        gate_node = self.dag.gate_nodes()[0]
        self.assertEqual(gate_node.op, HGate())
        self.assertEqual(gate_node.qargs, [self.qreg[0]])
        self.assertEqual(gate_node.cargs, [])
        self.assertEqual(gate_node.op.condition, (self.creg[0], 1))
        # Conditioning on a single bit wires only that one clbit through.
        self.assertEqual(
            sorted(self.dag._multi_graph.in_edges(gate_node._node_id)),
            sorted(
                [
                    (self.dag.input_map[self.qreg[0]]._node_id, gate_node._node_id, self.qreg[0]),
                    (self.dag.input_map[self.creg[0]]._node_id, gate_node._node_id, self.creg[0]),
                ]
            ),
        )
        self.assertEqual(
            sorted(self.dag._multi_graph.out_edges(gate_node._node_id)),
            sorted(
                [
                    (gate_node._node_id, self.dag.output_map[self.qreg[0]]._node_id, self.qreg[0]),
                    (gate_node._node_id, self.dag.output_map[self.creg[0]]._node_id, self.creg[0]),
                ]
            ),
        )
class TestDAGDeprecations(QiskitTestCase):
    """Test DAG deprecations"""
    def test_DAGNode_deprecations(self):
        """Test DAGNode deprecations."""
        from qiskit.dagcircuit import DAGNode
        qr = QuantumRegister(1, "qr")
        cr = ClassicalRegister(1, "cr")
        # Constructing DAGNode with a ``type`` kwarg is deprecated in favor
        # of the DAGOpNode/DAGInNode/DAGOutNode subclasses; each construction
        # and each legacy attribute access must emit a DeprecationWarning.
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            op_node = DAGNode(type="op", op=HGate(), qargs=[qr[0]], cargs=[cr[0]])
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            in_node = DAGNode(type="in", wire=qr[0])
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            out_node = DAGNode(type="out", wire=cr[0])
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            _ = op_node.type
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            _ = op_node.op
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            _ = op_node.qargs
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            _ = op_node.cargs
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            _ = in_node.wire
        with self.assertWarnsRegex(DeprecationWarning, "deprecated"):
            _ = out_node.wire
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 40.453503 | 100 | 0.613018 |
7956950df580d87e4ef2e4e2720344b10084e8c9 | 789 | bzl | Python | google/cloud/pubsub/samples/pubsub_samples_unit_tests.bzl | joezqren/google-cloud-cpp | 325d312b0a21569f3c57515aec7d91f3540d3b48 | [
"Apache-2.0"
] | 299 | 2019-01-31T12:17:56.000Z | 2022-03-30T15:46:15.000Z | google/cloud/pubsub/samples/pubsub_samples_unit_tests.bzl | joezqren/google-cloud-cpp | 325d312b0a21569f3c57515aec7d91f3540d3b48 | [
"Apache-2.0"
] | 6,560 | 2019-01-29T03:15:15.000Z | 2022-03-31T23:58:48.000Z | google/cloud/pubsub/samples/pubsub_samples_unit_tests.bzl | joezqren/google-cloud-cpp | 325d312b0a21569f3c57515aec7d91f3540d3b48 | [
"Apache-2.0"
] | 253 | 2019-02-07T01:18:13.000Z | 2022-03-30T17:21:10.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated unit tests list - DO NOT EDIT."""
# Test-source list consumed by the BUILD rules; regenerate via CMake,
# do not edit by hand (see the header notice above).
pubsub_samples_unit_tests = [
    "pubsub_samples_common_test.cc",
]
| 35.863636 | 79 | 0.754119 |
795695ea947ef973292ca04d2b47eea48b028d32 | 54,173 | py | Python | edb/edgeql/parser/grammar/expressions.py | sfermigier/edgedb | 13aff7004aa682777287157dea52642c374967e8 | [
"Apache-2.0"
] | 7,302 | 2018-05-10T18:36:31.000Z | 2022-03-31T17:49:36.000Z | edb/edgeql/parser/grammar/expressions.py | sfermigier/edgedb | 13aff7004aa682777287157dea52642c374967e8 | [
"Apache-2.0"
] | 1,602 | 2018-05-10T17:45:38.000Z | 2022-03-31T23:46:19.000Z | edb/edgeql/parser/grammar/expressions.py | sfermigier/edgedb | 13aff7004aa682777287157dea52642c374967e8 | [
"Apache-2.0"
] | 236 | 2018-05-13T14:15:29.000Z | 2022-03-29T19:39:19.000Z | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import collections
import typing
from edb.common import parsing, context
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb.errors import EdgeQLSyntaxError
from . import keywords
from . import precedence
from . import tokens
from .precedence import * # NOQA
from .tokens import * # NOQA
# Module-local base for all grammar non-terminals defined below.
class Nonterm(parsing.Nonterm):
    pass
# Module-local base for list-style non-terminals (parameterized by an
# element non-terminal and an optional separator token).
class ListNonterm(parsing.ListNonterm, element=None):
    pass
# A statement expression, optionally preceded by a WITH block whose alias
# declarations are attached to the parsed statement node.
class ExprStmt(Nonterm):
    def reduce_WithBlock_ExprStmtCore(self, *kids):
        self.val = kids[1].val
        # Attach the WITH aliases to the statement AST node.
        self.val.aliases = kids[0].val.aliases
    def reduce_ExprStmtCore(self, *kids):
        self.val = kids[0].val
# The statement proper: one of the six top-level query forms.  Each reduce
# method simply forwards the child's AST node.
class ExprStmtCore(Nonterm):
    def reduce_SimpleFor(self, *kids):
        self.val = kids[0].val
    def reduce_SimpleSelect(self, *kids):
        self.val = kids[0].val
    def reduce_SimpleGroup(self, *kids):
        self.val = kids[0].val
    def reduce_SimpleInsert(self, *kids):
        self.val = kids[0].val
    def reduce_SimpleUpdate(self, *kids):
        self.val = kids[0].val
    def reduce_SimpleDelete(self, *kids):
        self.val = kids[0].val
# ``name := expr`` — an expression bound to an explicit alias.
class AliasedExpr(Nonterm):
    def reduce_Identifier_ASSIGN_Expr(self, *kids):
        self.val = qlast.AliasedExpr(alias=kids[0].val, expr=kids[2].val)
# An expression with or without an alias; always normalized into an
# AliasedExprSpec (alias may be None) for uniform handling by callers.
class OptionallyAliasedExpr(Nonterm):
    def reduce_AliasedExpr(self, *kids):
        val = kids[0].val
        self.val = AliasedExprSpec(alias=val.alias, expr=val.expr)
    def reduce_Expr(self, *kids):
        self.val = AliasedExprSpec(alias=None, expr=kids[0].val)
# Comma-separated list of ``name := expr`` bindings.
class AliasedExprList(ListNonterm, element=AliasedExpr,
                      separator=tokens.T_COMMA):
    pass
# NOTE: This is intentionally not an AST node, since this structure never
# makes it to the actual AST and exists solely for parser convenience.
# ``alias`` is None when no explicit alias was written in the source.
AliasedExprSpec = collections.namedtuple(
    'AliasedExprSpec', ['alias', 'expr'], module=__name__)
# ByExpr will eventually be expanded to include more than just
# Identifiers as its members (such as CUBE, ROLLUP and grouping sets).
class ByExpr(Nonterm):
    def reduce_Identifier(self, *kids):
        # A bare identifier is parsed as a single-step path reference.
        self.val = qlast.Path(steps=[qlast.ObjectRef(name=kids[0].val)])
# Comma-separated list of GROUP BY expressions.
class ByExprList(ListNonterm, element=ByExpr, separator=tokens.T_COMMA):
    pass
# ``FOR <alias> IN <set> UNION <result>`` — iteration query.
class SimpleFor(Nonterm):
    def reduce_For(self, *kids):
        r"%reduce FOR Identifier IN Set \
                  UNION OptionallyAliasedExpr"
        self.val = qlast.ForQuery(
            iterator_alias=kids[1].val,
            iterator=kids[3].val,
            result=kids[5].val.expr,
            result_alias=kids[5].val.alias,
        )
# ``SELECT <expr> [FILTER ...] [ORDER BY ...] [OFFSET/LIMIT ...]``.
class SimpleSelect(Nonterm):
    def reduce_Select(self, *kids):
        r"%reduce SELECT OptionallyAliasedExpr \
                  OptFilterClause OptSortClause OptSelectLimit"
        offset, limit = kids[4].val
        if offset is not None or limit is not None:
            # LIMIT and OFFSET are bound above FILTER/ORDER BY: wrap the
            # filtered/sorted query in an implicit subquery and apply
            # OFFSET/LIMIT to the wrapper.
            subj = qlast.SelectQuery(
                result=kids[1].val.expr,
                result_alias=kids[1].val.alias,
                where=kids[2].val,
                orderby=kids[3].val,
                implicit=True,
            )
            self.val = qlast.SelectQuery(
                result=subj,
                offset=offset,
                limit=limit,
            )
        else:
            self.val = qlast.SelectQuery(
                result=kids[1].val.expr,
                result_alias=kids[1].val.alias,
                where=kids[2].val,
                orderby=kids[3].val,
            )
# ``GROUP ... USING ... BY ... INTO ... UNION ...`` — grouping query.
class SimpleGroup(Nonterm):
    def reduce_Group(self, *kids):
        r"%reduce GROUP OptionallyAliasedExpr \
                  USING AliasedExprList \
                  BY ByExprList \
                  INTO Identifier \
                  UNION OptionallyAliasedExpr \
                  OptFilterClause OptSortClause OptSelectLimit"
        self.val = qlast.GroupQuery(
            subject=kids[1].val.expr,
            subject_alias=kids[1].val.alias,
            using=kids[3].val,
            by=kids[5].val,
            into=kids[7].val,
            result=kids[9].val.expr,
            result_alias=kids[9].val.alias,
            where=kids[10].val,
            orderby=kids[11].val,
            # OptSelectLimit yields an (offset, limit) pair.
            offset=kids[12].val[0],
            limit=kids[12].val[1],
        )
# ``INSERT <type> [{ shape }] [UNLESS CONFLICT ...]`` — insertion query.
class SimpleInsert(Nonterm):
    def reduce_Insert(self, *kids):
        r'%reduce INSERT OptionallyAliasedExpr OptUnlessConflictClause'
        # The subject may be a bare path or a shaped expression; unwrap a
        # Shape into its target expression plus its element list.
        aliased = kids[1].val
        subject = aliased.expr
        if isinstance(subject, qlast.Shape):
            target, elements = subject.expr, subject.elements
        else:
            target, elements = subject, []
        # Anything other than a path cannot name an object type to insert.
        if not isinstance(target, qlast.Path):
            raise EdgeQLSyntaxError(
                "insert expression must be an object type reference",
                context=subject.context)
        self.val = qlast.InsertQuery(
            subject=target,
            subject_alias=aliased.alias,
            shape=elements,
            unless_conflict=kids[2].val,
        )
# ``UPDATE <expr> [FILTER ...] SET { shape }`` — update query.
class SimpleUpdate(Nonterm):
    def reduce_Update(self, *kids):
        "%reduce UPDATE OptionallyAliasedExpr OptFilterClause SET Shape"
        self.val = qlast.UpdateQuery(
            subject=kids[1].val.expr,
            subject_alias=kids[1].val.alias,
            where=kids[2].val,
            shape=kids[4].val,
        )
# ``DELETE <expr> [FILTER ...] [ORDER BY ...] [OFFSET/LIMIT ...]``.
class SimpleDelete(Nonterm):
    def reduce_Delete(self, *kids):
        r"%reduce DELETE OptionallyAliasedExpr \
                  OptFilterClause OptSortClause OptSelectLimit"
        self.val = qlast.DeleteQuery(
            subject=kids[1].val.expr,
            subject_alias=kids[1].val.alias,
            where=kids[2].val,
            orderby=kids[3].val,
            # OptSelectLimit yields an (offset, limit) pair.
            offset=kids[4].val[0],
            limit=kids[4].val[1],
        )
# Parser-internal carrier for the aliases parsed out of a WITH block.
WithBlockData = collections.namedtuple(
    'WithBlockData', ['aliases'], module=__name__)
# ``WITH <decl>, <decl>[,]`` — a block of alias declarations.  The two
# reduce methods differ only in allowing an optional trailing comma; the
# method names encode the grammar productions, so both must be kept.
class WithBlock(Nonterm):
    def reduce_WITH_WithDeclList(self, *kids):
        # list() takes a defensive copy of the parsed declaration list
        # (replaces the previous manual append loop with the idiom).
        self.val = WithBlockData(aliases=list(kids[1].val))
    def reduce_WITH_WithDeclList_COMMA(self, *kids):
        self.val = WithBlockData(aliases=list(kids[1].val))
# A single WITH declaration: a module alias or an expression alias.
class AliasDecl(Nonterm):
    def reduce_MODULE_ModuleName(self, *kids):
        # ``WITH MODULE foo::bar`` — sets the default module.
        self.val = qlast.ModuleAliasDecl(
            module='.'.join(kids[1].val))
    def reduce_Identifier_AS_MODULE_ModuleName(self, *kids):
        # ``WITH name AS MODULE foo::bar`` — a named module alias.
        self.val = qlast.ModuleAliasDecl(
            alias=kids[0].val,
            module='.'.join(kids[3].val))
    def reduce_AliasedExpr(self, *kids):
        self.val = kids[0].val
# Indirection non-terminal for a WITH declaration (extension point).
class WithDecl(Nonterm):
    def reduce_AliasDecl(self, *kids):
        self.val = kids[0].val
# Comma-separated list of WITH declarations.
class WithDeclList(ListNonterm, element=WithDecl,
                   separator=tokens.T_COMMA):
    pass
# ``{ elem, elem[,] }`` — a shape; empty braces yield None, not [].
class Shape(Nonterm):
    def reduce_LBRACE_RBRACE(self, *kids):
        self.val = None
    def reduce_LBRACE_ShapeElementList_RBRACE(self, *kids):
        self.val = kids[1].val
    def reduce_LBRACE_ShapeElementList_COMMA_RBRACE(self, *kids):
        self.val = kids[1].val
# Optional shape; absence yields an empty element list.
class OptShape(Nonterm):
    def reduce_Shape(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        self.val = []
# ``TypeName { ... }`` — a shape attached to a named type reference.
class TypedShape(Nonterm):
    def reduce_NodeName_OptShape(self, *kids):
        self.val = qlast.Shape(
            # The type name becomes a single-step path of an ObjectRef.
            expr=qlast.Path(
                steps=[qlast.ObjectRef(
                    name=kids[0].val.name,
                    module=kids[0].val.module,
                    context=kids[0].context)
                ]
            ),
            elements=kids[1].val
        )
# A free-standing shape literal ``{ ptr := expr, ... }`` with no subject
# expression; every element must be a computable pointer.
class FreeShape(Nonterm):
    def reduce_LBRACE_FreeComputableShapePointerList_RBRACE(self, *kids):
        self.val = qlast.Shape(elements=kids[1].val)
    def reduce_LBRACE_FreeComputableShapePointerList_COMMA_RBRACE(self, *kids):
        self.val = qlast.Shape(elements=kids[1].val)
# Optional ``: { ... }`` sub-shape following a shape pointer.  The two
# error productions exist only to produce a targeted syntax error when
# the required ':' is omitted.
class OptAnySubShape(Nonterm):
    def reduce_COLON_Shape(self, *kids):
        self.val = kids[1].val
    def reduce_LBRACE(self, *kids):
        raise EdgeQLSyntaxError(
            f"Missing ':' before '{{' in a sub-shape",
            context=kids[0].context)
    def reduce_Shape(self, *kids):
        raise EdgeQLSyntaxError(
            f"Missing ':' before '{{' in a sub-shape",
            context=kids[0].context)
    def reduce_empty(self, *kids):
        self.val = []
# One element of a shape: either a pointer with optional sub-shape and
# FILTER/ORDER BY/OFFSET/LIMIT clauses, or a computable assignment.
class ShapeElement(Nonterm):
    def reduce_ShapeElementWithSubShape(self, *kids):
        r"""%reduce ShapePointer \
             OptAnySubShape OptFilterClause OptSortClause OptSelectLimit \
        """
        self.val = kids[0].val
        self.val.elements = kids[1].val
        self.val.where = kids[2].val
        self.val.orderby = kids[3].val
        # OptSelectLimit yields an (offset, limit) pair.
        self.val.offset = kids[4].val[0]
        self.val.limit = kids[4].val[1]
    def reduce_ComputableShapePointer(self, *kids):
        self.val = kids[0].val
# Comma-separated list of shape elements.
class ShapeElementList(ListNonterm, element=ShapeElement,
                       separator=tokens.T_COMMA):
    pass
# The most restricted shape path: a single outbound link step.
class VerySimpleShapePath(Nonterm):
    def reduce_PathStepName(self, *kids):
        from edb.schema import pointers as s_pointers
        steps = [
            qlast.Ptr(
                ptr=kids[0].val,
                direction=s_pointers.PointerDirection.Outbound
            ),
        ]
        self.val = qlast.Path(steps=steps)
# A shape path that is either a single outbound link or a link property
# (``@prop``), without type intersections.
class SimpleShapePath(Nonterm):
    def reduce_VerySimpleShapePath(self, *kids):
        self.val = kids[0].val
    def reduce_AT_ShortNodeName(self, *kids):
        self.val = qlast.Path(
            steps=[
                qlast.Ptr(
                    ptr=kids[1].val,
                    type='property'
                )
            ]
        )
# Wraps a simple shape path into a ShapeElement AST node.
class SimpleShapePointer(Nonterm):
    def reduce_SimpleShapePath(self, *kids):
        self.val = qlast.ShapeElement(
            expr=kids[0].val
        )
# Shape pointers in free shapes are not allowed to be link
# properties. This is because we need to be able to distinguish
# free shapes from set literals with only one token of lookahead
# (since this is an LL(1) parser) and seeing the := after @ident would
# require two tokens of lookahead.
class FreeSimpleShapePointer(Nonterm):
    def reduce_VerySimpleShapePath(self, *kids):
        self.val = qlast.ShapeElement(
            expr=kids[0].val
        )
class ShapePath(Nonterm):
    # A form of Path appearing as an element in shapes.
    #
    # one-of:
    #   __type__
    #   link
    #   @prop
    #   [IS ObjectType].link
    #   [IS Link]@prop - currently not supported
    def reduce_PathStepName_OptTypeIntersection(self, *kids):
        from edb.schema import pointers as s_pointers
        steps = [
            qlast.Ptr(
                ptr=kids[0].val,
                direction=s_pointers.PointerDirection.Outbound
            ),
        ]
        # Append the trailing [IS Type] intersection, if present.
        if kids[1].val is not None:
            steps.append(kids[1].val)
        self.val = qlast.Path(steps=steps)
    def reduce_AT_ShortNodeName(self, *kids):
        # ``@prop`` — a link-property step.
        self.val = qlast.Path(
            steps=[
                qlast.Ptr(
                    ptr=kids[1].val,
                    type='property'
                )
            ]
        )
    def reduce_TypeIntersection_DOT_PathStepName_OptTypeIntersection(
            self, *kids):
        # ``[IS ObjectType].link[ [IS Type] ]`` — leading intersection.
        from edb.schema import pointers as s_pointers
        steps = [
            kids[0].val,
            qlast.Ptr(
                ptr=kids[2].val,
                direction=s_pointers.PointerDirection.Outbound
            ),
        ]
        if kids[3].val is not None:
            steps.append(kids[3].val)
        self.val = qlast.Path(steps=steps)
# Wraps a full shape path into a ShapeElement AST node.
class ShapePointer(Nonterm):
    def reduce_ShapePath(self, *kids):
        self.val = qlast.ShapeElement(
            expr=kids[0].val
        )
# Parsed pointer qualifiers; a field is None when the corresponding
# qualifier (REQUIRED/OPTIONAL, SINGLE/MULTI) was not spelled out.
class PtrQualsSpec(typing.NamedTuple):
    required: typing.Optional[bool] = None
    cardinality: typing.Optional[qltypes.SchemaCardinality] = None
# Pointer qualifier keywords and their valid pairings; each production
# maps directly to a PtrQualsSpec.
class PtrQuals(Nonterm):
    def reduce_OPTIONAL(self, *kids):
        self.val = PtrQualsSpec(required=False)
    def reduce_REQUIRED(self, *kids):
        self.val = PtrQualsSpec(required=True)
    def reduce_SINGLE(self, *kids):
        self.val = PtrQualsSpec(cardinality=qltypes.SchemaCardinality.One)
    def reduce_MULTI(self, *kids):
        self.val = PtrQualsSpec(cardinality=qltypes.SchemaCardinality.Many)
    def reduce_OPTIONAL_SINGLE(self, *kids):
        self.val = PtrQualsSpec(
            required=False, cardinality=qltypes.SchemaCardinality.One)
    def reduce_OPTIONAL_MULTI(self, *kids):
        self.val = PtrQualsSpec(
            required=False, cardinality=qltypes.SchemaCardinality.Many)
    def reduce_REQUIRED_SINGLE(self, *kids):
        self.val = PtrQualsSpec(
            required=True, cardinality=qltypes.SchemaCardinality.One)
    def reduce_REQUIRED_MULTI(self, *kids):
        self.val = PtrQualsSpec(
            required=True, cardinality=qltypes.SchemaCardinality.Many)
class OptPtrQuals(Nonterm):
def reduce_empty(self, *kids):
self.val = PtrQualsSpec()
def reduce_PtrQuals(self, *kids):
self.val = kids[0].val
# We have to inline the OptPtrQuals here because the parser generator
# fails to cope with a shift/reduce on a REQUIRED token, since PtrQuals
# are followed by an ident in this case (unlike in DDL, where it is followed
# by a keyword).
#
# Each rule takes the ShapeElement produced by SimpleShapePointer and
# mutates it in place: attaches the computed expression, the shape
# operation (:= / += / -=), and any inlined qualifiers.
class ComputableShapePointer(Nonterm):
    def reduce_OPTIONAL_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.required = False
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_REQUIRED_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.required = True
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_MULTI_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.cardinality = qltypes.SchemaCardinality.Many
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_SINGLE_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.cardinality = qltypes.SchemaCardinality.One
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_OPTIONAL_MULTI_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = False
        self.val.cardinality = qltypes.SchemaCardinality.Many
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_OPTIONAL_SINGLE_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = False
        self.val.cardinality = qltypes.SchemaCardinality.One
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_REQUIRED_MULTI_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = True
        self.val.cardinality = qltypes.SchemaCardinality.Many
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_REQUIRED_SINGLE_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = True
        self.val.cardinality = qltypes.SchemaCardinality.One
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_SimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[0].val
        self.val.compexpr = kids[2].val
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[1].context,
        )
    def reduce_SimpleShapePointer_ADDASSIGN_Expr(self, *kids):
        # += appends to the existing value of the pointer.
        self.val = kids[0].val
        self.val.compexpr = kids[2].val
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.APPEND,
            context=kids[1].context,
        )
    def reduce_SimpleShapePointer_REMASSIGN_Expr(self, *kids):
        # -= subtracts from the existing value of the pointer.
        self.val = kids[0].val
        self.val.compexpr = kids[2].val
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.SUBTRACT,
            context=kids[1].context,
        )
# This is the same as the above ComputableShapePointer, except using
# FreeSimpleShapePointer and not allowing +=/-=.
class FreeComputableShapePointer(Nonterm):
    def reduce_OPTIONAL_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.required = False
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_REQUIRED_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.required = True
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_MULTI_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.cardinality = qltypes.SchemaCardinality.Many
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_SINGLE_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[1].val
        self.val.compexpr = kids[3].val
        self.val.cardinality = qltypes.SchemaCardinality.One
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[2].context,
        )
    def reduce_OPTIONAL_MULTI_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = False
        self.val.cardinality = qltypes.SchemaCardinality.Many
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_OPTIONAL_SINGLE_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = False
        self.val.cardinality = qltypes.SchemaCardinality.One
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_REQUIRED_MULTI_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = True
        self.val.cardinality = qltypes.SchemaCardinality.Many
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_REQUIRED_SINGLE_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[2].val
        self.val.compexpr = kids[4].val
        self.val.required = True
        self.val.cardinality = qltypes.SchemaCardinality.One
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[3].context,
        )
    def reduce_FreeSimpleShapePointer_ASSIGN_Expr(self, *kids):
        self.val = kids[0].val
        self.val.compexpr = kids[2].val
        self.val.operation = qlast.ShapeOperation(
            op=qlast.ShapeOp.ASSIGN,
            context=kids[1].context,
        )
# Comma-separated list of free-shape computable pointers.
class FreeComputableShapePointerList(ListNonterm,
                                     element=FreeComputableShapePointer,
                                     separator=tokens.T_COMMA):
    pass
# UNLESS CONFLICT specifier body; self.val is an (on_expr, else_expr)
# pair where either element may be None.
class UnlessConflictSpecifier(Nonterm):
    def reduce_ON_Expr_ELSE_Expr(self, *kids):
        self.val = (kids[1].val, kids[3].val)
    def reduce_ON_Expr(self, *kids):
        self.val = (kids[1].val, None)
    def reduce_empty(self, *kids):
        self.val = (None, None)
# The full UNLESS CONFLICT [ON ... [ELSE ...]] clause.
class UnlessConflictCause(Nonterm):
    def reduce_UNLESS_CONFLICT_UnlessConflictSpecifier(self, *kids):
        self.val = kids[2].val
class OptUnlessConflictClause(Nonterm):
    def reduce_UnlessConflictCause(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        self.val = None
# FILTER <expr>; self.val is the filter expression.
class FilterClause(Nonterm):
    def reduce_FILTER_Expr(self, *kids):
        self.val = kids[1].val
class OptFilterClause(Nonterm):
    def reduce_FilterClause(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        self.val = None
# ORDER BY <list>; self.val is the list of SortExpr nodes.
class SortClause(Nonterm):
    def reduce_ORDER_BY_OrderbyList(self, *kids):
        self.val = kids[2].val
class OptSortClause(Nonterm):
    def reduce_SortClause(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        # No ORDER BY yields an empty sort list (not None).
        self.val = []
# Single ORDER BY item: expression + optional ASC/DESC + EMPTY FIRST/LAST.
class OrderbyExpr(Nonterm):
    def reduce_Expr_OptDirection_OptNonesOrder(self, *kids):
        self.val = qlast.SortExpr(path=kids[0].val,
                                  direction=kids[1].val,
                                  nones_order=kids[2].val)
# ORDER BY items are separated by THEN (not comma) in EdgeQL.
class OrderbyList(ListNonterm, element=OrderbyExpr,
                  separator=tokens.T_THEN):
    pass
# OFFSET/LIMIT pair; self.val is (offset_expr, limit_expr), either None.
class OptSelectLimit(Nonterm):
    def reduce_SelectLimit(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        self.val = (None, None)
class SelectLimit(Nonterm):
    def reduce_OffsetClause_LimitClause(self, *kids):
        self.val = (kids[0].val, kids[1].val)
    def reduce_OffsetClause(self, *kids):
        self.val = (kids[0].val, None)
    def reduce_LimitClause(self, *kids):
        self.val = (None, kids[0].val)
class OffsetClause(Nonterm):
    def reduce_OFFSET_Expr(self, *kids):
        self.val = kids[1].val
class LimitClause(Nonterm):
    def reduce_LIMIT_Expr(self, *kids):
        self.val = kids[1].val
# Optional sort direction; defaults to qlast.SortDefault when omitted.
class OptDirection(Nonterm):
    def reduce_ASC(self, *kids):
        self.val = qlast.SortAsc
    def reduce_DESC(self, *kids):
        self.val = qlast.SortDesc
    def reduce_empty(self, *kids):
        self.val = qlast.SortDefault
# Optional EMPTY FIRST / EMPTY LAST ordering of empty values.
class OptNonesOrder(Nonterm):
    def reduce_EMPTY_FIRST(self, *kids):
        self.val = qlast.NonesFirst
    def reduce_EMPTY_LAST(self, *kids):
        self.val = qlast.NonesLast
    def reduce_empty(self, *kids):
        self.val = None
# Bracketed indexing/slicing element: [i], [a:b], [a:], [:b].
class IndirectionEl(Nonterm):
    def reduce_LBRACKET_Expr_RBRACKET(self, *kids):
        self.val = qlast.Index(index=kids[1].val)
    def reduce_LBRACKET_Expr_COLON_Expr_RBRACKET(self, *kids):
        self.val = qlast.Slice(start=kids[1].val, stop=kids[3].val)
    def reduce_LBRACKET_Expr_COLON_RBRACKET(self, *kids):
        self.val = qlast.Slice(start=kids[1].val, stop=None)
    def reduce_LBRACKET_COLON_Expr_RBRACKET(self, *kids):
        self.val = qlast.Slice(start=None, stop=kids[2].val)
# Parenthesized expression or statement; parentheses leave no AST node.
class ParenExpr(Nonterm):
    def reduce_LPAREN_Expr_RPAREN(self, *kids):
        self.val = kids[1].val
    def reduce_LPAREN_ExprStmt_RPAREN(self, *kids):
        self.val = kids[1].val
# The main expression nonterminal of the EdgeQL grammar.  Operator
# precedence/associativity is resolved by the parser generator from the
# precedence module; the @parsing.precedence decorators below override
# the default precedence of individual productions.
class Expr(Nonterm):
    # Path | Expr { ... } | Constant | '(' Expr ')' | FuncExpr
    # | Tuple | NamedTuple | Collection | Set
    # | '+' Expr | '-' Expr | Expr '+' Expr | Expr '-' Expr
    # | Expr '*' Expr | Expr '/' Expr | Expr '%' Expr
    # | Expr '**' Expr | Expr '<' Expr | Expr '>' Expr
    # | Expr '=' Expr
    # | Expr AND Expr | Expr OR Expr | NOT Expr
    # | Expr LIKE Expr | Expr NOT LIKE Expr
    # | Expr ILIKE Expr | Expr NOT ILIKE Expr
    # | Expr IS TypeExpr | Expr IS NOT TypeExpr
    # | INTROSPECT TypeExpr
    # | Expr IN Expr | Expr NOT IN Expr
    # | Expr '[' Expr ']'
    # | Expr '[' Expr ':' Expr ']'
    # | Expr '[' ':' Expr ']'
    # | Expr '[' Expr ':' ']'
    # | Expr '[' IS NodeName ']'
    # | '<' TypeName '>' Expr
    # | Expr IF Expr ELSE Expr
    # | Expr ?? Expr
    # | Expr UNION Expr | Expr UNION Expr
    # | DISTINCT Expr
    # | DETACHED Expr
    # | EXISTS Expr
    # | '__source__' | '__subject__'
    def reduce_Path(self, *kids):
        self.val = kids[0].val
    def reduce_Expr_Shape(self, *kids):
        self.val = qlast.Shape(expr=kids[0].val, elements=kids[1].val)
    def reduce_FreeShape(self, *kids):
        self.val = kids[0].val
    def reduce_Constant(self, *kids):
        self.val = kids[0].val
    def reduce_DUNDERSOURCE(self, *kids):
        self.val = qlast.Path(steps=[qlast.Source()])
    def reduce_DUNDERSUBJECT(self, *kids):
        self.val = qlast.Path(steps=[qlast.Subject()])
    @parsing.precedence(precedence.P_UMINUS)
    def reduce_ParenExpr(self, *kids):
        self.val = kids[0].val
    def reduce_Expr_IndirectionEl(self, *kids):
        # Successive [..] elements accumulate onto a single Indirection
        # node instead of nesting.
        expr = kids[0].val
        if isinstance(expr, qlast.Indirection):
            self.val = expr
            expr.indirection.append(kids[1].val)
        else:
            self.val = qlast.Indirection(arg=expr,
                                         indirection=[kids[1].val])
    def reduce_FuncExpr(self, *kids):
        self.val = kids[0].val
    def reduce_Tuple(self, *kids):
        self.val = kids[0].val
    def reduce_Collection(self, *kids):
        self.val = kids[0].val
    def reduce_Set(self, *kids):
        self.val = kids[0].val
    def reduce_NamedTuple(self, *kids):
        self.val = kids[0].val
    def reduce_EXISTS_Expr(self, *kids):
        self.val = qlast.UnaryOp(op='EXISTS', operand=kids[1].val)
    def reduce_DISTINCT_Expr(self, *kids):
        self.val = qlast.UnaryOp(op='DISTINCT', operand=kids[1].val)
    def reduce_DETACHED_Expr(self, *kids):
        self.val = qlast.DetachedExpr(expr=kids[1].val)
    @parsing.precedence(precedence.P_UMINUS)
    def reduce_PLUS_Expr(self, *kids):
        self.val = qlast.UnaryOp(op=kids[0].val, operand=kids[1].val)
    @parsing.precedence(precedence.P_UMINUS)
    def reduce_MINUS_Expr(self, *kids):
        arg = kids[1].val
        if isinstance(arg, qlast.BaseRealConstant):
            # Special case for -<real_const> so that type inference based
            # on literal size works correctly in the case of INT_MIN and
            # friends.
            self.val = type(arg)(value=arg.value, is_negative=True)
        else:
            self.val = qlast.UnaryOp(op=kids[0].val, operand=arg)
    def reduce_Expr_PLUS_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_DOUBLEPLUS_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_MINUS_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_STAR_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_SLASH_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_DOUBLESLASH_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_PERCENT_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_CIRCUMFLEX_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_LANGBRACKET_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_RANGBRACKET_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    @parsing.precedence(precedence.P_DOUBLEQMARK_OP)
    def reduce_Expr_DOUBLEQMARK_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_EQUALS_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    @parsing.precedence(precedence.P_OP)
    def reduce_Expr_OP_Expr(self, *kids):
        # Generic user-visible operator token (e.g. != ?= etc.).
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val,
                               right=kids[2].val)
    def reduce_Expr_AND_Expr(self, *kids):
        # Keywords are normalized to upper case in the AST.
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val.upper(),
                               right=kids[2].val)
    def reduce_Expr_OR_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op=kids[1].val.upper(),
                               right=kids[2].val)
    def reduce_NOT_Expr(self, *kids):
        self.val = qlast.UnaryOp(op=kids[0].val.upper(), operand=kids[1].val)
    def reduce_Expr_LIKE_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op='LIKE',
                               right=kids[2].val)
    def reduce_Expr_NOT_LIKE_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op='NOT LIKE',
                               right=kids[3].val)
    def reduce_Expr_ILIKE_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op='ILIKE',
                               right=kids[2].val)
    def reduce_Expr_NOT_ILIKE_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op='NOT ILIKE',
                               right=kids[3].val)
    def reduce_Expr_IS_TypeExpr(self, *kids):
        self.val = qlast.IsOp(left=kids[0].val, op='IS',
                              right=kids[2].val)
    @parsing.precedence(precedence.P_IS)
    def reduce_Expr_IS_NOT_TypeExpr(self, *kids):
        self.val = qlast.IsOp(left=kids[0].val, op='IS NOT',
                              right=kids[3].val)
    def reduce_INTROSPECT_TypeExpr(self, *kids):
        self.val = qlast.Introspect(type=kids[1].val)
    def reduce_Expr_IN_Expr(self, *kids):
        inexpr = kids[2].val
        self.val = qlast.BinOp(left=kids[0].val, op='IN',
                               right=inexpr)
    @parsing.precedence(precedence.P_IN)
    def reduce_Expr_NOT_IN_Expr(self, *kids):
        inexpr = kids[3].val
        self.val = qlast.BinOp(left=kids[0].val, op='NOT IN',
                               right=inexpr)
    @parsing.precedence(precedence.P_TYPECAST)
    def reduce_LANGBRACKET_FullTypeExpr_RANGBRACKET_Expr(
            self, *kids):
        # Plain type cast: <type>expr.
        self.val = qlast.TypeCast(
            expr=kids[3].val,
            type=kids[1].val,
            cardinality_mod=None,
        )
    @parsing.precedence(precedence.P_TYPECAST)
    def reduce_LANGBRACKET_OPTIONAL_FullTypeExpr_RANGBRACKET_Expr(
            self, *kids):
        # <optional type>expr.
        self.val = qlast.TypeCast(
            expr=kids[4].val,
            type=kids[2].val,
            cardinality_mod=qlast.CardinalityModifier.Optional,
        )
    @parsing.precedence(precedence.P_TYPECAST)
    def reduce_LANGBRACKET_REQUIRED_FullTypeExpr_RANGBRACKET_Expr(
            self, *kids):
        # <required type>expr.
        self.val = qlast.TypeCast(
            expr=kids[4].val,
            type=kids[2].val,
            cardinality_mod=qlast.CardinalityModifier.Required,
        )
    def reduce_Expr_IF_Expr_ELSE_Expr(self, *kids):
        self.val = qlast.IfElse(
            if_expr=kids[0].val, condition=kids[2].val, else_expr=kids[4].val)
    def reduce_Expr_UNION_Expr(self, *kids):
        self.val = qlast.BinOp(left=kids[0].val, op='UNION',
                               right=kids[2].val)
# Unnamed tuple literal; a one-element tuple requires a trailing comma,
# hence the mandatory COMMA in the first production.
class Tuple(Nonterm):
    def reduce_LPAREN_Expr_COMMA_OptExprList_RPAREN(self, *kids):
        self.val = qlast.Tuple(elements=[kids[1].val] + kids[3].val)
    def reduce_LPAREN_RPAREN(self, *kids):
        self.val = qlast.Tuple(elements=[])
# Named tuple literal: (name := expr, ...), optional trailing comma.
class NamedTuple(Nonterm):
    def reduce_LPAREN_NamedTupleElementList_RPAREN(self, *kids):
        self.val = qlast.NamedTuple(elements=kids[1].val)
    def reduce_LPAREN_NamedTupleElementList_COMMA_RPAREN(self, *kids):
        self.val = qlast.NamedTuple(elements=kids[1].val)
class NamedTupleElement(Nonterm):
    def reduce_ShortNodeName_ASSIGN_Expr(self, *kids):
        self.val = qlast.TupleElement(
            name=kids[0].val,
            val=kids[2].val
        )
class NamedTupleElementList(ListNonterm, element=NamedTupleElement,
                            separator=tokens.T_COMMA):
    pass
# Set literal: { expr, ... }.
class Set(Nonterm):
    def reduce_LBRACE_OptExprList_RBRACE(self, *kids):
        self.val = qlast.Set(elements=kids[1].val)
# Array literal: [ expr, ... ].
class Collection(Nonterm):
    def reduce_LBRACKET_OptExprList_RBRACKET(self, *kids):
        elements = kids[1].val
        self.val = qlast.Array(elements=elements)
# Possibly-empty expression list with an optional trailing comma.
class OptExprList(Nonterm):
    def reduce_ExprList_COMMA(self, *kids):
        self.val = kids[0].val
    def reduce_ExprList(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        self.val = []
class ExprList(ListNonterm, element=Expr, separator=tokens.T_COMMA):
    pass
# Literal constants and query parameters ($arg).
class Constant(Nonterm):
    # ARGUMENT
    # | BaseNumberConstant
    # | BaseStringConstant
    # | BaseBooleanConstant
    # | BaseBytesConstant
    def reduce_ARGUMENT(self, *kids):
        # Strip the leading '$' from the parameter token.
        self.val = qlast.Parameter(name=kids[0].val[1:], optional=False)
    def reduce_BaseNumberConstant(self, *kids):
        self.val = kids[0].val
    def reduce_BaseStringConstant(self, *kids):
        self.val = kids[0].val
    def reduce_BaseBooleanConstant(self, *kids):
        self.val = kids[0].val
    def reduce_BaseBytesConstant(self, *kids):
        self.val = kids[0].val
# Numeric literals; the N-prefixed tokens are the arbitrary-precision
# (bigint/decimal, trailing 'n') variants.
class BaseNumberConstant(Nonterm):
    def reduce_ICONST(self, *kids):
        self.val = qlast.IntegerConstant(value=kids[0].val)
    def reduce_FCONST(self, *kids):
        self.val = qlast.FloatConstant(value=kids[0].val)
    def reduce_NICONST(self, *kids):
        self.val = qlast.BigintConstant(value=kids[0].val)
    def reduce_NFCONST(self, *kids):
        self.val = qlast.DecimalConstant(value=kids[0].val)
class BaseStringConstant(Nonterm):
    def reduce_SCONST(self, token):
        # clean_value has quotes and escapes already processed.
        self.val = qlast.StringConstant(value=token.clean_value)
class BaseBytesConstant(Nonterm):
    def reduce_BCONST(self, bytes_tok):
        self.val = qlast.BytesConstant(value=bytes_tok.clean_value)
class BaseBooleanConstant(Nonterm):
    def reduce_TRUE(self, *kids):
        self.val = qlast.BooleanConstant(value='true')
    def reduce_FALSE(self, *kids):
        self.val = qlast.BooleanConstant(value='false')
# Path expressions.  All productions carry P_DOT precedence so that path
# steps bind tighter than surrounding operators.
class Path(Nonterm):
    @parsing.precedence(precedence.P_DOT)
    def reduce_NodeName(self, *kids):
        self.val = qlast.Path(
            steps=[qlast.ObjectRef(name=kids[0].val.name,
                                   module=kids[0].val.module)])
    @parsing.precedence(precedence.P_DOT)
    def reduce_Expr_PathStep(self, *kids):
        # Extend an existing Path in place; wrap a non-path prefix first.
        path = kids[0].val
        if not isinstance(path, qlast.Path):
            path = qlast.Path(steps=[path])
        path.steps.append(kids[1].val)
        self.val = path
    @parsing.precedence(precedence.P_DOT)
    def reduce_PathStep(self, *kids):
        # Leading-dot path (partial path), resolved against context later.
        self.val = qlast.Path(steps=[kids[0].val], partial=True)
    # special case of Path.0.1 etc.
    @parsing.precedence(precedence.P_DOT)
    def reduce_Expr_DOT_FCONST(self, *kids):
        # this is a valid link-like syntax for accessing unnamed tuples
        path = kids[0].val
        if not isinstance(path, qlast.Path):
            path = qlast.Path(steps=[path])
        path.steps.extend(self._float_to_path(kids[2], kids[1].context))
        self.val = path
    @parsing.precedence(precedence.P_DOT)
    def reduce_DOT_FCONST(self, *kids):
        # this is a valid link-like syntax for accessing unnamed tuples
        self.val = qlast.Path(
            steps=self._float_to_path(kids[1], kids[0].context),
            partial=True)
    def _float_to_path(self, token, context):
        # Split a float token like "0.1" into two consecutive tuple-index
        # path steps; anything that is not <digits>.<digits> is an error.
        from edb.schema import pointers as s_pointers
        # make sure that the float is of the type 0.1
        parts = token.val.split('.')
        if not (len(parts) == 2 and parts[0].isdigit() and parts[1].isdigit()):
            raise EdgeQLSyntaxError(
                f"Unexpected {token.val!r}",
                context=token.context)
        # context for the AST is established manually here
        return [
            qlast.Ptr(
                ptr=qlast.ObjectRef(
                    name=parts[0],
                    context=token.context,
                ),
                direction=s_pointers.PointerDirection.Outbound,
                context=context,
            ),
            qlast.Ptr(
                ptr=qlast.ObjectRef(
                    name=parts[1],
                    context=token.context,
                ),
                direction=s_pointers.PointerDirection.Outbound,
                context=token.context,
            )
        ]
# A single step in a path: .link, .<index>, .<backlink, @prop or [IS ...].
class PathStep(Nonterm):
    def reduce_DOT_PathStepName(self, *kids):
        from edb.schema import pointers as s_pointers
        self.val = qlast.Ptr(
            ptr=kids[1].val,
            direction=s_pointers.PointerDirection.Outbound
        )
    def reduce_DOT_ICONST(self, *kids):
        # this is a valid link-like syntax for accessing unnamed tuples
        from edb.schema import pointers as s_pointers
        self.val = qlast.Ptr(
            ptr=qlast.ObjectRef(name=kids[1].val),
            direction=s_pointers.PointerDirection.Outbound
        )
    def reduce_DOTBW_PathStepName(self, *kids):
        # .< -- backlink traversal.
        from edb.schema import pointers as s_pointers
        self.val = qlast.Ptr(
            ptr=kids[1].val,
            direction=s_pointers.PointerDirection.Inbound
        )
    def reduce_AT_ShortNodeName(self, *kids):
        # @prop -- link-property access.
        from edb.schema import pointers as s_pointers
        self.val = qlast.Ptr(
            ptr=kids[1].val,
            direction=s_pointers.PointerDirection.Outbound,
            type='property'
        )
    def reduce_TypeIntersection(self, *kids):
        self.val = kids[0].val
# [IS SomeType] path step narrowing the preceding set's type.
class TypeIntersection(Nonterm):
    def reduce_LBRACKET_IS_FullTypeExpr_RBRACKET(self, *kids):
        self.val = qlast.TypeIntersection(
            type=kids[2].val,
        )
# Optional [IS ...] type intersection; reduces to None when absent.
class OptTypeIntersection(Nonterm):
    def reduce_TypeIntersection(self, *kids):
        self.val = kids[0].val
    # *kids added for consistency with every other reduce_empty in this
    # grammar; the empty production passes no kids, so behavior is the same.
    def reduce_empty(self, *kids):
        self.val = None
# Name following a '.' in a path: an identifier or the special __type__.
class PathStepName(Nonterm):
    def reduce_ShortNodeName(self, *kids):
        self.val = kids[0].val
    def reduce_DUNDERTYPE(self, *kids):
        self.val = qlast.ObjectRef(name=kids[0].val)
# Function call: name(args).  Splits the argument triples produced by
# FuncCallArgExpr into positional args and kwargs, rejecting duplicates
# and positional-after-named.
class FuncApplication(Nonterm):
    def reduce_NodeName_LPAREN_OptFuncArgList_RPAREN(self, *kids):
        module = kids[0].val.module
        func_name = kids[0].val.name
        # Unqualified names stay a plain string; qualified names become
        # a (module, name) tuple.
        name = func_name if not module else (module, func_name)
        last_named_seen = None
        args = []
        kwargs = {}
        for argname, argname_ctx, arg in kids[2].val:
            if argname is not None:
                if argname in kwargs:
                    raise EdgeQLSyntaxError(
                        f"duplicate named argument `{argname}`",
                        context=argname_ctx)
                last_named_seen = argname
                kwargs[argname] = arg
            else:
                if last_named_seen is not None:
                    raise EdgeQLSyntaxError(
                        f"positional argument after named "
                        f"argument `{last_named_seen}`",
                        context=arg.context)
                args.append(arg)
        self.val = qlast.FunctionCall(func=name, args=args, kwargs=kwargs)
class FuncExpr(Nonterm):
    def reduce_FuncApplication(self, *kids):
        self.val = kids[0].val
# A single function-call argument.  self.val is the triple
# (argname, argname_context, expr); argname is None for positional args.
class FuncCallArgExpr(Nonterm):
    def reduce_Expr(self, *kids):
        self.val = (
            None,
            None,
            kids[0].val,
        )
    def reduce_AnyIdentifier_ASSIGN_Expr(self, *kids):
        self.val = (
            kids[0].val,
            kids[0].context,
            kids[2].val,
        )
    def reduce_ARGUMENT_ASSIGN_Expr(self, *kids):
        # `$name := ...` is always an error; pick the more specific
        # message for `$1 := ...` style numeric names.
        if kids[0].val[1].isdigit():
            raise EdgeQLSyntaxError(
                "numeric named arguments are not supported",
                context=kids[0].context)
        else:
            raise EdgeQLSyntaxError(
                f"named arguments do not need a '$' prefix, "
                f"rewrite as '{kids[0].val[1:]} := ...'",
                context=kids[0].context)
# Function argument with optional FILTER/ORDER BY (used by aggregates);
# when either clause is present the argument is wrapped in an implicit
# SELECT carrying them.
class FuncCallArg(Nonterm):
    def reduce_FuncCallArgExpr_OptFilterClause_OptSortClause(self, *kids):
        self.val = kids[0].val
        if kids[1].val or kids[2].val:
            qry = qlast.SelectQuery(
                result=self.val[2],
                where=kids[1].val,
                orderby=kids[2].val,
                implicit=True,
            )
            # Keep the (argname, argname_ctx, expr) triple shape.
            self.val = (self.val[0], self.val[1], qry)
class FuncArgList(ListNonterm, element=FuncCallArg, separator=tokens.T_COMMA):
    pass
# Possibly-empty argument list with an optional trailing comma.
class OptFuncArgList(Nonterm):
    def reduce_FuncArgList_COMMA(self, *kids):
        self.val = kids[0].val
    def reduce_FuncArgList(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        self.val = []
# Positional-only call argument (no name), same implicit-SELECT wrapping.
class PosCallArg(Nonterm):
    def reduce_Expr_OptFilterClause_OptSortClause(self, *kids):
        self.val = kids[0].val
        if kids[1].val or kids[2].val:
            self.val = qlast.SelectQuery(
                result=self.val,
                where=kids[1].val,
                orderby=kids[2].val,
                implicit=True,
            )
class PosCallArgList(ListNonterm, element=PosCallArg,
                     separator=tokens.T_COMMA):
    pass
class OptPosCallArgList(Nonterm):
    def reduce_PosCallArgList(self, *kids):
        self.val = kids[0].val
    def reduce_empty(self, *kids):
        self.val = []
# Plain identifier: a raw IDENT token or an unreserved keyword used as
# a name.
class Identifier(Nonterm):
    def reduce_IDENT(self, *kids):
        self.val = kids[0].clean_value
    def reduce_UnreservedKeyword(self, *kids):
        self.val = kids[0].val
# Identifier that additionally admits reserved keywords (for positions
# where no ambiguity is possible).
class AnyIdentifier(Nonterm):
    def reduce_Identifier(self, *kids):
        self.val = kids[0].val
    def reduce_ReservedKeyword(self, *kids):
        name = kids[0].val
        if name[:2] == '__' and name[-2:] == '__':
            # There are a few reserved keywords like __std__ and __subject__
            # that can be used in paths but are prohibited to be used
            # anywhere else. So just as the tokenizer prohibits using
            # __names__ in general, we enforce the rule here for the
            # few remaining reserved __keywords__.
            raise EdgeQLSyntaxError(
                "identifiers surrounded by double underscores are forbidden",
                context=kids[0].context)
        self.val = name
class ModuleName(ListNonterm, element=AnyIdentifier, separator=tokens.T_DOT):
    pass
# this can appear anywhere
# Possibly-qualified name; self.val is a list of 1 or 2 name parts.
class BaseName(Nonterm):
    def reduce_Identifier(self, *kids):
        self.val = [kids[0].val]
    def reduce_Identifier_DOUBLECOLON_AnyIdentifier(self, *kids):
        self.val = [kids[0].val, kids[2].val]
    def reduce_DUNDERSTD_DOUBLECOLON_AnyIdentifier(self, *kids):
        self.val = ['__std__', kids[2].val]
# Non-collection type.
class SimpleTypeName(Nonterm):
    def reduce_NodeName(self, *kids):
        self.val = qlast.TypeName(maintype=kids[0].val)
    def reduce_ANYTYPE(self, *kids):
        self.val = qlast.TypeName(maintype=qlast.AnyType())
    def reduce_ANYTUPLE(self, *kids):
        self.val = qlast.TypeName(maintype=qlast.AnyTuple())
class SimpleTypeNameList(ListNonterm, element=SimpleTypeName,
                         separator=tokens.T_COMMA):
    pass
# Parametrized type: name<subtypes>, e.g. array<int64>, tuple<a: str>,
# enum<'x', 'y'>.
class CollectionTypeName(Nonterm):
    def validate_subtype_list(self, lst):
        # Check that string literals (enum members), named subtypes and
        # unnamed subtypes are not mixed within one subtype list.
        has_nonstrval = has_strval = has_items = False
        for el in lst.val:
            if isinstance(el, qlast.TypeExprLiteral):
                has_strval = True
            elif isinstance(el, qlast.TypeName):
                if el.name:
                    has_items = True
                else:
                    has_nonstrval = True
        if (has_nonstrval or has_items) and has_strval:
            # Prohibit cases like `tuple<a: int64, 'aaaa'>` and
            # `enum<bbbb, 'aaaa'>`
            raise EdgeQLSyntaxError(
                "mixing string type literals and type names is not supported",
                context=lst.context)
        if has_items and has_nonstrval:
            # Prohibit cases like `tuple<a: int64, int32>`
            raise EdgeQLSyntaxError(
                "mixing named and unnamed subtype declarations "
                "is not supported",
                context=lst.context)
    def reduce_NodeName_LANGBRACKET_RANGBRACKET(self, *kids):
        # Constructs like `enum<>` or `array<>` aren't legal.
        raise EdgeQLSyntaxError(
            'parametrized type must have at least one argument',
            context=kids[1].context,
        )
    def reduce_NodeName_LANGBRACKET_SubtypeList_RANGBRACKET(self, *kids):
        self.validate_subtype_list(kids[2])
        self.val = qlast.TypeName(
            maintype=kids[0].val,
            subtypes=kids[2].val,
        )
    def reduce_NodeName_LANGBRACKET_SubtypeList_COMMA_RANGBRACKET(self, *kids):
        # Same as above, permitting a trailing comma.
        self.validate_subtype_list(kids[2])
        self.val = qlast.TypeName(
            maintype=kids[0].val,
            subtypes=kids[2].val,
        )
class TypeName(Nonterm):
    def reduce_SimpleTypeName(self, *kids):
        self.val = kids[0].val
    def reduce_CollectionTypeName(self, *kids):
        self.val = kids[0].val
class TypeNameList(ListNonterm, element=TypeName,
                   separator=tokens.T_COMMA):
    pass
# This is a type expression without angle brackets, so it
# can be used without parentheses in a context where the
# angle bracket has a different meaning.
class TypeExpr(Nonterm):
    def reduce_SimpleTypeName(self, *kids):
        self.val = kids[0].val
    def reduce_TYPEOF_Expr(self, *kids):
        self.val = qlast.TypeOf(expr=kids[1].val)
    def reduce_LPAREN_FullTypeExpr_RPAREN(self, *kids):
        # Parenthesized form may contain collection types safely.
        self.val = kids[1].val
    def reduce_TypeExpr_PIPE_TypeExpr(self, *kids):
        self.val = qlast.TypeOp(left=kids[0].val, op='|',
                                right=kids[2].val)
    def reduce_TypeExpr_AMPER_TypeExpr(self, *kids):
        self.val = qlast.TypeOp(left=kids[0].val, op='&',
                                right=kids[2].val)
# This is a type expression which includes collection types,
# so it can only be directly used in a context where the
# angle bracket is unambiguous.
class FullTypeExpr(Nonterm):
    def reduce_TypeName(self, *kids):
        self.val = kids[0].val
    def reduce_TYPEOF_Expr(self, *kids):
        self.val = qlast.TypeOf(expr=kids[1].val)
    def reduce_LPAREN_FullTypeExpr_RPAREN(self, *kids):
        self.val = kids[1].val
    def reduce_FullTypeExpr_PIPE_FullTypeExpr(self, *kids):
        self.val = qlast.TypeOp(left=kids[0].val, op='|',
                                right=kids[2].val)
    def reduce_FullTypeExpr_AMPER_FullTypeExpr(self, *kids):
        self.val = qlast.TypeOp(left=kids[0].val, op='&',
                                right=kids[2].val)
# One element of a parametrized type's subtype list: a bare type, a
# named subtype (name: type), or a string literal (enum member).
class Subtype(Nonterm):
    def reduce_FullTypeExpr(self, *kids):
        self.val = kids[0].val
    def reduce_Identifier_COLON_FullTypeExpr(self, *kids):
        # Named subtype: attach the name to the type node in place.
        self.val = kids[2].val
        self.val.name = kids[0].val
    def reduce_BaseStringConstant(self, *kids):
        # TODO: Raise a DeprecationWarning once we have facility for that.
        self.val = qlast.TypeExprLiteral(
            val=kids[0].val,
        )
class SubtypeList(ListNonterm, element=Subtype, separator=tokens.T_COMMA):
    pass
class NodeName(Nonterm):
    # NOTE: Generic short of fully-qualified name.
    #
    # This name is safe to be used anywhere as it starts with IDENT only.
    def reduce_BaseName(self, *kids):
        # All but the last part form the module; empty module becomes None.
        self.val = qlast.ObjectRef(
            module='.'.join(kids[0].val[:-1]) or None,
            name=kids[0].val[-1])
class NodeNameList(ListNonterm, element=NodeName, separator=tokens.T_COMMA):
    pass
class ShortNodeName(Nonterm):
    # NOTE: A non-qualified name that can be an identifier or
    # UNRESERVED_KEYWORD.
    #
    # This name is used as part of paths after the DOT. It can be an
    # identifier including UNRESERVED_KEYWORD and does not need to be
    # quoted or parenthesized.
    def reduce_Identifier(self, *kids):
        self.val = qlast.ObjectRef(
            module=None,
            name=kids[0].val)
# ShortNodeNameList is needed in DDL, but it's worthwhile to define it
# here, near ShortNodeName.
class ShortNodeNameList(ListNonterm, element=ShortNodeName,
                        separator=tokens.T_COMMA):
    pass
class AnyNodeName(Nonterm):
    # NOTE: A non-qualified name that can be ANY identifier.
    #
    # This name is used as part of paths after the DOT. It can be any
    # identifier including RESERVED_KEYWORD and UNRESERVED_KEYWORD and
    # does not need to be quoted or parenthesized.
    #
    # This is mainly used in DDL statements that have another keyword
    # completely disambiguating that what comes next is a name. It
    # CANNOT be used in Expr productions because it will cause
    # ambiguity with NodeName, etc.
    def reduce_AnyIdentifier(self, *kids):
        self.val = qlast.ObjectRef(
            module=None,
            name=kids[0].val)
# Metaclass that synthesizes one reduce_<KEYWORD> method per keyword of
# the given type.  NOTE: the production is declared via the docstring
# ("%reduce <TOKEN>") -- this is why hand-written reduce methods in this
# grammar must not carry ordinary docstrings.
class KeywordMeta(parsing.NontermMeta):
    def __new__(mcls, name, bases, dct, *, type):
        result = super().__new__(mcls, name, bases, dct)
        assert type in keywords.keyword_types
        for token in keywords.by_type[type].values():
            def method(inst, *kids):
                inst.val = kids[0].val
            method = context.has_context(method)
            method.__doc__ = "%%reduce %s" % token
            method.__name__ = 'reduce_%s' % token
            setattr(result, method.__name__, method)
        return result
    def __init__(cls, name, bases, dct, *, type):
        # Drop the extra `type` keyword before delegating to NontermMeta.
        super().__init__(name, bases, dct)
class UnreservedKeyword(Nonterm, metaclass=KeywordMeta,
                        type=keywords.UNRESERVED_KEYWORD):
    pass
class ReservedKeyword(Nonterm, metaclass=KeywordMeta,
                      type=keywords.RESERVED_KEYWORD):
    pass
class SchemaObjectClassValue(typing.NamedTuple):
itemclass: qltypes.SchemaObjectClass
class SchemaObjectClass(Nonterm):
    # One reduction per schema-object-class keyword; each wraps the
    # matching qltypes.SchemaObjectClass member in a SchemaObjectClassValue.
    # No docstrings on the reduce_* methods: the parser generator derives
    # each production from the method name.
    def reduce_ALIAS(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.ALIAS)
    def reduce_ANNOTATION(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.ANNOTATION)
    def reduce_CAST(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.CAST)
    def reduce_CONSTRAINT(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.CONSTRAINT)
    def reduce_FUNCTION(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.FUNCTION)
    def reduce_LINK(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.LINK)
    def reduce_MODULE(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.MODULE)
    def reduce_OPERATOR(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.OPERATOR)
    def reduce_PROPERTY(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.PROPERTY)
    def reduce_SCALAR_TYPE(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.SCALAR_TYPE)
    def reduce_TYPE(self, *kids):
        self.val = SchemaObjectClassValue(
            itemclass=qltypes.SchemaObjectClass.TYPE)
class SchemaItem(Nonterm):
    # A schema object class keyword followed by a name (e.g. "TYPE foo");
    # the object class is attached to the returned name reference.
    def reduce_SchemaObjectClass_NodeName(self, *kids):
        ref = kids[1].val
        ref.itemclass = kids[0].val.itemclass
        self.val = ref
| 30.606215 | 79 | 0.614014 |
7956965f5af53b5f793773dae0e2d5392c2ebfef | 2,146 | py | Python | utils/common_tools.py | YoungYoung619/road_object_detection | 05b0bcac45b64a6ae72606d4be77d9f8220748f8 | [
"MIT"
] | 2 | 2019-06-24T03:23:35.000Z | 2019-07-03T06:53:26.000Z | utils/common_tools.py | YoungYoung619/road_object_detection | 05b0bcac45b64a6ae72606d4be77d9f8220748f8 | [
"MIT"
] | null | null | null | utils/common_tools.py | YoungYoung619/road_object_detection | 05b0bcac45b64a6ae72606d4be77d9f8220748f8 | [
"MIT"
] | null | null | null | """
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
Author:Team Li
"""
import tensorflow as tf
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def centerBboxes_2_cornerBboxes(center_bboxes):
    """ change the center bounding boxes into corner bounding boxes
    Args:
        center_bboxes: a tensor, lowest dimention is [yc,xc,h,w]
    Return:
        a tensor with the same shape as the input, lowest dimention
        means [ymin,xmin,ymax,xmax]
    """
    # Replace the (single) unknown static dimension with -1 so the final
    # reshape back to the original layout works with a dynamic batch size.
    shape = center_bboxes.get_shape().as_list()
    try:
        i = shape.index(None)
        shape[i] = -1
    except ValueError:
        pass
    # Flatten to an (N, 4) view of [yc, xc, h, w] boxes.
    center_bboxes = tf.reshape(center_bboxes, shape=[-1, 4])
    ymin = center_bboxes[:, 0] - center_bboxes[:, 2] / 2  ##ymin = yc - h/2
    xmin = center_bboxes[:, 1] - center_bboxes[:, 3] / 2  ##xmin = xc - w/2
    ymax = center_bboxes[:, 0] + center_bboxes[:, 2] / 2  ##ymax = yc + h/2
    xmax = center_bboxes[:, 1] + center_bboxes[:, 3] / 2  ##xmax = xc + w/2
    corner_bboxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)
    return tf.reshape(corner_bboxes, shape=shape)
def cornerBboxes_2_centerBboxes(corner_bboxes):
    """ change the corner bounding boxes into center bounding boxes
    Args:
        corner_bboxes: a tensor. lowest dimention means [ymin,xmin,ymax,xmax]
    Return:
        a tensor, has the same shape with input, lowest dimention means [yc,xc,h,w]
    """
    # Replace the (single) unknown static dimension with -1 so the final
    # reshape back to the original layout works with a dynamic batch size.
    shape = corner_bboxes.get_shape().as_list()
    try:
        i = shape.index(None)
        shape[i] = -1
    except ValueError:
        pass
    # Flatten to an (N, 4) view of [ymin, xmin, ymax, xmax] boxes.
    corner_bboxes = tf.reshape(corner_bboxes, shape=[-1, 4])
    cy = (corner_bboxes[:, 0] + corner_bboxes[:, 2]) / 2.  ##yc = (ymin + ymax)/2
    cx = (corner_bboxes[:, 1] + corner_bboxes[:, 3]) / 2.  ##xc = (xmin + xmax)/2
    h = corner_bboxes[:, 2] - corner_bboxes[:, 0]  ##h = ymax - ymin
    w = corner_bboxes[:, 3] - corner_bboxes[:, 1]  ##w = xmax - xmin
    center_bboxes = tf.stack([cy, cx, h, w], axis=-1)
    return tf.reshape(center_bboxes, shape=shape)
79569718fdda2823e3af43e3c9d6dfa8397e4b65 | 446 | py | Python | src/statsAuxiliary/statsAuxiliary.py | nickeita/su2021_is601_project2 | 4974a05517c7884751c5ece09177af2a7640f503 | [
"MIT"
] | null | null | null | src/statsAuxiliary/statsAuxiliary.py | nickeita/su2021_is601_project2 | 4974a05517c7884751c5ece09177af2a7640f503 | [
"MIT"
] | null | null | null | src/statsAuxiliary/statsAuxiliary.py | nickeita/su2021_is601_project2 | 4974a05517c7884751c5ece09177af2a7640f503 | [
"MIT"
] | null | null | null | from calculator.calculator import Calculator
from statsAuxiliary.aggregate import aggregate
from statsAuxiliary.varianceInput import variance_input
class StatsAuxiliary(Calculator):
    """Calculator subclass exposing aggregate and variance-input helpers."""
    result = 0  # last computed value; shadowed per-instance after first use
    def __init__(self):
        super().__init__()
    def aggr(self, a):
        """Aggregate *a*, remember the value on the instance, return it."""
        value = aggregate(a)
        self.result = value
        return value
    def var_input(self, a):
        """Compute the variance input for *a*, remember it, return it."""
        value = variance_input(a)
        self.result = value
        return value
| 22.3 | 55 | 0.688341 |
7956974aaeab2d596e7eea989c51c0e9c866f86d | 1,868 | py | Python | src/atcoder/abc006/a/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 1 | 2021-07-11T03:20:10.000Z | 2021-07-11T03:20:10.000Z | src/atcoder/abc006/a/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 39 | 2021-07-10T05:21:09.000Z | 2021-12-15T06:10:12.000Z | src/atcoder/abc006/a/sol_0.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | null | null | null | class Reader:
@staticmethod
def readline():
import sys
return sys.stdin.buffer \
.readline().rstrip()
@classmethod
def read_int(cls):
i = int(cls.readline())
return i
@classmethod
def read_str(cls):
s = cls.readline().decode()
return s
@classmethod
def readline_ints(cls):
*ints, = map(
int,
cls.readline().split(),
)
return ints
@classmethod
def readline_strs(cls):
s = cls.read_str().split()
return s
@staticmethod
def read():
import sys
i = sys.stdin.buffer.read()
return i
@classmethod
def read_ints(cls):
*ints, = map(
int,
cls.read().split(),
)
return ints
@classmethod
def read_strs(cls):
return cls.read() \
.decode().split()
@staticmethod
def readlines():
import sys
lines = sys.stdin.buffer \
.readlines()
lines = [
l.rstrip()
for l in lines
]
return lines
class ReaderNumpy(Reader):
  """Reader variant returning numpy int64 arrays instead of lists."""
  @classmethod
  def readline_ints(cls):
    """Parse one line of whitespace-separated ints into an int64 ndarray."""
    import numpy as np
    # np.fromstring(..., sep=' ') is deprecated; split the text and let
    # np.array do the int conversion instead.
    return np.array(
      cls.read_str().split(),
      dtype=np.int64,
    )
  @classmethod
  def read_ints(cls):
    """Parse all remaining whitespace-separated ints into an int64 ndarray."""
    import numpy as np
    return np.array(
      cls.read().decode().split(),
      dtype=np.int64,
    )
class Solver:
  """Print YES when n is a multiple of 3 or contains the digit 3."""
  def __init__(self):
    self.reader = Reader()
  def __prepare(self):
    # Read the single integer n for this test case.
    self.n = self.reader.read_int()
  def __solve(self):
    has_three = '3' in str(self.n)
    if self.n % 3 == 0 or has_three:
      print('YES')
    else:
      print('NO')
  def run(self):
    """Read the input and print the verdict."""
    self.__prepare()
    self.__solve()
def main():
  """Entry point: run the solver for a single test case."""
  num_cases = 1
  # num_cases = Reader.read_int()
  for _ in range(num_cases):
    Solver().run()
if __name__ == '__main__':
main() | 14.48062 | 33 | 0.558351 |
795698164ac050be457db63b05c07e909e06d2a2 | 441 | py | Python | data_structure_helpers/tests/test_list_helpers.py | ChristopherHaydenTodd/ctodd-python-lib-data-structures | 4d9ad3ce7a066993cdff337fd0d3ca0faed75bbc | [
"MIT"
] | 1 | 2019-02-24T14:04:37.000Z | 2019-02-24T14:04:37.000Z | data_structure_helpers/tests/test_list_helpers.py | ChristopherHaydenTodd/ctodd-python-lib-data-structures | 4d9ad3ce7a066993cdff337fd0d3ca0faed75bbc | [
"MIT"
] | null | null | null | data_structure_helpers/tests/test_list_helpers.py | ChristopherHaydenTodd/ctodd-python-lib-data-structures | 4d9ad3ce7a066993cdff337fd0d3ca0faed75bbc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Purpose:
Test File for list_helpers.py
"""
# Python Library Imports
import os
import sys
import pytest
from unittest import mock
# Import File to Test
from data_structure_helpers import list_helpers
###
# Fixtures
###
# None at the Moment (Empty Test Suite)
###
# Mocked Functions
###
# None at the Moment (Empty Test Suite)
###
# Test Payload
###
# None at the Moment (Empty Test Suite)
| 11.307692 | 47 | 0.680272 |
79569885bc3f6c1a7426588de780d836f075d64d | 873 | py | Python | textbooks/nltk_book/www.nltk.org/book/pylisting/code_toolbox_validation.py | motazsaad/NLP-ICTS6361 | 1d44a187dc966ac45bbd236384d059347efbde50 | [
"Apache-2.0"
] | 10 | 2019-09-20T21:40:03.000Z | 2021-06-22T23:42:44.000Z | textbooks/nltk_book/www.nltk.org/book/pylisting/code_toolbox_validation.py | motazsaad/NLP-ICTS6361 | 1d44a187dc966ac45bbd236384d059347efbde50 | [
"Apache-2.0"
] | null | null | null | textbooks/nltk_book/www.nltk.org/book/pylisting/code_toolbox_validation.py | motazsaad/NLP-ICTS6361 | 1d44a187dc966ac45bbd236384d059347efbde50 | [
"Apache-2.0"
] | 9 | 2019-10-26T07:12:29.000Z | 2022-03-29T03:39:08.000Z | # Natural Language Toolkit: code_toolbox_validation
grammar = nltk.CFG.fromstring('''
S -> Head PS Glosses Comment Date Sem_Field Examples
Head -> Lexeme Root
Lexeme -> "lx"
Root -> "rt" |
PS -> "ps"
Glosses -> Gloss Glosses |
Gloss -> "ge" | "tkp" | "eng"
Date -> "dt"
Sem_Field -> "sf"
Examples -> Example Ex_Pidgin Ex_English Examples |
Example -> "ex"
Ex_Pidgin -> "xp"
Ex_English -> "xe"
Comment -> "cmt" | "nt" |
''')
def validate_lexicon(grammar, lexicon, ignored_tags):
    """Parse each entry's field-marker sequence against *grammar*.

    Prints "+" followed by the colon-joined marker sequence for entries the
    grammar accepts, and "-" for entries it rejects.
    """
    rd_parser = nltk.RecursiveDescentParser(grammar)
    for entry in lexicon:
        markers = [field.tag for field in entry if field.tag not in ignored_tags]
        accepted = bool(list(rd_parser.parse(markers)))
        print("+" if accepted else "-", ':'.join(markers))
| 30.103448 | 85 | 0.630011 |
795698c4106804f01241ee98a63ac3fce66a01c7 | 3,638 | py | Python | statick_tool/discovery_plugin.py | kogut/statick | 6cbe43b5ac78275a12af3ac5aa325833368d0767 | [
"CC0-1.0"
] | 54 | 2018-08-27T19:12:41.000Z | 2022-03-31T04:16:40.000Z | statick_tool/discovery_plugin.py | gregtkogut/statick | 11a8f950d50b52903a25f4f00c7cd52a90eff56c | [
"CC0-1.0"
] | 288 | 2018-08-28T13:17:44.000Z | 2022-03-21T20:05:19.000Z | statick_tool/discovery_plugin.py | gregtkogut/statick | 11a8f950d50b52903a25f4f00c7cd52a90eff56c | [
"CC0-1.0"
] | 12 | 2018-08-28T13:18:39.000Z | 2022-03-21T19:27:16.000Z | """Discovery plugin."""
import logging
import os
import shutil
import subprocess
import sys
from typing import Any, List, Optional, Union

from yapsy.IPlugin import IPlugin

from statick_tool.exceptions import Exceptions
from statick_tool.package import Package
from statick_tool.plugin_context import PluginContext
class DiscoveryPlugin(IPlugin):  # type: ignore
    """Default implementation of discovery plugin."""

    plugin_context = None

    def get_name(self) -> Optional[str]:
        """Get name of plugin."""

    @classmethod
    def get_discovery_dependencies(cls) -> List[str]:
        """Get a list of discovery plugins that must run before this one."""
        return []

    def gather_args(self, args: Any) -> None:
        """Gather arguments for plugin."""

    def scan(
        self, package: Package, level: str, exceptions: Optional[Exceptions] = None
    ) -> None:
        """Scan package to discover files for analysis.

        If exceptions is passed, then the plugin should (if practical) use it to filter
        which files the plugin detects.
        """

    def find_files(self, package: Package) -> None:
        """Walk the package path exactly once to discover files for analysis."""
        if package._walked:  # pylint: disable=protected-access
            return

        for root, _, files in os.walk(package.path):
            for fname in files:
                full_path = os.path.join(root, fname)
                abs_path = os.path.abspath(full_path)
                file_output = self.get_file_cmd_output(full_path)
                file_dict = {
                    "name": fname.lower(),
                    "path": abs_path,
                    "file_cmd_out": file_output,
                }
                package.files[abs_path] = file_dict

        package._walked = True  # pylint: disable=protected-access

    def get_file_cmd_output(self, full_path: str) -> str:
        """Run the file command (if it exists) on the supplied path.

        The output from the file command is converted to lowercase.
        There are two recommended ways to check it:
        1. When searching for a single string just use the python "in" operator:
           if "search string" in fild_dict["file_cmd_out"]:
        2. When searching for multiple different strings, use the `any()` function:
           expected_output = ("output_1", "output_2")
           if any(item in file_dict["file_cmd_out"] for item in expected_output):
        """
        if not self.file_command_exists():
            return ""

        try:
            output: str = subprocess.check_output(
                ["file", full_path], universal_newlines=True
            )
            return output.lower()
        except subprocess.CalledProcessError as ex:
            logging.warning(
                "Failed to run 'file' command. Returncode = %d", ex.returncode
            )
            logging.warning("Exception output: %s", ex.output)
            return ""

    def set_plugin_context(self, plugin_context: Union[None, PluginContext]) -> None:
        """Set the plugin context."""
        self.plugin_context = plugin_context

    @staticmethod
    def file_command_exists() -> bool:
        """Return whether the 'file' command is available on $PATH."""
        # shutil.which performs the $PATH scan (and honors PATHEXT on
        # Windows), replacing the previous manual search that special-cased
        # "file.exe".
        return shutil.which("file") is not None
| 34.320755 | 87 | 0.61105 |
79569a2fef7671c8bf74e86781c95074083f2ebc | 13,866 | py | Python | apps/cif_input/PyCifRW/YappsStarParser_1_0.py | mtaillefumier/SIRIUS | 50ec1c202c019113c5660f1966b170dec9dfd4d4 | [
"BSD-2-Clause"
] | 77 | 2016-03-18T08:38:30.000Z | 2022-03-11T14:06:25.000Z | apps/cif_input/PyCifRW/YappsStarParser_1_0.py | simonpintarelli/SIRIUS | f4b5c4810af2a3ea1e67992d65750535227da84b | [
"BSD-2-Clause"
] | 240 | 2016-04-12T16:39:11.000Z | 2022-03-31T08:46:12.000Z | apps/cif_input/PyCifRW/YappsStarParser_1_0.py | simonpintarelli/SIRIUS | f4b5c4810af2a3ea1e67992d65750535227da84b | [
"BSD-2-Clause"
] | 43 | 2016-03-18T17:45:07.000Z | 2022-02-28T05:27:59.000Z | from StarFile import StarBlock,StarFile
# An alternative specification for the Cif Parser, based on Yapps2
# by Amit Patel (http://theory.stanford.edu/~amitp/Yapps)
#
# helper code: we define our match tokens
# Repr of the most recent value seen by monitor(); debugging aid only.
lastval = ''
# Debug hook threaded through the parser actions: records a repr of
# `value` and passes the value back unchanged.  (Backtick repr syntax:
# this module targets Python 2.)
def monitor(location,value):
    global lastval
    #print 'At %s: %s' % (location,`value`)
    lastval = `value`
    return value
# Strip extras gets rid of leading and trailing whitespace, and
# semicolons.
def stripextras(value):
    # Normalise a raw data value: semicolon-delimited multi-line strings
    # lose their delimiters and have the STAR line-prefix/line-folding
    # protocols undone; otherwise leading whitespace is dropped and any
    # surrounding quotes are removed via stripstring().
    from StarFile import remove_line_folding, remove_line_prefix
    # we get rid of semicolons and leading/trailing terminators etc.
    import re
    jj = re.compile("[\n\r\f \t\v]*")
    semis = re.compile("[\n\r\f \t\v]*[\n\r\f]\n*;")
    cut = semis.match(value)
    if cut: #we have a semicolon-delimited string
        # Slice off the opening delimiter and the trailing "\n;".
        nv = value[cut.end():len(value)-2]
        try:
            if nv[-1]=='\r': nv = nv[:-1]
        except IndexError: #empty data value
            pass
        # apply protocols
        nv = remove_line_prefix(nv)
        nv = remove_line_folding(nv)
        return nv
    else:
        cut = jj.match(value)
        if cut:
            return stripstring(value[cut.end():])
        return value
# helper function to get rid of inverted commas etc.
def stripstring(value):
    """Remove one matching layer of surrounding quotes (single or double).

    Empty or unquoted values are returned untouched.
    """
    if not value:
        return value
    for quote in ("'", '"'):
        if value[0] == quote and value[-1] == quote:
            return value[1:-1]
    return value
# helper function to get rid of triple quotes
def striptriple(value):
    """Remove one matching layer of triple quotes from *value*, if present."""
    if value:
        for delim in ('"""', "'''"):
            if value[:3] == delim and value[-3:] == delim:
                return value[3:-3]
    return value
# helper function to populate a StarBlock given a list of names
# and values .
#
# Note that there may be an empty list at the very end of our itemlists,
# so we remove that if necessary.
#
def makeloop(target_block,loopdata):
    # Distribute the flat list of loop values column-wise over the loop
    # data names, then register the loop on target_block.
    loop_seq,itemlists = loopdata
    if itemlists[-1] == []: itemlists.pop(-1)
    # print 'Making loop with %s' % `itemlists`
    step_size = len(loop_seq)
    for col_no in range(step_size):
        target_block.AddItem(loop_seq[col_no], itemlists[col_no::step_size],precheck=True)
    # print 'Makeloop constructed %s' % `loopstructure`
    # now construct the loop
    try:
        target_block.CreateLoop(loop_seq) #will raise ValueError on problem
    except ValueError:
        # Python 2 syntax below: `repr` backticks, print >> redirection and
        # the two-argument raise form.
        error_string = 'Incorrect number of loop values for loop containing %s' % `loop_seq`
        print >>sys.stderr, error_string
        raise ValueError, error_string
# return an object with the appropriate amount of nesting
def make_empty(nestlevel):
    """Return an empty list wrapped in ``nestlevel - 1`` extra list layers."""
    nested = []
    for _ in range(max(nestlevel - 1, 0)):
        nested = [nested]
    return nested
# this function updates a dictionary first checking for name collisions,
# which imply that the CIF is invalid. We need case insensitivity for
# names.
# Unfortunately we cannot check loop item contents against non-loop contents
# in a non-messy way during parsing, as we may not have easy access to previous
# key value pairs in the context of our call (unlike our built-in access to all
# previous loops).
# For this reason, we don't waste time checking looped items against non-looped
# names during parsing of a data block. This would only match a subset of the
# final items. We do check against ordinary items, however.
#
# Note the following situations:
# (1) new_dict is empty -> we have just added a loop; do no checking
# (2) new_dict is not empty -> we have some new key-value pairs
#
def cif_update(old_dict,new_dict,loops):
    # NOTE(review): `loops` is accepted but never used in this body.
    # Python 2: map() returns a list here, so the `in` membership test
    # below operates on a fully realised list of lower-cased keys.
    old_keys = map(lambda a:a.lower(),old_dict.keys())
    if new_dict != {}: # otherwise we have a new loop
        #print 'Comparing %s to %s' % (`old_keys`,`new_dict.keys()`)
        for new_key in new_dict.keys():
            if new_key.lower() in old_keys:
                raise CifError, "Duplicate dataname or blockname %s in input file" % new_key
            old_dict[new_key] = new_dict[new_key]
#
# this takes two lines, so we couldn't fit it into a one line execution statement...
def order_update(order_array,new_name):
    """Append *new_name* to *order_array* in place and return it."""
    order_array += [new_name]
    return new_name
# and finally...turn a sequence into a python dict (thanks to Stackoverflow)
def pairwise(iterable):
    # Yields successive non-overlapping (key, value) pairs, e.g.
    # [a, b, c, d] -> (a, b), (c, d).  Python 2 only: uses the .next bound
    # method and relies on StopIteration terminating the generator, which
    # PEP 479 disallows on Python 3.7+.
    itnext = iter(iterable).next
    while 1:
        yield itnext(), itnext()
# Begin -- grammar generated by Yapps
import sys, re
import yapps3_compiled_rt as yappsrt
class StarParserScanner(yappsrt.Scanner):
    # Generated by Yapps -- regenerate from the grammar rather than
    # hand-editing.  `patterns` is an ordered list of
    # (token name, compiled regex) pairs tried in turn by the scanner.
    patterns = [
        ('([ \t\n\r](?!;))|[ \t]', re.compile('([ \t\n\r](?!;))|[ \t]')),
        ('(#.*[\n\r](?!;))|(#.*)', re.compile('(#.*[\n\r](?!;))|(#.*)')),
        ('LBLOCK', re.compile('(L|l)(O|o)(O|o)(P|p)_')),
        ('GLOBAL', re.compile('(G|g)(L|l)(O|o)(B|b)(A|a)(L|l)_')),
        ('STOP', re.compile('(S|s)(T|t)(O|o)(P|p)_')),
        ('save_heading', re.compile('(S|s)(A|a)(V|v)(E|e)_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_-]+')),
        ('save_end', re.compile('(S|s)(A|a)(V|v)(E|e)_')),
        ('data_name', re.compile('_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_-]+')),
        ('data_heading', re.compile('(D|d)(A|a)(T|t)(A|a)_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_-]+')),
        ('start_sc_line', re.compile('(\n|\r\n);([^\n\r])*(\r\n|\r|\n)+')),
        ('sc_line_of_text', re.compile('[^;\r\n]([^\r\n])*(\r\n|\r|\n)+')),
        ('end_sc_line', re.compile(';')),
        ('data_value_1', re.compile('((?!(((S|s)(A|a)(V|v)(E|e)_[^\\s]*)|((G|g)(L|l)(O|o)(B|b)(A|a)(L|l)_[^\\s]*)|((S|s)(T|t)(O|o)(P|p)_[^\\s]*)|((D|d)(A|a)(T|t)(A|a)_[^\\s]*)))[^\\s"#$\'_][^\\s]*)|\'((\'(?=\\S))|([^\n\r\x0c\']))*\'+|"(("(?=\\S))|([^\n\r"]))*"+')),
        ('END', re.compile('$')),
    ]
    def __init__(self, str):
        # The two leading patterns (whitespace and comments) are ignored.
        yappsrt.Scanner.__init__(self,None,['([ \t\n\r](?!;))|[ \t]', '(#.*[\n\r](?!;))|(#.*)'],str)
class StarParser(yappsrt.Parser):
    # Generated by Yapps from the STAR grammar -- regenerate rather than
    # hand-editing.  Each method parses one grammar rule; _peek/_scan are
    # provided by yappsrt.Parser.
    Context = yappsrt.Context
    def input(self, prepared, _parent=None):
        # Top-level rule: a sequence of data blocks merged into `prepared`.
        _context = self.Context(_parent, self._scanner, self._pos, 'input', [prepared])
        _token = self._peek('END', 'data_heading')
        if _token == 'data_heading':
            dblock = self.dblock(prepared, _context)
            allblocks = prepared;allblocks.merge_fast(dblock)
            while self._peek('END', 'data_heading') == 'data_heading':
                dblock = self.dblock(prepared, _context)
                allblocks.merge_fast(dblock)
            if self._peek() not in ['END', 'data_heading']:
                raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['END', 'data_heading']))
            END = self._scan('END')
        else: # == 'END'
            END = self._scan('END')
            allblocks = prepared
        return allblocks
    def dblock(self, prepared, _parent=None):
        # One data_... block: key/value items, loops and save frames.
        _context = self.Context(_parent, self._scanner, self._pos, 'dblock', [prepared])
        data_heading = self._scan('data_heading')
        heading = data_heading[5:];thisbc=StarFile(characterset='unicode',standard=prepared.standard);thisbc.NewBlock(heading,StarBlock(overwrite=False))
        while self._peek('save_heading', 'LBLOCK', 'data_name', 'save_end', 'END', 'data_heading') in ['save_heading', 'LBLOCK', 'data_name']:
            _token = self._peek('save_heading', 'LBLOCK', 'data_name')
            if _token != 'save_heading':
                dataseq = self.dataseq(thisbc[heading], _context)
            else: # == 'save_heading'
                save_frame = self.save_frame(_context)
                thisbc.merge_fast(save_frame,parent=heading)
        if self._peek() not in ['save_heading', 'LBLOCK', 'data_name', 'save_end', 'END', 'data_heading']:
            raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['save_heading', 'LBLOCK', 'data_name', 'save_end', 'END', 'data_heading']))
        thisbc[heading].setmaxnamelength(thisbc[heading].maxnamelength);return (monitor('dblock',thisbc))
    def dataseq(self, starblock, _parent=None):
        # One or more data items / loops appended to `starblock`.
        _context = self.Context(_parent, self._scanner, self._pos, 'dataseq', [starblock])
        data = self.data(starblock, _context)
        while self._peek('LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading') in ['LBLOCK', 'data_name']:
            data = self.data(starblock, _context)
        if self._peek() not in ['LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']:
            raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']))
    def data(self, currentblock, _parent=None):
        # Either a loop_ construct or a single name/value pair.
        _context = self.Context(_parent, self._scanner, self._pos, 'data', [currentblock])
        _token = self._peek('LBLOCK', 'data_name')
        if _token == 'LBLOCK':
            top_loop = self.top_loop(_context)
            makeloop(currentblock,top_loop)
        else: # == 'data_name'
            datakvpair = self.datakvpair(_context)
            currentblock.AddItem(datakvpair[0],datakvpair[1],precheck=True)
    def datakvpair(self, _parent=None):
        # A _data_name followed by its value.
        _context = self.Context(_parent, self._scanner, self._pos, 'datakvpair', [])
        data_name = self._scan('data_name')
        data_value = self.data_value(_context)
        return [data_name,data_value]
    def data_value(self, _parent=None):
        # Either an inline (possibly quoted) value or a semicolon block.
        _context = self.Context(_parent, self._scanner, self._pos, 'data_value', [])
        _token = self._peek('data_value_1', 'start_sc_line')
        if _token == 'data_value_1':
            data_value_1 = self._scan('data_value_1')
            thisval = stripstring(data_value_1)
        else: # == 'start_sc_line'
            sc_lines_of_text = self.sc_lines_of_text(_context)
            thisval = stripextras(sc_lines_of_text)
        return monitor('data_value',thisval)
    def sc_lines_of_text(self, _parent=None):
        # Accumulate a semicolon-delimited multi-line text field.
        _context = self.Context(_parent, self._scanner, self._pos, 'sc_lines_of_text', [])
        start_sc_line = self._scan('start_sc_line')
        lines = start_sc_line
        while self._peek('end_sc_line', 'sc_line_of_text') == 'sc_line_of_text':
            sc_line_of_text = self._scan('sc_line_of_text')
            lines = lines+sc_line_of_text
        if self._peek() not in ['end_sc_line', 'sc_line_of_text']:
            raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['sc_line_of_text', 'end_sc_line']))
        end_sc_line = self._scan('end_sc_line')
        return monitor('sc_line_of_text',lines+end_sc_line)
    def top_loop(self, _parent=None):
        # loop_ header: a field list followed by its flat value list.
        _context = self.Context(_parent, self._scanner, self._pos, 'top_loop', [])
        LBLOCK = self._scan('LBLOCK')
        loopfield = self.loopfield(_context)
        loopvalues = self.loopvalues(_context)
        return loopfield,loopvalues
    def loopfield(self, _parent=None):
        # Zero or more _data_name fields naming the loop columns.
        _context = self.Context(_parent, self._scanner, self._pos, 'loopfield', [])
        toploop=[]
        while self._peek('data_name', 'data_value_1', 'start_sc_line') == 'data_name':
            data_name = self._scan('data_name')
            toploop.append(data_name)
        if self._peek() not in ['data_name', 'data_value_1', 'start_sc_line']:
            raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_name', 'data_value_1', 'start_sc_line']))
        return toploop
    def loopvalues(self, _parent=None):
        # One or more values filling the loop row-major.
        _context = self.Context(_parent, self._scanner, self._pos, 'loopvalues', [])
        data_value = self.data_value(_context)
        dataloop=[data_value]
        while self._peek('data_value_1', 'start_sc_line', 'LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading') in ['data_value_1', 'start_sc_line']:
            data_value = self.data_value(_context)
            dataloop.append(monitor('loopval',data_value))
        if self._peek() not in ['data_value_1', 'start_sc_line', 'LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']:
            raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_value_1', 'start_sc_line', 'LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']))
        return dataloop
    def save_frame(self, _parent=None):
        # save_... frame: like a data block, terminated by save_.
        _context = self.Context(_parent, self._scanner, self._pos, 'save_frame', [])
        save_heading = self._scan('save_heading')
        savehead = save_heading[5:];savebc = StarFile();savebc.NewBlock(savehead,StarBlock(overwrite=False))
        while self._peek('save_end', 'save_heading', 'LBLOCK', 'data_name', 'END', 'data_heading') in ['save_heading', 'LBLOCK', 'data_name']:
            _token = self._peek('save_heading', 'LBLOCK', 'data_name')
            if _token != 'save_heading':
                dataseq = self.dataseq(savebc[savehead], _context)
            else: # == 'save_heading'
                save_frame = self.save_frame(_context)
                savebc.merge_fast(save_frame,parent=savehead)
        if self._peek() not in ['save_end', 'save_heading', 'LBLOCK', 'data_name', 'END', 'data_heading']:
            raise yappsrt.SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['save_end', 'save_heading', 'LBLOCK', 'data_name', 'END', 'data_heading']))
        save_end = self._scan('save_end')
        return monitor('save_frame',savebc)
def parse(rule, text):
    """Tokenise *text* and run the generated parser starting from *rule*."""
    scanner = StarParserScanner(text)
    return yappsrt.wrap_error_reporter(StarParser(scanner), rule)
# End -- grammar generated by Yapps
| 49.170213 | 265 | 0.618491 |
79569ac2edabe2aa7eb861937d06d846ce1c5204 | 689 | py | Python | test/tests/struct_test.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:28:45.000Z | 2020-02-06T14:28:45.000Z | test/tests/struct_test.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/struct_test.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:29:00.000Z | 2020-02-06T14:29:00.000Z | import struct
# Python 2 test script: exercises struct.pack round-trips and the error
# behaviour for out-of-range values across every integer format code and
# byte-order prefix.
s = struct.pack("II", 1, 1234)
print repr(s)
print struct.unpack("II", s)
# Boundary values: a few fixed patterns plus 2**k and 2**k +/- 1 around
# each signed/unsigned integer width.
nums = [0, 0x98, -0x12, 0x9876, -0x1234, 0x98765432, -0x12345678, 0x9876543212345678, -0x1234567812345678]
for exp in 7, 8, 15, 16, 31, 32, 63, 64:
    nums += [2 ** exp, 2 ** exp - 1, -2 ** exp, -2 ** exp - 1]
# Every integer format code crossed with every byte-order/alignment
# prefix; out-of-range packs should raise struct.error or OverflowError.
for format in "bB?hHiIlLqQP":
    for order in [""] + list("@=<>!"):
        for num in nums:
            try:
                spec = "%s%s" % (order, format)
                print (spec, hex(num)), repr(struct.pack(spec, num))
            except struct.error as e:
                print "struct.error:", e
            except OverflowError as e:
                print "OverflowError:", e
| 34.45 | 106 | 0.532656 |
79569bea9466288f72f30ea2a9534579973e5b30 | 434 | py | Python | s7_python_functions/scope.py | chiehandlu/pythonlearn | 53ba8f0f8edc7df7b09b0f233d52d7145d380ec0 | [
"Apache-2.0"
] | null | null | null | s7_python_functions/scope.py | chiehandlu/pythonlearn | 53ba8f0f8edc7df7b09b0f233d52d7145d380ec0 | [
"Apache-2.0"
] | null | null | null | s7_python_functions/scope.py | chiehandlu/pythonlearn | 53ba8f0f8edc7df7b09b0f233d52d7145d380ec0 | [
"Apache-2.0"
] | null | null | null |
"""
Names defined inside a def can only be seen by the code within that def.
You cannot even refer to such names from outside the function.
"""
# Global scope
x = 99
print('x =', x)
# 這邊的x是全局變量global scope
x += 1
print('x =', x)
def tryTest():
z = x + 1
print('z =', z)
# 這邊的z是局部變量local scope
tryTest()
def test():
global x
# 表示這裡的x是global的x,所以global的x就被更改了
x += 1
# print('z =', z)
test()
print('x =', x)
| 14 | 72 | 0.617512 |
79569c63e994aa8f7cc72dac3a845b6d4a9f8d3e | 50 | py | Python | main.py | HaBaLeS/MqttPixel | 26a6245a5ff787a3165b04d8bde89c39043de25d | [
"Beerware"
] | null | null | null | main.py | HaBaLeS/MqttPixel | 26a6245a5ff787a3165b04d8bde89c39043de25d | [
"Beerware"
] | null | null | null | main.py | HaBaLeS/MqttPixel | 26a6245a5ff787a3165b04d8bde89c39043de25d | [
"Beerware"
] | null | null | null | import machine
def reboot():
    """Hard-reset the board via MicroPython's machine module."""
    machine.reset()
| 10 | 19 | 0.68 |
79569db91cff9ed4a2cde46f633ed960428f2749 | 403 | py | Python | tests/test_a0134gasstation.py | nirofang/pyleet | 600d58ad97028c9a14148af4ef468683a011a515 | [
"MIT"
] | 3 | 2019-11-06T13:10:38.000Z | 2021-11-17T07:29:54.000Z | tests/test_a0134gasstation.py | nirofang/pyleet | 600d58ad97028c9a14148af4ef468683a011a515 | [
"MIT"
] | 1 | 2020-12-17T22:18:05.000Z | 2020-12-17T22:18:05.000Z | tests/test_a0134gasstation.py | nirofang/pyleet | 600d58ad97028c9a14148af4ef468683a011a515 | [
"MIT"
] | 1 | 2019-11-06T13:10:45.000Z | 2019-11-06T13:10:45.000Z | from solutions.a0134gasstation import Solution
# Shared Solution instance used by all tests in this module.
solution = Solution()
def test_canCompleteCircuit1():
    """Circuit is completable starting from station index 3."""
    gas, cost = [1, 2, 3, 4, 5], [3, 4, 5, 1, 2]
    assert solution.canCompleteCircuit(gas, cost) == 3
def test_canCompleteCircuit2():
    """Total cost exceeds total gas, so no starting index works."""
    gas, cost = [2, 3, 4], [3, 4, 3]
    assert solution.canCompleteCircuit(gas, cost) == -1
| 20.15 | 49 | 0.669975 |
79569de02bd529bb54f50aba2a6ce01bc901b578 | 6,041 | py | Python | zipline/utils/functional.py | degiere/zipline | bc0b117dc94b8e93081818964e3b1bdbf9b33abb | [
"Apache-2.0"
] | null | null | null | zipline/utils/functional.py | degiere/zipline | bc0b117dc94b8e93081818964e3b1bdbf9b33abb | [
"Apache-2.0"
] | null | null | null | zipline/utils/functional.py | degiere/zipline | bc0b117dc94b8e93081818964e3b1bdbf9b33abb | [
"Apache-2.0"
] | 1 | 2019-09-20T01:08:33.000Z | 2019-09-20T01:08:33.000Z | from pprint import pformat
from six import viewkeys
from six.moves import map, zip
from toolz import curry
@curry
def apply(f, *args, **kwargs):
    """Apply a function to arguments.

    Parameters
    ----------
    f : callable
        The function to call.
    *args, **kwargs
        Arguments to feed to the callable.

    Returns
    -------
    a : any
        The result of ``f(*args, **kwargs)``

    Examples
    --------
    >>> from toolz.curried.operator import add, sub
    >>> fs = add(1), sub(1)
    >>> tuple(map(apply, fs, (1, 2)))
    (2, -1)

    Class decorator

    >>> instance = apply
    >>> @instance
    ... class obj:
    ...     def f(self):
    ...         return 'f'
    ...
    >>> obj.f()
    'f'
    >>> issubclass(obj, object)
    Traceback (most recent call last):
        ...
    TypeError: issubclass() arg 1 must be a class
    >>> isinstance(obj, type)
    False

    See Also
    --------
    unpack_apply
    mapply
    """
    return f(*args, **kwargs)
# Alias for use as a class decorator.
instance = apply
def mapall(funcs, seq):
    """Lazily apply each function in *funcs* to every element of *seq*.

    Parameters
    ----------
    funcs : iterable[function]
        Sequence of functions to map over `seq`.
    seq : iterable
        Sequence over which to map funcs.

    Yields
    ------
    elem : object
        Concatenated result of mapping each ``func`` over ``seq``.

    Example
    -------
    >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
    [2, 3, 4, 0, 1, 2]
    """
    for func in funcs:
        yield from map(func, seq)
def same(*values):
    """
    Check if all values in a sequence are equal.

    Returns True on empty sequences.

    Example
    -------
    >>> same(1, 1, 1, 1)
    True
    >>> same(1, 2, 1)
    False
    >>> same()
    True
    """
    iterator = iter(values)
    try:
        first = next(iterator)
    except StopIteration:
        return True
    return all(value == first for value in iterator)
def _format_unequal_keys(dicts):
return pformat([sorted(d.keys()) for d in dicts])
def dzip_exact(*dicts):
    """
    Parameters
    ----------
    *dicts : iterable[dict]
        A sequence of dicts all sharing the same keys.

    Returns
    -------
    zipped : dict
        A dict whose keys are the union of all keys in *dicts*, and whose
        values are tuples of length ``len(dicts)`` containing the result
        of looking up each key in each dict.

    Raises
    ------
    ValueError
        If dicts don't all have the same keys.

    Example
    -------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    reference = dicts[0]
    return {key: tuple(d[key] for d in dicts) for key in reference}
def _gen_unzip(it, elem_len):
"""Helper for unzip which checks the lengths of each element in it.
Parameters
----------
it : iterable[tuple]
An iterable of tuples. ``unzip`` should map ensure that these are
already tuples.
elem_len : int or None
The expected element length. If this is None it is infered from the
length of the first element.
Yields
------
elem : tuple
Each element of ``it``.
Raises
------
ValueError
Raised when the lengths do not match the ``elem_len``.
"""
elem = next(it)
first_elem_len = len(elem)
if elem_len is not None and elem_len != first_elem_len:
raise ValueError(
'element at index 0 was length %d, expected %d' % (
first_elem_len,
elem_len,
)
)
else:
elem_len = first_elem_len
yield elem
for n, elem in enumerate(it, 1):
if len(elem) != elem_len:
raise ValueError(
'element at index %d was length %d, expected %d' % (
n,
len(elem),
elem_len,
),
)
yield elem
def unzip(seq, elem_len=None):
    """Transpose a length-n sequence of length-m sequences into m separate
    length-n tuples.

    Parameters
    ----------
    seq : iterable[iterable]
        The sequence to unzip.
    elem_len : int, optional
        The expected length of each element of ``seq``. When omitted it is
        infered from the first element of ``seq``; passing it explicitly lets
        code like ``a, b = unzip(seq)`` succeed even when ``seq`` is empty.

    Returns
    -------
    seqs : iterable[iterable]
        The new sequences pulled out of the first iterable.

    Raises
    ------
    ValueError
        If ``seq`` is empty and ``elem_len`` was not provided, or if any
        element of ``seq`` does not match ``elem_len`` (or the length of the
        first element of ``seq``).

    Examples
    --------
    >>> cs, ns = unzip([('a', 1), ('b', 2), ('c', 3)])
    >>> cs
    ('a', 'b', 'c')
    >>> ns
    (1, 2, 3)
    >>> cs, ns = unzip([], elem_len=2)
    >>> cs == ns == ()
    True

    Notes
    -----
    This function will force ``seq`` to completion.
    """
    # _gen_unzip validates each element's width lazily while zip transposes.
    checked = _gen_unzip(map(tuple, seq), elem_len)
    transposed = tuple(zip(*checked))
    if transposed:
        return transposed
    # Nothing was produced: either genuinely empty input or unknowable width.
    if elem_len is None:
        raise ValueError("cannot unzip empty sequence without 'elem_len'")
    return ((),) * elem_len
| 24.657143 | 79 | 0.540473 |
79569e42da0d7272105fb92a5c6b26c7175b5e46 | 838 | py | Python | app.py | yannicfreson/MiFurst_API | 6677decfda06cfd092ebb460c5ddf3ad71516fde | [
"MIT"
] | null | null | null | app.py | yannicfreson/MiFurst_API | 6677decfda06cfd092ebb460c5ddf3ad71516fde | [
"MIT"
] | null | null | null | app.py | yannicfreson/MiFurst_API | 6677decfda06cfd092ebb460c5ddf3ad71516fde | [
"MIT"
] | null | null | null | from flask import Flask
from flask_restful import Api, Resource, reqparse
import random
# Flask application and its flask-restful API wrapper.
app = Flask(__name__)
api = Api(app)
# The twenty classic Magic 8-Ball answers, served verbatim by the Quote resource.
quotes = [
    "It is certain.",
    "It is decidedly so.",
    "Without a doubt.",
    "Yes - definitely.",
    "You may rely on it.",
    "As I see it, yes.",
    "Most likely.",
    "Outlook good.",
    "Yes.",
    "Signs point to yes.",
    "Reply hazy, try again.",
    "Ask again later.",
    "Better not tell you now.",
    "Cannot predict now.",
    "Concentrate and ask again.",
    "Don't count on it.",
    "My reply is no.",
    "My sources say no.",
    "Outlook not so good.",
    "Very doubtful."
]
class Quote(Resource):
    """REST resource that serves a random Magic 8-Ball answer."""

    def get(self):
        """Handle GET by returning one randomly chosen quote and HTTP 200."""
        index = random.randrange(len(quotes))
        return quotes[index], 200
# Register the resource under both spellings of the endpoint
# (with and without a trailing slash).
api.add_resource(Quote, "/eightball", "/eightball/")
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| 21.487179 | 57 | 0.600239 |
79569f04bff92aaf5ce42a2e62e57531ba95acef | 1,159 | py | Python | pex/dist_metadata.py | jneuff/pex | f7eb554ecdbfdb9cd24a793b4dda358035e26855 | [
"Apache-2.0"
] | null | null | null | pex/dist_metadata.py | jneuff/pex | f7eb554ecdbfdb9cd24a793b4dda358035e26855 | [
"Apache-2.0"
] | null | null | null | pex/dist_metadata.py | jneuff/pex | f7eb554ecdbfdb9cd24a793b4dda358035e26855 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import email
from pex.third_party.packaging.specifiers import SpecifierSet
from pex.third_party.pkg_resources import DistInfoDistribution, Distribution
from pex.typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Optional
def requires_python(dist):
    # type: (Distribution) -> Optional[SpecifierSet]
    """Examines dist for `Python-Requires` metadata and returns version constraints if any.
    See: https://www.python.org/dev/peps/pep-0345/#requires-python
    :param dist: A distribution to check for `Python-Requires` metadata.
    :return: The required python version specifiers, or None if the distribution has no
             PKG-INFO metadata or no `Requires-Python` field.
    """
    if not dist.has_metadata(DistInfoDistribution.PKG_INFO):
        return None
    metadata = dist.get_metadata(DistInfoDistribution.PKG_INFO)
    # Use `email.message_from_string`, which the `email` package itself exports and which
    # is defined as `Parser().parsestr(s)`. The previous `email.parser.Parser()` relied on
    # the `parser` submodule attribute existing after a bare `import email`, which CPython
    # only guarantees once something else has imported `email.parser`.
    pkg_info = email.message_from_string(metadata)
    python_requirement = pkg_info.get("Requires-Python")
    if not python_requirement:
        return None
    return SpecifierSet(python_requirement)
| 33.114286 | 91 | 0.765315 |
79569f878b44d7ae7e778628940246d444b6a890 | 21,557 | py | Python | src/odin/adapters/parameter_tree.py | odin-detector/odin-control | 366085b4fc04c620ee2e9e040a883f5d3d70d5a0 | [
"Apache-2.0"
] | 4 | 2018-05-24T13:38:23.000Z | 2021-08-18T08:32:54.000Z | src/odin/adapters/parameter_tree.py | odin-detector/odin-control | 366085b4fc04c620ee2e9e040a883f5d3d70d5a0 | [
"Apache-2.0"
] | 20 | 2018-04-10T09:28:01.000Z | 2022-03-17T11:43:59.000Z | src/odin/adapters/parameter_tree.py | odin-detector/odin-control | 366085b4fc04c620ee2e9e040a883f5d3d70d5a0 | [
"Apache-2.0"
] | 3 | 2017-06-07T13:28:38.000Z | 2019-07-16T10:02:21.000Z | """ParameterTree - classes representing a tree of parameters and their accessor methods.
This module implements an arbitrarily-structured, recursively-managed tree of parameters and
the appropriate accessor methods that are used to read and write those parameters. Its
particular use is in the definition of a tree of parameters for an API adapter and help
interfacing of those to the underlying device or object.
James Hogge, Tim Nicholls, STFC Application Engineering Group.
"""
import warnings
class ParameterTreeError(Exception):
    """Error raised when a parameter tree operation fails."""
class ParameterAccessor(object):
    """Container class representing accessor methods for a parameter.

    This class implements a parameter accessor, providing set and get methods
    for parameters requiring calls to access them, or simply returning the
    appropriate value if the parameter is a read-only constant. Parameter accessors also
    contain metadata fields controlling access to and providing information about the parameter.

    Valid specifiable metadata fields are:
        min : minimum allowed value for parameter
        max : maxmium allowed value for parameter
        allowed_values: list of allowed values for parameter
        name : readable parameter name
        description: longer description of parameter
        units: parameter units
        display_precision: number of decimal places to display for e.g. float types

    The class also maintains the following automatically-populated metadata fields:
        type: parameter type
        writeable: is the parameter writable
    """

    # Valid metadata arguments that can be passed to ParameterAccessor __init__ method.
    VALID_METADATA_ARGS = (
        "min", "max", "allowed_values", "name", "description", "units", "display_precision"
    )

    # Automatically-populated metadata fields based on inferred type of the parameter and
    # writeable status depending on specified accessors
    AUTO_METADATA_FIELDS = ("type", "writeable")

    def __init__(self, path, getter=None, setter=None, **kwargs):
        """Initialise the ParameterAccessor instance.

        This constructor initialises the ParameterAccessor instance, storing
        the path of the parameter, its set/get accessors and setting metadata fields based
        on the the specified keyword arguments.

        :param path: path of the parameter within the tree (with trailing separator)
        :param getter: get method for the parameter, or a value if read-only constant
        :param setter: set method for the parameter
        :param kwargs: keyword argument list for metadata fields to be set; these must be from
            the allowed list specified in ParameterAccessor.VALID_METADATA_ARGS
        :raises ParameterTreeError: if an invalid metadata keyword argument is given
        """
        # Initialise path (stripping the trailing separator), getter and setter
        self.path = path[:-1]
        self._get = getter
        self._set = setter

        # Initialize metadata dict
        self.metadata = {}

        # Check metadata keyword arguments are valid
        for arg in kwargs:
            if arg not in ParameterAccessor.VALID_METADATA_ARGS:
                raise ParameterTreeError("Invalid metadata argument: {}".format(arg))

        # Update metadata keywords from arguments
        self.metadata.update(kwargs)

        # Resolve the current value exactly once: the getter may be expensive or have
        # side effects, so derive both the stored type (used for type-checking in set())
        # and the type metadata field from the same call.
        self._type = type(self.get())
        self.metadata["type"] = self._type.__name__

        # A parameter is read-only when it exposes a callable getter but no callable
        # setter; all other combinations (including plain stored values) are writeable.
        if not callable(self._set) and callable(self._get):
            self.metadata["writeable"] = False
        else:
            self.metadata["writeable"] = True

    def get(self, with_metadata=False):
        """Get the value of the parameter.

        This method returns the value of the parameter, or the value returned
        by the get accessor if one is defined (i.e. is callable). If the with_metadata
        argument is true, the value is returned in a dictionary including all metadata
        for the parameter.

        :param with_metadata: include metadata in the response when set to True
        :returns: value of the parameter (or a dict of value plus metadata)
        """
        # Determine the value of the parameter by calling the getter or simply from the
        # stored value
        if callable(self._get):
            value = self._get()
        else:
            value = self._get

        # If metadata is requested, replace the value with a dict containing the value
        # itself plus metadata fields
        if with_metadata:
            value = {"value": value}
            value.update(self.metadata)

        return value

    def set(self, value):
        """Set the value of the parameter.

        This method sets the value of the parameter by calling the set accessor
        if defined and callable, otherwise storing the value directly. The value is
        validated against the writeable, type, allowed_values, min and max metadata
        fields before being applied.

        :param value: value to set
        :raises ParameterTreeError: if the parameter is read-only or the value fails
            any metadata validation
        """
        # Raise an error if this parameter is not writeable
        if not self.metadata["writeable"]:
            raise ParameterTreeError("Parameter {} is read-only".format(self.path))

        # Raise an error if the value to be set is not of the same type as the parameter.
        # If the metadata type field is set to NoneType, allow any type to be set; if the
        # value is integer and the parameter is float, also allow it, as JSON does not
        # differentiate numerics in all cases.
        if self.metadata["type"] != "NoneType" and not isinstance(value, self._type):
            if not (isinstance(value, int) and self.metadata["type"] == "float"):
                raise ParameterTreeError(
                    "Type mismatch setting {}: got {} expected {}".format(
                        self.path, type(value).__name__, self.metadata["type"]
                    )
                )

        # Raise an error if allowed_values has been set for this parameter and the value
        # to set is not one of them
        if "allowed_values" in self.metadata and value not in self.metadata["allowed_values"]:
            raise ParameterTreeError(
                "{} is not an allowed value for {}".format(value, self.path)
            )

        # Raise an error if the parameter has a mininum value specified in metadata and
        # the value to set is below this
        if "min" in self.metadata and value < self.metadata["min"]:
            raise ParameterTreeError(
                "{} is below the minimum value {} for {}".format(
                    value, self.metadata["min"], self.path
                )
            )

        # Raise an error if the parameter has a maximum value specified in metadata and
        # the value to set is above this
        if "max" in self.metadata and value > self.metadata["max"]:
            raise ParameterTreeError(
                "{} is above the maximum value {} for {}".format(
                    value, self.metadata["max"], self.path
                )
            )

        # Set the new parameter value, either by calling the setter or updating the local
        # value as appropriate
        if callable(self._set):
            self._set(value)
        elif not callable(self._get):
            self._get = value
class ParameterTree(object):
    """Class implementing a tree of parameters and their accessors.
    This class implements an arbitrarily-structured, recursively-managed tree of parameters and
    the appropriate accessor methods that are used to read and write those parameters. Its
    particular use is in the definition of a tree of parameters for an API adapter and help
    interfacing of those to the underlying device or object.
    """
    # Branch-level metadata keys: entries under these names in a tree dict describe the
    # branch itself rather than child parameters.
    METADATA_FIELDS = ["name", "description"]
    def __init__(self, tree, mutable=False):
        """Initialise the ParameterTree object.
        This constructor recursively initialises the ParameterTree object, based on the parameter
        tree dictionary passed as an argument. This is done recursively, so that a parameter tree
        can have arbitrary depth and contain other ParameterTree instances as necessary.
        Initialisation syntax for ParameterTree is made by passing a dict representing the tree
        as an argument. Children of a node at any level of the tree are described with
        dictionaries/lists e.g.
        {"parent" : {"childA" : {...}, "childB" : {...}}}
        {"parent" : [{...}, {...}]}
        Leaf nodes can be one of the following formats:
        value - (value,) - (value, {metadata})
        getter - (getter,) - (getter, {metadata})
        (getter, setter) - (getter, setter, {metadata})
        The following tags will also be treated as metadata:
        name - A printable name for that branch of the tree
        description - A printable description for that branch of the tree
        :param tree: dict representing the parameter tree
        :param mutable: flag which, when True, allows nodes to be replaced, created and deleted
        """
        # Flag, if set to true, allows nodes to be replaced and new nodes created
        self.mutable = mutable
        # List of path prefixes of mutable sub-trees, recorded during the build by
        # __recursive_build_tree when a mutable ParameterTree instance is absorbed.
        self.mutable_paths = []
        # Recursively check and initialise the tree
        self._tree = self.__recursive_build_tree(tree)
    @property
    def tree(self):
        """Return tree object for this parameter tree node.
        Used internally for recursive descent of parameter trees.
        """
        return self._tree
    def get(self, path, with_metadata=False):
        """Get the values of parameters in a tree.
        This method returns the values at and below a specified path in the parameter tree.
        This is done by recursively populating the tree with the current values of parameters,
        returning the result as a dictionary.
        :param path: path in tree to get parameter values for
        :param with_metadata: include metadata in the response when set to True
        :returns: dict of parameter tree at the specified path
        """
        # Split the path by levels, truncating the last level if path ends in trailing slash
        levels = path.split('/')
        if levels[-1] == '':
            del levels[-1]
        # Initialise the subtree before descent
        subtree = self._tree
        # If this is single level path, return the populated tree at the top level
        if not levels:
            return self.__recursive_populate_tree(subtree, with_metadata)
        # Descend the specified levels in the path, checking for a valid subtree of the appropriate
        # type
        for level in levels:
            # Metadata fields are only addressable when metadata was requested
            if level in self.METADATA_FIELDS and not with_metadata:
                raise ParameterTreeError("Invalid path: {}".format(path))
            try:
                if isinstance(subtree, dict):
                    subtree = subtree[level]
                elif isinstance(subtree, ParameterAccessor):
                    # Leaf accessor: index into its (metadata) dict representation
                    subtree = subtree.get(with_metadata)[level]
                else:
                    # Otherwise assume a list node, addressed by integer index
                    subtree = subtree[int(level)]
            except (KeyError, ValueError, IndexError):
                raise ParameterTreeError("Invalid path: {}".format(path))
        # Return the populated tree at the appropriate path
        return self.__recursive_populate_tree({levels[-1]: subtree}, with_metadata)
    def set(self, path, data):
        """Set the values of the parameters in a tree.
        This method sets the values of parameters in a tree, based on the data passed to it
        as a nested dictionary of parameter and value pairs. The updated parameters are merged
        into the existing tree recursively.
        :param path: path to set parameters for in the tree
        :param data: nested dictionary representing values to update at the path
        """
        # Expand out any lists/tuples
        data = self.__recursive_build_tree(data)
        # Get subtree from the node the path points to
        levels = path.split('/')
        if levels[-1] == '':
            del levels[-1]
        merge_parent = None
        merge_child = self._tree
        # Descend the tree and validate each element of the path
        for level in levels:
            if level in self.METADATA_FIELDS:
                raise ParameterTreeError("Invalid path: {}".format(path))
            try:
                merge_parent = merge_child
                if isinstance(merge_child, dict):
                    merge_child = merge_child[level]
                else:
                    # Non-dict branch: assume list node, addressed by integer index
                    merge_child = merge_child[int(level)]
            except (KeyError, ValueError, IndexError):
                raise ParameterTreeError("Invalid path: {}".format(path))
        # Add trailing / to paths where necessary
        if path and path[-1] != '/':
            path += '/'
        # Merge data with tree
        merged = self.__recursive_merge_tree(merge_child, data, path)
        # Add merged part to tree, either at the top of the tree or at the
        # appropriate level specified by the path
        if not levels:
            self._tree = merged
            return
        if isinstance(merge_parent, dict):
            merge_parent[levels[-1]] = merged
        else:
            merge_parent[int(levels[-1])] = merged
    def delete(self, path=''):
        """
        Remove Parameters from a Mutable Tree.
        This method deletes selected parameters from a tree, if that tree has been flagged as
        Mutable. Deletion of Branch Nodes means all child nodes of that Branch Node are also deleted
        :param path: Path to selected Parameter Node in the tree
        :raises ParameterTreeError: if the tree (or sub-tree at path) is not mutable, or the
            path is invalid
        """
        # Deletion is allowed if the whole tree is mutable or the path lies within a
        # recorded mutable sub-tree
        if not self.mutable and not any(path.startswith(part) for part in self.mutable_paths):
            raise ParameterTreeError("Invalid Delete Attempt: Tree Not Mutable")
        # Split the path by levels, truncating the last level if path ends in trailing slash
        levels = path.split('/')
        if levels[-1] == '':
            del levels[-1]
        subtree = self._tree
        # An empty path deletes everything: clear the whole tree in place
        if not levels:
            subtree.clear()
            return
        try:
            # navigate down the path, based on how path navigation works in the set method above
            for level in levels[:-1]:
                # if dict, subtree is normal branch, continue navigation
                if isinstance(subtree, dict):
                    subtree = subtree[level]
                else:
                    # if not a dict, but still navigating, it should be a list, so next path is int
                    subtree = subtree[int(level)]
            # once we are at the second to last part of the path, we want to delete whatever comes next
            if isinstance(subtree, list):
                subtree.pop(int(levels[-1]))
            else:
                subtree.pop(levels[-1])
        except (KeyError, ValueError, IndexError):
            raise ParameterTreeError("Invalid path: {}".format(path))
    def __recursive_build_tree(self, node, path=''):
        """Recursively build and expand out a tree or node.
        This internal method is used to recursively build and expand a tree or node,
        replacing elements as found with appropriate types, e.g. ParameterAccessor for
        a set/get pair, the internal tree of a nested ParameterTree.
        :param node: node to recursively build
        :param path: path to node within overall tree
        :returns: built node
        """
        # If the node is a ParameterTree instance, replace with its own built tree
        if isinstance(node, ParameterTree):
            if node.mutable:
                self.mutable_paths.append(path)
            # NOTE(review): returning the raw built tree discards the sub-tree object, so
            # its own mutable flag no longer applies directly; only the mutable_paths
            # entry recorded above preserves that information.
            return node.tree
        # Convert node tuple into the corresponding ParameterAccessor, depending on type of
        # fields
        if isinstance(node, tuple):
            if len(node) == 1:
                # Node is (value)
                param = ParameterAccessor(path, node[0])
            elif len(node) == 2:
                if isinstance(node[1], dict):
                    # Node is (value, {metadata})
                    param = ParameterAccessor(path, node[0], **node[1])
                else:
                    # Node is (getter, setter)
                    param = ParameterAccessor(path, node[0], node[1])
            elif len(node) == 3 and isinstance(node[2], dict):
                # Node is (getter, setter, {metadata})
                param = ParameterAccessor(path, node[0], node[1], **node[2])
            else:
                raise ParameterTreeError("{} is not a valid leaf node".format(repr(node)))
            return param
        # Convert list or non-callable tuple to enumerated dict
        if isinstance(node, list):
            return [self.__recursive_build_tree(elem, path=path) for elem in node]
        # Recursively check child elements
        if isinstance(node, dict):
            return {k: self.__recursive_build_tree(
                v, path=path + str(k) + '/') for k, v in node.items()}
        return node
    def __remove_metadata(self, node):
        """Remove metadata fields from a node.
        Used internally to return a parameter tree without metadata fields
        :param node: tree node to return without metadata fields
        :returns: generator yielding items in node minus metadata
        """
        for key, val in node.items():
            if key not in self.METADATA_FIELDS:
                yield key, val
    def __recursive_populate_tree(self, node, with_metadata=False):
        """Recursively populate a tree with values.
        This internal method recursively populates the tree with parameter values, or
        the results of the accessor getters for nodes. It is called by the get() method to
        return the values of parameters in the tree.
        :param node: tree node to populate and return
        :param with_metadata: include parameter metadata with the tree
        :returns: populated node as a dict
        """
        # If this is a branch node recurse down the tree
        if isinstance(node, dict):
            if with_metadata:
                branch = {
                    k: self.__recursive_populate_tree(v, with_metadata) for k, v
                    in node.items()
                }
            else:
                # Strip branch-level metadata entries when metadata was not requested
                branch = {
                    k: self.__recursive_populate_tree(v, with_metadata) for k, v
                    in self.__remove_metadata(node)
                }
            return branch
        if isinstance(node, list):
            return [self.__recursive_populate_tree(item, with_metadata) for item in node]
        # If this is a leaf node, check if the leaf is a r/w tuple and substitute the
        # read element of that tuple into the node
        if isinstance(node, ParameterAccessor):
            return node.get(with_metadata)
        return node
    # Replaces values in data_tree with values from new_data
    def __recursive_merge_tree(self, node, new_data, cur_path):
        """Recursively merge a tree with new values.
        This internal method recursively merges a tree with new values. Called by the set()
        method, this allows parameters to be updated in place with the specified values,
        calling the parameter setter in specified in an accessor. The type of any updated
        parameters is checked against the existing parameter type.
        :param node: tree node to populate and return
        :param new_data: dict of new data to be merged in at this path in the tree
        :param cur_path: current path in the tree
        :returns: the update node at this point in the tree
        """
        # Recurse down tree if this is a branch node
        if isinstance(node, dict) and isinstance(new_data, dict):
            try:
                update = {}
                for k, v in self.__remove_metadata(new_data):
                    # New keys may only be created in mutable (sub-)trees
                    mutable = self.mutable or any(cur_path.startswith(part) for part in self.mutable_paths)
                    if mutable and k not in node:
                        node[k] = {}
                    update[k] = self.__recursive_merge_tree(node[k], v, cur_path + k + '/')
                node.update(update)
                return node
            except KeyError as key_error:
                # str(key_error) includes quotes around the key; strip them for the message
                raise ParameterTreeError(
                    'Invalid path: {}{}'.format(cur_path, str(key_error)[1:-1])
                )
        if isinstance(node, list) and isinstance(new_data, dict):
            # NOTE(review): enumerating a dict yields its KEYS, so ``val`` here is each key
            # of new_data (not its value) and ``i`` is just a counter — confirm this branch
            # behaves as intended for list nodes updated with dict payloads.
            try:
                for i, val in enumerate(new_data):
                    node[i] = self.__recursive_merge_tree(node[i], val, cur_path + str(i) + '/')
                return node
            except IndexError as index_error:
                raise ParameterTreeError(
                    'Invalid path: {}{} {}'.format(cur_path, str(i), str(index_error))
                )
        # Update the value of the current parameter, calling the set accessor if specified and
        # validating the type if necessary.
        if isinstance(node, ParameterAccessor):
            node.set(new_data)
        else:
            # Validate type of new node matches existing
            if not self.mutable and type(node) is not type(new_data):
                if not any(cur_path.startswith(part) for part in self.mutable_paths):
                    raise ParameterTreeError('Type mismatch updating {}: got {} expected {}'.format(
                        cur_path[:-1], type(new_data).__name__, type(node).__name__
                    ))
            node = new_data
        return node
| 42.35167 | 107 | 0.622536 |
7956a4bc5c4d81d616634d1455c7fc9cfa180fc9 | 3,434 | py | Python | _data/galleries/gallery_creator.py | AsheBlade/ShadowArchive_Template | b2dbc658d3f1e5eb13a0ee2c2137f1aed19ce71a | [
"MIT"
] | null | null | null | _data/galleries/gallery_creator.py | AsheBlade/ShadowArchive_Template | b2dbc658d3f1e5eb13a0ee2c2137f1aed19ce71a | [
"MIT"
] | 31 | 2021-06-15T07:26:34.000Z | 2022-01-15T02:58:24.000Z | _data/galleries/gallery_creator.py | AsheBlade/ShadowArchive_Template | b2dbc658d3f1e5eb13a0ee2c2137f1aed19ce71a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# install imagesize: pip install imagesize
__author__ = 'Olivier Pieters'
__author_email__ = 'me@olivierpieters.be'
__license__ = 'BSD-3-Clause'
import yaml, imagesize
from os import listdir, rename
from os.path import isfile, join
# configuration
output_file = "Icebane_Drawing.yml"
input_file = output_file
image_path = "Icebane_Drawing"
extensions = ['jpg', 'jpeg']
# set correct path
path = join("../../assets/photography/", image_path)
# extract image files (regular files with a matching extension)
print('Collecting files...')
files = [f for f in listdir(path) if isfile(join(path, f))]
files = [f for f in files if f[f.rfind('.')+1:] in extensions]
# rename image files: embed "<width>x<height>" after the last '-' (thumbnails kept as-is)
print('Renaming files...')
new_files = []
for f in files:
    if f[f.rfind('-')+1:f.rfind('.')] != 'thumbnail':
        newf = f[:f.rfind('-')] + "-%sx%s" % imagesize.get(join(path, f)) + f[f.rfind('.'):]
        rename(join(path, f), join(path, newf))
    else:
        newf = f
    new_files.append(newf)
files = new_files
# helper objects to store gallery data
new_gallery = {}
thumbs = {}
# group gallery data: files sharing the same "<name>-" prefix form one image set
print('Grouping files...')
for f in files:
    filename = f[:f.rfind('-')]
    if f[f.rfind('-')+1:f.rfind('.')] == "thumbnail":
        thumbs[filename] = f
    else:
        if filename in new_gallery:
            new_gallery[filename].append(f)
        else:
            new_gallery[filename] = [f]
# find largest image -> set as original; smallest -> fallback thumbnail
print('Searching for originals and missing thumbnails...')
originals = {}
for image_set in new_gallery:
    max_width, max_height = imagesize.get(join(path, new_gallery[image_set][0]))
    min_width, min_height = imagesize.get(join(path, new_gallery[image_set][0]))
    original = new_gallery[image_set][0]
    thumbnail = new_gallery[image_set][0]
    for image in new_gallery[image_set]:
        width, height = imagesize.get(join(path, image))
        # BUGFIX: track the running extrema; previously max_*/min_* were never updated,
        # so every image was compared against the first one only and the final pick
        # could be neither the largest nor the smallest.
        if (width * height) > (max_width * max_height):
            max_width, max_height = width, height
            original = image
        if (width * height) < (min_width * min_height):
            min_width, min_height = width, height
            thumbnail = image
    # delete original from list to avoid double entries
    del new_gallery[image_set][new_gallery[image_set].index(original)]
    originals[image_set] = original
    # add thumbnail if not yet in dict (not removed since might still be useful)
    if image_set not in thumbs:
        thumbs[image_set] = thumbnail
# try to load YAML data
print('Checking existing YAML data...')
if isfile(input_file):
    # BUGFIX: close the file handle (previously leaked) and use safe_load instead of the
    # deprecated/unsafe default loader; the file only contains plain dict/list data that
    # this script itself writes with yaml.dump.
    with open(input_file, 'r') as yaml_file:
        input_gallery = yaml.safe_load(yaml_file)
else:
    # create empty dummy structure
    input_gallery = {"pictures": []}
old_gallery = input_gallery['pictures']
# merge two data sets into one
print('Merging YAML data...')
for pic in new_gallery:
    found = False
    # try to find matching filename
    for i in old_gallery:
        if pic == i["filename"]:
            i["sizes"] = new_gallery[pic]
            # include thumbnail if present
            if pic in thumbs:
                i["thumbnail"] = thumbs[pic]
            found = True
    if not found:
        # create new entry
        old_gallery.append({"filename": pic, "sizes": new_gallery[pic], "thumbnail": thumbs[pic], "original": originals[pic]})
# check if path existing
if "picture_path" not in input_gallery:
    input_gallery["picture_path"] = image_path
# write to output file
print('Writing YAML data to file...')
with open(output_file, 'w') as f:
    f.write(yaml.dump(input_gallery, default_flow_style=False))
7956a55035c90721d77718c2e33419b2957fdba1 | 2,350 | py | Python | gpytorch/lazy/sum_batch_lazy_tensor.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 2,673 | 2018-02-19T22:28:58.000Z | 2022-03-31T13:22:28.000Z | gpytorch/lazy/sum_batch_lazy_tensor.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 1,415 | 2018-02-19T20:38:20.000Z | 2022-03-30T12:53:13.000Z | gpytorch/lazy/sum_batch_lazy_tensor.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 467 | 2018-03-07T02:06:05.000Z | 2022-03-27T07:05:44.000Z | #!/usr/bin/env python3
import torch
from ..utils.broadcasting import _pad_with_singletons
from ..utils.getitem import _noop_index
from .block_lazy_tensor import BlockLazyTensor
class SumBatchLazyTensor(BlockLazyTensor):
    """
    Represents a lazy tensor that is actually the sum of several lazy tensors blocks.
    The :attr:`block_dim` attribute specifies which dimension of the base LazyTensor
    specifies the blocks.
    For example, (with `block_dim=-3` a `k x n x n` tensor represents `k` `n x n` blocks (a `n x n` matrix).
    A `b x k x n x n` tensor represents `k` `b x n x n` blocks (a `b x n x n` batch matrix).
    Args:
        :attr:`base_lazy_tensor` (LazyTensor):
            A `k x n x n` LazyTensor, or a `b x k x n x n` LazyTensor.
        :attr:`block_dim` (int):
            The dimension that specifies the blocks.
    """
    def _add_batch_dim(self, other):
        """Insert a singleton block dimension into `other` and expand it across all blocks."""
        shape = list(other.shape)
        expand_shape = list(other.shape)
        # Insert the block dimension just before the last two (matrix) dimensions
        shape.insert(-2, 1)
        expand_shape.insert(-2, self.base_lazy_tensor.size(-3))
        # reshape adds the singleton dim, expand broadcasts it to the number of blocks
        other = other.reshape(*shape).expand(*expand_shape)
        return other
    def _get_indices(self, row_index, col_index, *batch_indices):
        """Gather entries from every block at the given indices and sum over blocks."""
        # Create an extra index for the summed dimension
        sum_index = torch.arange(0, self.base_lazy_tensor.size(-3), device=self.device)
        sum_index = _pad_with_singletons(sum_index, row_index.dim(), 0)
        # Add a trailing dim to each index so it broadcasts against sum_index
        row_index = row_index.unsqueeze(-1)
        col_index = col_index.unsqueeze(-1)
        batch_indices = [index.unsqueeze(-1) for index in batch_indices]
        res = self.base_lazy_tensor._get_indices(row_index, col_index, *batch_indices, sum_index)
        # Collapse the block dimension by summation
        return res.sum(-1)
    def _getitem(self, row_index, col_index, *batch_indices):
        """Index the base tensor (keeping the block dimension intact) and re-wrap the result."""
        res = self.base_lazy_tensor._getitem(row_index, col_index, *batch_indices, _noop_index)
        return self.__class__(res, **self._kwargs)
    def _remove_batch_dim(self, other):
        """Collapse the block dimension (at -3) of `other` by summation."""
        return other.sum(-3)
    def _size(self):
        """Return the evaluated size: the base tensor's shape with the block dim (-3) removed."""
        shape = list(self.base_lazy_tensor.shape)
        del shape[-3]
        return torch.Size(shape)
    def diag(self):
        """Return the diagonal, computed as the per-block diagonals summed over blocks."""
        # base diag has the block dimension at -2; summing it yields the combined diagonal
        diag = self.base_lazy_tensor.diag().sum(-2)
        return diag
    def evaluate(self):
        """Evaluate the base tensor densely and sum out the block dimension."""
        return self.base_lazy_tensor.evaluate().sum(dim=-3)  # BlockLazyTensors always use dim3 for the block_dim
7956a5f8859e2f6090a31948e9ad10fe42b0442b | 197 | py | Python | 1.Basic/2.py | MajkutP/VisualPython-Fourth-Semester | 738dfbf8daecf27e99b3ecf6687d3f3843c8dad1 | [
"MIT"
] | null | null | null | 1.Basic/2.py | MajkutP/VisualPython-Fourth-Semester | 738dfbf8daecf27e99b3ecf6687d3f3843c8dad1 | [
"MIT"
] | null | null | null | 1.Basic/2.py | MajkutP/VisualPython-Fourth-Semester | 738dfbf8daecf27e99b3ecf6687d3f3843c8dad1 | [
"MIT"
] | null | null | null | def fun(lista, lista2):
ta_sama = 0;
L = []
for i in lista:
for j in lista2:
if i == j and i not in L:
L.append(i)
return L
# Demo inputs: the common elements of L1 and L2, in L1 order, are [1, 5, 2].
L1 = [1,5,2,4,8]
L2 = [1,5,3,7,2]
print(fun(L1,L2)) | 14.071429 | 28 | 0.527919 |
7956a639e0637486e687e3c91a45a4959a36c6ff | 7,996 | py | Python | scripts/scz_microarray_combined/1_prepare_feature_label_matrix_old_original.py | omarmaddouri/GCNCC_1 | ec858bbe8246e4af15f7b870ca0ccafdea93d627 | [
"MIT"
] | 4 | 2020-12-03T11:57:15.000Z | 2021-12-09T05:20:44.000Z | scripts/scz_microarray_combined/1_prepare_feature_label_matrix_old_original.py | alkaidone/GCNCC | 3270b4c2d48e0090a18a0ab1df3b9fd81627029d | [
"MIT"
] | 5 | 2020-01-28T23:14:40.000Z | 2021-08-25T15:55:23.000Z | scripts/scz_microarray_combined/1_prepare_feature_label_matrix_old_original.py | alkaidone/GCNCC | 3270b4c2d48e0090a18a0ab1df3b9fd81627029d | [
"MIT"
] | 3 | 2021-11-23T05:13:27.000Z | 2021-12-30T08:12:48.000Z | from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pathlib import Path
import numpy as np
import pandas as pd
from collections import OrderedDict
import itertools
import csv
import os
import sys
project_path = Path(__file__).resolve().parents[2]
sys.path.append(str(project_path))
import mygene
from sklearn.preprocessing import normalize
# Settings
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'scz_microarray_combined', 'Dataset string.')
flags.DEFINE_string('gene_expression', '_series_matrix.txt', 'Gene expression suffix.')
flags.DEFINE_string('node_ids', 'ppi.ids.txt', 'Ensembl unique ids file.')
flags.DEFINE_list('labels', ["control", "schizophrenia"], 'List of class labels.')
GSE_files = ["GSE12649", "GSE21138", "GSE53987"]
#Check data availability
if not os.path.isdir("{}/data/raw_input/{}".format(project_path, FLAGS.dataset)):
os.makedirs("{}/data/raw_input/{}".format(project_path, FLAGS.dataset))
# NOTE(review): the leading indentation of this section was lost in the copy
# under review; the nesting below was reconstructed from statement semantics.
# Verify it against the original source before relying on it.
# Ensure the expected directory layout and input files exist before parsing.
if not os.path.isdir("{}/data/parsed_input/{}".format(project_path, FLAGS.dataset)):
    os.makedirs("{}/data/parsed_input/{}".format(project_path, FLAGS.dataset))
for gse in GSE_files:
    if not os.path.isfile("{}/data/raw_input/{}/{}{}".format(project_path, FLAGS.dataset, gse, FLAGS.gene_expression)):
        sys.exit("{}{} file is not available under /data/raw_input/{}".format(gse, FLAGS.gene_expression, FLAGS.dataset))
if not os.path.isfile("{}/data/output/network/{}".format(project_path, FLAGS.node_ids)):
    sys.exit("{} mapping file is not available under /data/output/network/".format(FLAGS.node_ids))
print("Generate gene expression matrix...")
# Per-series parsing parameters (one entry per file in GSE_files): the header
# row holding the clinical labels, the substrings marking each class, and how
# many header/footer rows to skip when reading the expression table.
labels_row = [34, 36, 28]
control_string = ["control", "Control", "control"]
scz_string = ["schizophrenia", "Scz", "schiz"]
skip_header = [70, 75, 67]
skip_footer = [1, 1, 1]
combined_clinical_status = []
GE_dict = OrderedDict()
for i in range( len(GSE_files) ):
    # Read only the single row that carries the clinical-status labels.
    with open("{}/data/raw_input/{}/{}{}".format(project_path, FLAGS.dataset, GSE_files[i], FLAGS.gene_expression), encoding="utf-8") as lines:
        clinical_status = np.genfromtxt(itertools.islice(lines, labels_row[i]-1, labels_row[i]), delimiter="\t", dtype=np.dtype(str))
    # Normalize the free-text status strings into the two canonical labels.
    for j in range(len(clinical_status)):
        if(clinical_status[j].find(control_string[i]) != -1):
            clinical_status[j] = FLAGS.labels[0]
        elif(clinical_status[j].find(scz_string[i]) != -1):
            clinical_status[j] = FLAGS.labels[1]
    indices = np.where(np.logical_or(clinical_status == FLAGS.labels[0], clinical_status == FLAGS.labels[1]))
    selected_columns = [y for x in indices for y in x]
    clinical_status = clinical_status[selected_columns] #Keep only labels of interest
    idx_probe = 0
    selected_columns.insert(0,idx_probe) #Add index of first column that contains the probe
    expression_matrix = np.genfromtxt("{}/data/raw_input/{}/{}{}".format(project_path, FLAGS.dataset, GSE_files[i], FLAGS.gene_expression), skip_header=skip_header[i], skip_footer=skip_footer[i], usecols=tuple(selected_columns), dtype=np.dtype(str), delimiter="\t")
    #Normalize the data in each dataset
    expression_matrix[:,1:] = normalize(expression_matrix[:,1:], norm='l1')
    # Column 0 is the probe id and becomes the DataFrame index.
    GE_dict[i] = pd.DataFrame(data=expression_matrix[:,1:], index=expression_matrix[:,0])
    combined_clinical_status += list(clinical_status)
# Join the three series on their shared probes and strip quoting artefacts.
combined_GE = pd.concat([GE_dict[0], GE_dict[1], GE_dict[2]], axis=1, join='inner')
combined_GE.index = combined_GE.index.map(lambda x: x.replace('"', ''))
probes = combined_GE.index.to_numpy()
mg = mygene.MyGeneInfo()
map_ensembl={}
annotations = mg.querymany(probes, scopes='reporter', fields='ensembl.protein', species='human')
#For each query map ENSPs to the reporter with highest score
# map_ensembl: ENSP protein id -> [reporter (query probe), matching score].
# NOTE(review): when a stored ENSP already maps to a *different* reporter the
# entry is overwritten without comparing scores; the score comparison only
# applies when the reporter matches. Confirm this is the intended policy.
for response in annotations:
    if('ensembl' in response):
        matching_score = response['_score']
        scope = response['query']
        # 'ensembl' may be a list of entries or a single dict; each entry's
        # 'protein' may likewise be a list of ENSPs or a single ENSP string.
        if(isinstance(response['ensembl'],list)):
            for prot_dict in response['ensembl']:
                if(isinstance(prot_dict['protein'],list)):
                    for ensp in prot_dict['protein']:
                        if ensp in map_ensembl:
                            if(scope not in map_ensembl[ensp]):
                                map_ensembl[ensp] = [scope, matching_score]
                            else:
                                if(map_ensembl[ensp][1] < matching_score):
                                    map_ensembl[ensp] = [scope, matching_score]
                        else:
                            map_ensembl[ensp] = [scope, matching_score]
                else:
                    ensp = prot_dict['protein']
                    if ensp in map_ensembl:
                        if(scope not in map_ensembl[ensp]):
                            map_ensembl[ensp] = [scope, matching_score]
                        else:
                            if(map_ensembl[ensp][1] < matching_score):
                                map_ensembl[ensp] = [scope, matching_score]
                    else:
                        map_ensembl[ensp] = [scope, matching_score]
        elif(isinstance(response['ensembl'],dict)):
            prot_dict = response['ensembl']
            if(isinstance(prot_dict['protein'],list)):
                for ensp in prot_dict['protein']:
                    if ensp in map_ensembl:
                        if(scope not in map_ensembl[ensp]):
                            map_ensembl[ensp] = [scope, matching_score]
                        else:
                            if(map_ensembl[ensp][1] < matching_score):
                                map_ensembl[ensp] = [scope, matching_score]
                    else:
                        map_ensembl[ensp] = [scope, matching_score]
            else:
                ensp = prot_dict['protein']
                if ensp in map_ensembl:
                    if(scope not in map_ensembl[ensp]):
                        map_ensembl[ensp] = [scope, matching_score]
                    else:
                        if(map_ensembl[ensp][1] < matching_score):
                            map_ensembl[ensp] = [scope, matching_score]
                else:
                    map_ensembl[ensp] = [scope, matching_score]
# Load the node-id -> protein (ENSP) mapping produced by the network step.
protein_ids = {}
with open("{}/data/output/network/{}".format(project_path, FLAGS.node_ids)) as f:
    for line in f:
        (val, key) = line.split() #Read the ID as key
        protein_ids[int(key)] = val
ge_dict = combined_GE.T.to_dict('list')#Make the gene expression a dict with the probes as keys
shape_ge = (len(protein_ids), len(combined_clinical_status))
# Feature rows follow the network node ids; nodes without a mapped probe stay zero.
ge_matrix = np.zeros(shape_ge)
for i in range(ge_matrix.shape[0]):
    if(protein_ids[i] in map_ensembl and map_ensembl[protein_ids[i]][0] in ge_dict):
        ge_matrix[i,:] = ge_dict[map_ensembl[protein_ids[i]][0]]
feature_label = np.zeros((ge_matrix.shape[1], ge_matrix.shape[0]+1), dtype=object) #Additional column for labels
feature_label[:,:-1] = ge_matrix.T
feature_label[:,-1] = combined_clinical_status
np.savetxt("{}/data/parsed_input/{}/feature_label.txt".format(project_path, FLAGS.dataset), feature_label, fmt="%s")
print("Successful generation of feature_label matrix")
7956a655e70566c35750054a6b8fa28b244f542b | 3,650 | py | Python | soldadox/soldadox/middlewares.py | vcborsolan/soldadox | 6ebe98c3f2a9f1b8e5839357d94a2d1b5ded4af0 | [
"MIT"
] | 1 | 2020-11-16T20:08:40.000Z | 2020-11-16T20:08:40.000Z | soldadox/soldadox/middlewares.py | vcborsolan/soldadox | 6ebe98c3f2a9f1b8e5839357d94a2d1b5ded4af0 | [
"MIT"
] | 5 | 2020-08-12T15:14:11.000Z | 2022-03-02T14:58:44.000Z | soldadox/soldadox/middlewares.py | vcborsolan/soldadox_collector | 6ebe98c3f2a9f1b8e5839357d94a2d1b5ded4af0 | [
"MIT"
] | null | null | null | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class SoldadoxSpiderMiddleware:
    """Spider middleware for the soldadox project.

    Implements scrapy's spider-middleware interface with pass-through
    defaults: responses, results and start requests flow through unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also subscribes to the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response; None lets it continue into the spider.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward the spider's yielded requests/items untouched.
        for item in result:
            yield item

    def process_spider_exception(self, response, exception, spider):
        # No special handling; returning None defers to other middlewares.
        pass

    def process_start_requests(self, start_requests, spider):
        # Pass the start requests through unchanged (must yield requests only).
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class SoldadoxDownloaderMiddleware:
    """Downloader middleware for the soldadox project.

    Pass-through defaults for scrapy's downloader-middleware interface:
    requests are downloaded normally and responses returned unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also subscribes to the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None tells scrapy to keep processing the request normally.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic; returning None lets other middlewares handle it.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| 35.096154 | 78 | 0.674521 |
7956a694cf1760cc7f8f5e8a2392e764d03259f2 | 552 | py | Python | tests/commands/test_welcome.py | crakama/bc_7_twitment | 5e068b01b1b4aba968517a50168d3ae8d649fd32 | [
"MIT"
] | 1 | 2016-06-10T17:45:20.000Z | 2016-06-10T17:45:20.000Z | tests/commands/test_welcome.py | crakama/bc_7_twitment | 5e068b01b1b4aba968517a50168d3ae8d649fd32 | [
"MIT"
] | null | null | null | tests/commands/test_welcome.py | crakama/bc_7_twitment | 5e068b01b1b4aba968517a50168d3ae8d649fd32 | [
"MIT"
] | null | null | null | """Tests for our `twitment welcome` subcommand."""
from subprocess import PIPE, Popen as popen
from unittest import TestCase
class TestHello(TestCase):
def test_returns_multiple_lines(self):
output = popen(['twitment', 'welcome'], stdout=PIPE).communicate()[0]
lines = output.split('\n')
self.assertTrue(len(lines) != 1)
def test_returns_hello_world(self):
output = popen(['twitment', 'welcome'], stdout=PIPE).communicate()[0]
self.assertTrue('Welcome to twitter sentiment analysis tool' in output)
| 32.470588 | 79 | 0.688406 |
7956a6c7bc0fcb350ceed5ce276bc9d47e282735 | 876 | py | Python | app/training/test_train.py | azure-walker/MLOpsPython | 710291c3c573acd0d4fcb897150404c7ca9498ca | [
"MIT"
] | null | null | null | app/training/test_train.py | azure-walker/MLOpsPython | 710291c3c573acd0d4fcb897150404c7ca9498ca | [
"MIT"
] | null | null | null | app/training/test_train.py | azure-walker/MLOpsPython | 710291c3c573acd0d4fcb897150404c7ca9498ca | [
"MIT"
] | null | null | null | import numpy as np
from app.training.train import train_model, get_model_metrics
def test_train_model():
    """train_model on a tiny toy set should reproduce known ridge predictions."""
    features = np.arange(1, 7).reshape(-1, 1)
    targets = np.array([10, 9, 8, 8, 6, 5])
    model = train_model({"train": {"X": features, "y": targets}}, {"alpha": 1.2})
    predictions = model.predict([[1], [2]])
    np.testing.assert_almost_equal(predictions, [9.93939393939394, 9.03030303030303])
def test_get_model_metrics():
    """get_model_metrics should report the MSE of the model's predictions."""

    class StubModel:
        """Stand-in regressor returning canned predictions."""

        @staticmethod
        def predict(data):
            return [8.12121212, 7.21212121]

    data = {"test": {"X": np.array([[3], [4]]), "y": np.array([8, 7])}}
    metrics = get_model_metrics(StubModel(), data)
    assert 'mse' in metrics
    np.testing.assert_almost_equal(metrics['mse'], 0.029843893480257067)
| 26.545455 | 79 | 0.625571 |
7956a806749cf2f07c7689c1365088590ac7be5d | 232 | py | Python | lab6/__init__.py | kinpa200296/MM_labs | d56f6939e1669c3c8e9943ffb012a91cd2a7c11c | [
"MIT"
] | null | null | null | lab6/__init__.py | kinpa200296/MM_labs | d56f6939e1669c3c8e9943ffb012a91cd2a7c11c | [
"MIT"
] | null | null | null | lab6/__init__.py | kinpa200296/MM_labs | d56f6939e1669c3c8e9943ffb012a91cd2a7c11c | [
"MIT"
] | null | null | null | from channel import Channel
from input import Input
from model import Model
from output import Output
from phase import Phase
from process import Process
from request import Request
from storage import Storage
from unit import Unit
| 23.2 | 27 | 0.844828 |
7956a8137fa87bf2c06bf095b19c3498daf8b2e2 | 174 | py | Python | src/bonus.py | cloudzfy/pychallenge | 1af98a632021532e136721d282b0e7c2cbc519a3 | [
"MIT"
] | 3 | 2016-07-23T03:31:46.000Z | 2019-08-22T01:23:07.000Z | src/bonus.py | cloudzfy/pychallenge | 1af98a632021532e136721d282b0e7c2cbc519a3 | [
"MIT"
] | null | null | null | src/bonus.py | cloudzfy/pychallenge | 1af98a632021532e136721d282b0e7c2cbc519a3 | [
"MIT"
] | 3 | 2017-05-22T09:41:20.000Z | 2018-09-06T02:05:19.000Z | import this
s = 'va gur snpr bs jung?'  # ROT13-encoded puzzle hint
d = {}
# Build a ROT13 translation table covering uppercase (ord 65) and lowercase (ord 97).
for c in (65, 97):
    for i in range(26):
        d[chr(i+c)] = chr((i+13) % 26 + c)
# Decode; characters without a mapping (space, '?') pass through unchanged.
# NOTE: Python 2 print statement — this script targets Python 2.
print "".join([d.get(c, c) for c in s])
7956a83533709fe3b0780fd2dad945239f92f440 | 6,449 | py | Python | examples/optimization/hyper-parameter-optimization/hyper_parameter_optimizer.py | Whilich52/clearml | e862a6de6039a6f226cf0fb65dce71fa220cc175 | [
"Apache-2.0"
] | 1 | 2021-05-22T05:49:51.000Z | 2021-05-22T05:49:51.000Z | examples/optimization/hyper-parameter-optimization/hyper_parameter_optimizer.py | Whilich52/clearml | e862a6de6039a6f226cf0fb65dce71fa220cc175 | [
"Apache-2.0"
] | null | null | null | examples/optimization/hyper-parameter-optimization/hyper_parameter_optimizer.py | Whilich52/clearml | e862a6de6039a6f226cf0fb65dce71fa220cc175 | [
"Apache-2.0"
] | 1 | 2021-05-22T05:49:49.000Z | 2021-05-22T05:49:49.000Z | import logging
from clearml import Task
from clearml.automation import (
DiscreteParameterRange, HyperParameterOptimizer, RandomSearch,
UniformIntegerParameterRange)
# trying to load Bayesian optimizer package
# Pick the best available search strategy at import time: prefer Optuna,
# fall back to BOHB (hpbandster), and finally to plain RandomSearch.
try:
    from clearml.automation.optuna import OptimizerOptuna  # noqa
    aSearchStrategy = OptimizerOptuna
except ImportError as ex:
    try:
        from clearml.automation.hpbandster import OptimizerBOHB  # noqa
        aSearchStrategy = OptimizerBOHB
    except ImportError as ex:
        logging.getLogger().warning(
            'Apologies, it seems you do not have \'optuna\' or \'hpbandster\' installed, '
            'we will be using RandomSearch strategy instead')
        aSearchStrategy = RandomSearch
def job_complete_callback(
        job_id,                  # type: str
        objective_value,         # type: float
        objective_iteration,     # type: int
        job_parameters,          # type: dict
        top_performance_job_id   # type: str
):
    """Log every finished HPO job; celebrate when this job holds the best objective."""
    print('Job completed!', job_id, objective_value, objective_iteration, job_parameters)
    if job_id != top_performance_job_id:
        return
    print('WOOT WOOT we broke the record! Objective reached {}'.format(objective_value))
# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='Hyper-Parameter Optimization',
                 task_name='Automatic Hyper-Parameter Optimization',
                 task_type=Task.TaskTypes.optimizer,
                 reuse_last_task_id=False)
# experiment template to optimize in the hyper-parameter optimization
args = {
    'template_task_id': None,
    'run_as_service': False,
}
args = task.connect(args)
# Get the template task experiment that we want to optimize
if not args['template_task_id']:
    args['template_task_id'] = Task.get_task(
        project_name='examples', task_name='Keras HP optimization base').id
# Set default queue name for the Training tasks themselves.
# later can be overridden in the UI
execution_queue = '1xGPU'
# Example use case:
an_optimizer = HyperParameterOptimizer(
    # This is the experiment we want to optimize
    base_task_id=args['template_task_id'],
    # here we define the hyper-parameters to optimize
    # Notice: The parameter name should exactly match what you see in the UI: <section_name>/<parameter>
    # For Example, here we see in the base experiment a section Named: "General"
    # under it a parameter named "batch_size", this becomes "General/batch_size"
    # If you have `argparse` for example, then arguments will appear under the "Args" section,
    # and you should instead pass "Args/batch_size"
    hyper_parameters=[
        UniformIntegerParameterRange('General/layer_1', min_value=128, max_value=512, step_size=128),
        UniformIntegerParameterRange('General/layer_2', min_value=128, max_value=512, step_size=128),
        DiscreteParameterRange('General/batch_size', values=[96, 128, 160]),
        DiscreteParameterRange('General/epochs', values=[30]),
    ],
    # this is the objective metric we want to maximize/minimize
    objective_metric_title='epoch_accuracy',
    objective_metric_series='epoch_accuracy',
    # now we decide if we want to maximize it or minimize it (accuracy we maximize)
    objective_metric_sign='max',
    # let us limit the number of concurrent experiments,
    # this in turn will make sure we do dont bombard the scheduler with experiments.
    # if we have an auto-scaler connected, this, by proxy, will limit the number of machine
    max_number_of_concurrent_tasks=2,
    # this is the optimizer class (actually doing the optimization)
    # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
    # more are coming soon...
    optimizer_class=aSearchStrategy,
    # Select an execution queue to schedule the experiments for execution
    execution_queue=execution_queue,
    # If specified all Tasks created by the HPO process will be under the `spawned_tasks_project` project
    spawn_task_project=None,  # 'HPO spawn project',
    # If specified only the top K performing Tasks will be kept, the others will be automatically archived
    save_top_k_tasks_only=None,  # 5,
    # Optional: Limit the execution time of a single experiment, in minutes.
    # (this is optional, and if using OptimizerBOHB, it is ignored)
    time_limit_per_job=10.,
    # Check the experiments every 12 seconds is way too often, we should probably set it to 5 min,
    # assuming a single experiment is usually hours...
    pool_period_min=0.2,
    # set the maximum number of jobs to launch for the optimization, default (None) unlimited
    # If OptimizerBOHB is used, it defined the maximum budget in terms of full jobs
    # basically the cumulative number of iterations will not exceed total_max_jobs * max_iteration_per_job
    total_max_jobs=10,
    # set the minimum number of iterations for an experiment, before early stopping.
    # Does not apply for simple strategies such as RandomSearch or GridSearch
    min_iteration_per_job=10,
    # Set the maximum number of iterations for an experiment to execute
    # (This is optional, unless using OptimizerBOHB where this is a must)
    max_iteration_per_job=30,
)
# if we are running as a service, just enqueue ourselves into the services queue and let it run the optimization
if args['run_as_service']:
    # if this code is executed by `clearml-agent` the function call does nothing.
    # if executed locally, the local process will be terminated, and a remote copy will be executed instead
    task.execute_remotely(queue_name='services', exit_process=True)
# report every 12 seconds, this is way too often, but we are testing here J
# NOTE(review): set_report_period() appears to take minutes in ClearML, so 2.2
# would be 2.2 minutes (132 s), not 12 seconds as the comment above says — confirm.
an_optimizer.set_report_period(2.2)
# start the optimization process, callback function to be called every time an experiment is completed
# this function returns immediately
an_optimizer.start(job_complete_callback=job_complete_callback)
# set the time limit for the optimization process (2 hours)
an_optimizer.set_time_limit(in_minutes=120.0)
# wait until process is done (notice we are controlling the optimization process in the background)
an_optimizer.wait()
# optimization is completed, print the top performing experiments id
top_exp = an_optimizer.get_top_experiments(top_k=3)
print([t.id for t in top_exp])
# make sure background optimization stopped
an_optimizer.stop()
print('We are done, good bye')
| 48.488722 | 112 | 0.745542 |
7956a91ef20ca0f18eb22bf6b6f2b5dee7b6c4b2 | 834 | py | Python | setup.py | sathishpy/EN-Roadmap | 76b0816b1d89d512d511259e475f0be005365fcb | [
"MIT"
] | null | null | null | setup.py | sathishpy/EN-Roadmap | 76b0816b1d89d512d511259e475f0be005365fcb | [
"MIT"
] | null | null | null | setup.py | sathishpy/EN-Roadmap | 76b0816b1d89d512d511259e475f0be005365fcb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in erpnext_roadmap/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('erpnext_roadmap/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))
# NOTE(review): pip.req.parse_requirements was removed in pip 10, so this
# setup.py only works with pip < 10; `ir._link` below is also a private
# InstallRequirement attribute. Consider parsing requirements.txt directly.
requirements = parse_requirements("requirements.txt", session="")
setup(
    name='erpnext_roadmap',
    version=version,
    description='Roadmap tool for ERPNext project',
    author='sathishpy@gmail.com',
    author_email='sathishpy@gmail.com',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    # str(ir.req) yields the "name==version" specifier for setuptools.
    install_requires=[str(ir.req) for ir in requirements],
    dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
| 30.888889 | 70 | 0.748201 |
7956a946b7bebc5a2677b789bf083cff7d9e42bf | 704 | py | Python | src/python/grpcio_status/grpc_version.py | vsel/grpc | 10dd24dcb971c858f28362f1548cb94f738c10ef | [
"Apache-2.0"
] | 2 | 2020-06-03T06:10:48.000Z | 2020-06-03T06:10:55.000Z | src/python/grpcio_status/grpc_version.py | OneCodeMonkey/grpc | 03fc68bb5a10c2604e299a8089776462efbfc8c7 | [
"Apache-2.0"
] | 62 | 2020-02-27T00:53:36.000Z | 2021-02-05T06:10:53.000Z | src/python/grpcio_status/grpc_version.py | OneCodeMonkey/grpc | 03fc68bb5a10c2604e299a8089776462efbfc8c7 | [
"Apache-2.0"
] | 1 | 2022-02-22T16:32:03.000Z | 2022-02-22T16:32:03.000Z | # Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_status/grpc_version.py.template`!!!
VERSION = '1.30.0.dev0'
| 39.111111 | 97 | 0.762784 |
7956a9bba3dc60110ee542c888cdbc6af18607a4 | 26,206 | py | Python | Game_def.py | DylanR3798/Hockey-World-Cup-Simulatation | 3751206d9bf27095699ba6eb715c28acd1612223 | [
"Apache-2.0"
] | null | null | null | Game_def.py | DylanR3798/Hockey-World-Cup-Simulatation | 3751206d9bf27095699ba6eb715c28acd1612223 | [
"Apache-2.0"
] | null | null | null | Game_def.py | DylanR3798/Hockey-World-Cup-Simulatation | 3751206d9bf27095699ba6eb715c28acd1612223 | [
"Apache-2.0"
] | null | null | null | import random
import time
class Goalkeeper():
    """A goalkeeper with four rated attributes and a derived overall rating.

    Attributes: name (str); reflexes, jumping, bravery and kicking (ints,
    each capped at 100, jointly capped at ``maxipgk`` points); ``overall``
    is the integer mean of the four ratings.

    Raises:
        ValueError: if the four ratings sum to more than ``maxipgk`` or any
            single rating exceeds 100.
    """

    def __init__(self, name, reflexes, jumping, bravery, kicking):
        self.name = name
        self.reflexes = reflexes
        self.jumping = jumping
        self.bravery = bravery
        self.kicking = kicking
        self.overall = int((reflexes + jumping + bravery + kicking)/4)
        self.maxipgk = 320  # total points budget across the four attributes
        if self.reflexes + self.jumping + self.bravery + self.kicking > self.maxipgk:
            raise ValueError("Points maximum exceeded! You points maximum is " + str(self.maxipgk))
        # Bug fix: these messages previously named the outfield attributes
        # (Attacking/Defending/Fitness/Pace) instead of the goalkeeper
        # attributes actually being validated.
        if self.reflexes > 100:
            raise ValueError("Reflexes Points maximum exceeded! Your Reflexes Points maximum is 100")
        if self.jumping > 100:
            raise ValueError("Jumping Points maximum exceeded! Your Jumping Points maximum is 100")
        if self.bravery > 100:
            raise ValueError("Bravery Points maximum exceeded! Your Bravery Points maximum is 100")
        if self.kicking > 100:
            raise ValueError("Kicking Points maximum exceeded! Your Kicking Points maximum is 100")

    def __repr__(self):
        return repr((self.name, self.reflexes, self.jumping, self.bravery, self.kicking))
class Outfield_Player():
    """An outfield player with six rated attributes and a derived overall.

    name is a string, position one of 'DF', 'MF' or 'FW'; the six ratings
    (attacking, defending, fitness, pace, passing, skill) are each capped at
    100 and jointly capped at ``maxip`` points. ``overall`` is their integer
    mean. Raises ValueError when any cap is exceeded or the position is invalid.
    """

    def __init__(self, name, position, attacking, defending, fitness, pace, passing, skill):
        self.name = name
        self.position = position
        self.attacking = attacking
        self.defending = defending
        self.fitness = fitness
        self.pace = pace
        self.passing = passing
        self.skill = skill
        ratings = (attacking, defending, fitness, pace, passing, skill)
        self.overall = int(sum(ratings)/6)
        self.maxip = 480  # total points budget across the six attributes
        if sum(ratings) > self.maxip:
            raise ValueError("Points maximum exceeded! You points maximum is " + str(self.maxip))
        # Validate each rating in turn, reporting the first one over the cap.
        labelled = (("Attacking", attacking), ("Defending", defending),
                    ("Fitness", fitness), ("Pace", pace),
                    ("Passing", passing), ("Skill", skill))
        for label, value in labelled:
            if value > 100:
                raise ValueError("{0} Points maximum exceeded! You {0} Points maximum is 100".format(label))
        if self.position not in ['DF', 'MF', 'FW']:
            raise ValueError("Position not valid. Select from 'DF','MF','FW'")

    def __repr__(self):
        return repr((self.name, self.position, self.attacking, self.defending, self.fitness, self.pace, self.passing, self.skill))
class Team:
    """A five-player squad with derived overall, defending and attacking ratings.

    player1 is the goalkeeper; players 2-5 are outfield players. The keeper
    contributes its ``overall`` rating (it has no defending/attacking stats)
    to the team's defence, while team attack averages the outfielders only.
    """

    def __init__(self, name, player1, player2, player3, player4, player5):
        self.name = name
        self.player1 = player1
        self.player2 = player2
        self.player3 = player3
        self.player4 = player4
        self.player5 = player5
        outfield = (player2, player3, player4, player5)
        self.overall = int(sum(p.overall for p in (player1,) + outfield)/5)
        self.defending = int((player1.overall + sum(p.defending for p in outfield))/5)
        self.attacking = int(sum(p.attacking for p in outfield)/4)

    def __repr__(self):
        return repr((self.name, self.overall, self.player1, self.player2, self.player3, self.player4, self.player5))
# Squad data for the tournament. Constructor argument orders:
#   Goalkeeper(name, reflexes, jumping, bravery, kicking)
#   Outfield_Player(name, position, attacking, defending, fitness, pace, passing, skill)
#   Team(name, goalkeeper, defender, midfielder, midfielder, forward)
# Argentina
Vivaldi = Goalkeeper('Juan Vivaldi', 83, 77, 72, 82)
Peillat = Outfield_Player('Gonzalo Peillat', 'DF', 70, 89, 78, 73, 79, 67)
Ortiz = Outfield_Player('Ignacio Ortiz', 'MF', 79, 78, 77, 80, 75, 81)
Rey = Outfield_Player('Matias Rey', 'MF', 81, 77, 74, 72, 87, 72)
Vila = Outfield_Player('Lucas Vila', 'FW', 87, 50, 80, 82, 74, 85)
ARG = Team('Argentina', Vivaldi, Peillat, Ortiz, Rey, Vila)
# Australia
Charter = Goalkeeper('Andrew Charter', 84, 80, 75, 78)
Dawson = Outfield_Player('Mattew Dawson', 'DF', 74, 86, 80, 79, 80, 81)
Wickham = Outfield_Player('Tom Wickham', 'MF', 79, 78, 80, 82, 80, 81)
Edwards = Outfield_Player('Jeremy Edwards', 'MF', 80, 81, 76, 82, 80, 75)
Craig = Outfield_Player('Tom Craig', 'FW', 95, 65, 81, 82, 77, 80)
AUS = Team('Australia', Charter, Dawson, Wickham, Edwards, Craig)
# Austria
Mantler = Goalkeeper('Michael Mantler', 64, 67, 62, 69)
Podpera = Outfield_Player('Mathias Podpera', 'DF', 63, 74, 67, 64, 65, 68)
Binder = Outfield_Player('Oliver Binder', 'MF', 76, 70, 62, 74, 66, 67)
Schmidt = Outfield_Player('Bernhard Schmidt', 'MF', 68, 77, 71, 67, 66, 76)
Bele = Outfield_Player('Robert Bele', 'FW', 76, 68, 69, 87, 62, 68)
AUT = Team('Austria', Mantler, Podpera, Binder, Schmidt, Bele)
# Belgium
Vanasch = Goalkeeper('Vincent Vanasch', 80, 77, 70, 79)
Briels = Outfield_Player('Thomas Briels', 'DF', 68, 87, 75, 70, 75, 71)
Boccard = Outfield_Player('Gautheir Boccard', 'MF', 75, 77, 79, 78, 76, 80)
Dockier = Outfield_Player('Sebastian Dockier', 'MF', 79, 78, 70, 71, 81, 70)
Charlier = Outfield_Player('Cedric Charlier', 'FW', 82, 68, 74, 79, 71, 82)
BEL = Team('Belgium', Vanasch, Briels, Boccard, Dockier, Charlier)
# England
Pinner = Goalkeeper('George Pinner', 76, 77, 74, 79)
Dixon = Outfield_Player('Adam Dixon', 'DF', 45, 77, 79, 65, 81, 52)
Middleton = Outfield_Player('Barry Middleton', 'MF', 75, 81, 79, 76, 77, 73)
Martin = Outfield_Player('Harry Martin', 'MF', 79, 78, 73, 79, 81, 78)
Jackson = Outfield_Player('Ashley Jackson', 'FW', 85, 65, 74, 77, 73, 78)
ENG = Team('England', Pinner, Dixon, Middleton, Martin, Jackson)
# Spain
Cortes = Goalkeeper('Francisco Cortes', 79, 74, 79, 69)
Enrique = Outfield_Player('Sergio Enrique', 'DF', 51, 79, 77, 73, 79, 69)
Alegre = Outfield_Player('David Alegre', 'MF', 75, 68, 75, 73, 74, 76)
Carrera = Outfield_Player('Jardi Carrera', 'MF', 71, 73, 76, 74, 79, 78)
Lleonart = Outfield_Player('Xavi Lleonart', 'FW', 78, 50, 70, 78, 77, 85)
ESP = Team('Spain', Cortes, Enrique, Alegre, Carrera, Lleonart)
# Germany
Jacobi = Goalkeeper('Niclas Jacobi', 80, 73, 78, 77)
Butt = Outfield_Player('Linus Butt', 'DF', 60, 87, 76, 75, 70, 75)
Tompertz = Outfield_Player('Moritz Tompertz', 'MF', 70, 69, 73, 80, 77, 73)
Herzbruch = Outfield_Player('Timm Herzbruch', 'MF', 81, 73, 72, 74, 75, 73)
Grambusch = Outfield_Player('Tom Grambusch', 'FW', 78, 68, 72, 73, 72, 74)
GER = Team('Germany', Jacobi, Butt, Tompertz, Herzbruch, Grambusch)
# Wales
Carless = Goalkeeper('Ben Carless', 68, 65, 66, 67)
Kyriakides = Outfield_Player('Dan Kyriakides', 'DF', 63, 74, 67, 63, 66, 65)
Cornick = Outfield_Player('Andrew Cornick', 'MF', 67, 66, 68, 63, 69, 65)
Brignull = Outfield_Player('Liam Brignull', 'MF', 62, 69, 65, 69, 67, 65)
Furlong = Outfield_Player('Gareth Furlong', 'FW', 77, 59, 66, 64, 67, 63)
WAL = Team('Wales', Carless, Kyriakides, Cornick, Brignull, Furlong)
# South Africa
Pieterse = Goalkeeper('Erasmus Pieterse', 75, 69, 74, 71)
Malgraff = Outfield_Player('Ignatius Malgraff', 'DF', 74, 64, 70, 75, 65, 69)
Madsen = Outfield_Player('Lloyd Madsen', 'MF', 65, 67, 66, 73, 79, 70)
Paton = Outfield_Player('Wade Paton', 'MF', 66, 73, 68, 65, 66, 68)
Hykes = Outfield_Player('Julian Hykes', 'FW', 79, 65, 72, 68, 79, 66)
RSA = Team('South Africa', Pieterse, Malgraff, Madsen, Paton, Hykes)
# India
Singh = Goalkeeper('Harmanpreet Singh', 79, 72, 77, 74)
Tirkey = Outfield_Player('Dipsan Tirkey', 'DF', 61, 79, 75, 78, 68, 70)
Sharma = Outfield_Player('Nilakanta Sharma', 'MF', 72, 68, 72, 79, 78, 74)
Qureshi = Outfield_Player('Armaan Qureshi', 'MF', 76, 68, 77, 72, 75, 73)
Yousuf = Outfield_Player('Affan Yousuf', 'FW', 80, 70, 70, 74, 76, 73)
IND = Team('India', Singh, Tirkey, Sharma, Qureshi, Yousuf)
# Ireland
Harte = Goalkeeper('David Harte', 71, 77, 73, 68)
Gormley = Outfield_Player('Ronan Gormley', 'DF', 69, 77, 72, 70, 71, 68)
Watt = Outfield_Player('Michael Watt', 'MF', 61, 78, 79, 77, 73, 70)
Cargo = Outfield_Player('Chris Cargo', 'MF', 80, 64, 71, 74, 67, 73)
Bell = Outfield_Player('Jonny Bell', 'FW', 84, 59, 73, 80, 71, 84)
IRL = Team('Ireland', Harte, Gormley, Watt, Cargo, Bell)
# Malaysia
Othman = Goalkeeper('Hafizuddin Othman', 74, 72, 68, 70)
Rahim = Outfield_Player('Razie Rahim', 'DF', 70, 71, 71, 72, 69, 73)
Hassan = Outfield_Player('Azi Hassan', 'MF', 77, 73, 76, 71, 74, 72)
Saari = Outfield_Player('Fitri Saari', 'MF', 76, 71, 67, 68, 70, 72)
Saabah = Outfield_Player('Shahril Saabah', 'FW', 71, 73, 75, 70, 76, 73)
MAL = Team('Malaysia', Othman, Rahim, Hassan, Saari, Saabah)
# Pakistan
Ali = Goalkeeper('Amjad Ali', 66, 71, 67, 68)
Mahmood = Outfield_Player('Abu Mahmood', 'DF', 63, 73, 70, 78, 67, 63)
Shaked = Outfield_Player('Ammad Shaked', 'MF', 79, 69, 69, 66, 69, 74)
Ashfaq = Outfield_Player('Nawaz Ashfaq', 'MF', 68, 70, 69, 74, 63, 79)
Abbas = Outfield_Player('Tasawar Abbas', 'FW', 79, 63, 68, 77, 69, 77)
PAK = Team('Pakistan', Ali, Mahmood, Shaked, Ashfaq, Abbas)
# New Zealand
Manchester = Goalkeeper('Devon Manchester', 74, 78, 71, 70)
Hilton = Outfield_Player('Blair Hilton', 'DF', 56, 78, 71, 70, 73, 72)
Archibald = Outfield_Player('Ryan Archibald', 'MF', 79, 65, 78, 77, 75, 72)
Child = Outfield_Player('Simon Child', 'MF', 67, 72, 75, 73, 68, 76)
Shaw = Outfield_Player('Bradley Shaw', 'FW', 76, 62, 77, 75, 74, 79)
NZL = Team('New Zealand', Manchester, Hilton, Archibald, Child, Shaw)
# Netherlands
Stockmann = Goalkeeper('Jaap Stockmann', 79, 75, 78, 78)
Schuurman = Outfield_Player('Glenn Schuurman', 'DF', 63, 85, 77, 74, 68, 67)
Verga = Outfield_Player('Valetin Verga', 'MF', 72, 73, 74, 75, 75, 76)
Hertzberger = Outfield_Player('Jeroen Hertzberger', 'MF', 78, 78, 72, 73, 80, 71)
Pruyser = Outfield_Player('Micro Pruyser', 'FW', 83, 68, 72, 76, 72, 80)
NED = Team('Netherlands', Stockmann, Schuurman, Verga, Hertzberger, Pruyser)
def CPU_match(team1, team2):
    """Simulate a 70-minute match between two teams, with sudden-death extra time.

    Prints the final score line and returns the winning Team object.
    Relies on the module-level scoring_chance() helper (defined elsewhere in
    this file) to decide which side attacks in a given minute.
    """
    team1score = []
    team2score = []
    # Scales the attack rolls so stronger fixtures still yield sensible scorelines.
    potentacy = ((team1.attacking+team2.attacking)*(team1.defending+team2.defending))/(10*(team1.overall+team2.overall)**2)

    def play_minute():
        """Simulate one minute: the attacking side rolls against the defence."""
        attackingteam = scoring_chance(team1, team2)
        # Bug fix: compare the team names with ==, not identity (is). The
        # identity test only worked if scoring_chance() happened to return the
        # very same str object, and could leave the extra-time loop spinning
        # forever with no goals ever awarded.
        if attackingteam == team1.name:
            if random.randint(1, team1.attacking)*potentacy > random.randint(1, team2.defending):
                team1score.append('team1')
        if attackingteam == team2.name:
            if random.randint(1, team2.attacking)*potentacy > random.randint(1, team1.defending):
                team2score.append('team2')

    # Regulation time: 70 simulated minutes.
    for _minute in range(70):
        play_minute()
    if len(team1score) != len(team2score):
        print(team1.name, len(team1score)," - ", len(team2score),team2.name)
    else:
        # Sudden death: keep playing minutes until one side leads.
        while len(team1score) == len(team2score):
            play_minute()
            if len(team1score) != len(team2score):
                print(team1.name, len(team1score)," - ", len(team2score),team2.name, " (after extra-time)")
                break
    if len(team1score) < len(team2score):
        return team2
    if len(team1score) > len(team2score):
        return team1
def create_quarterfinalists():
    """Return a new, empty container for the quarter-final qualifiers."""
    return list()


# Module-level bracket state filled in by round_of_16_draw().
quarterfinalists = create_quarterfinalists()
def round_of_16_draw(team):
    """Draw and simulate the round of 16 for all CPU teams.

    Pairs the player's *team* with a random opponent (that tie is printed but
    not simulated here — presumably played by the user via Match; confirm),
    pairs off the remaining 14 CPU teams, plays those 7 ties with CPU_match
    and appends the winners to the module-level ``quarterfinalists`` list.
    Raises ValueError if quarter-finalists already exist.
    """
    if len(quarterfinalists) < 1:
        # 15 CPU team globals defined earlier in the file; *team* is the 16th.
        roundof16teams = [WAL, AUT, RSA, PAK, MAL, IRL, NZL, ESP, ENG, IND, GER, NED, BEL, ARG, AUS]
        roosmatchday = []
        opponent = random.choice(roundof16teams)
        roundof16teams.pop(roundof16teams.index(opponent))
        otherteams = sorted(roundof16teams, key=lambda team: team.name)
        i = 0
        print(team.name, " v ", opponent.name)
        time.sleep(1)
        while i < 6:
            # Alternate the list direction and pop from around the middle so
            # the pairings are not simply alphabetical.
            otherteams.reverse()
            hometeam = otherteams.pop(int((len(otherteams)-1)/2))
            awayteam = otherteams.pop(int((len(otherteams)+1)/2))
            roosmatchday.append((hometeam, awayteam))
            print(hometeam.name, " v ", awayteam.name)
            time.sleep(1)
            i += 1
        # Two teams remain; they form the last tie.
        lasthometeam = otherteams.pop()
        lastawayteam = otherteams.pop()
        roosmatchday.append((lasthometeam, lastawayteam))
        print(lasthometeam.name, " v ", lastawayteam.name)
        teamt1, teamt2 = zip(*roosmatchday)
        team1 = list(teamt1)
        team2 = list(teamt2)
        q = 0
        print()
        print("Results:")
        while q < 7:
            time.sleep(1)
            print()
            roogames = CPU_match(team1.pop(), team2.pop())
            quarterfinalists.append(roogames)
            q += 1
    else:
        # NOTE(review): the state is cleared *before* raising, so calling the
        # function again after the error re-runs the draw — confirm intended.
        quarterfinalists.clear()
        raise ValueError('You already have the Quarter-finalists')
def create_semifinalists():
    """Return a fresh, empty list that will hold the semi-finalist teams."""
    return []
# Module-level tournament state, filled by quarter_finals_draw().
semifinalists = create_semifinalists()
def quarter_finals_draw(team):
    """Draw and simulate the quarter-finals for the CPU teams.

    Pairs the player's *team* with a random quarter-finalist (tie printed,
    not simulated here), pairs off the remaining six CPU teams, plays those
    3 ties with CPU_match and appends the winners to ``semifinalists``.
    Raises ValueError if semi-finalists already exist.
    """
    if len(semifinalists) < 1:
        quarterfinalteams = list(quarterfinalists)
        sfmatchday = []
        opponent = random.choice(quarterfinalteams)
        quarterfinalteams.pop(quarterfinalteams.index(opponent))
        otherteams2 = sorted(quarterfinalteams, key=lambda team: team.name)
        i = 0
        print(team.name, " v ", opponent.name)
        time.sleep(1)
        while i < 2:
            # Same middle-popping shuffle as in round_of_16_draw.
            otherteams2.reverse()
            hometeam = otherteams2.pop(int((len(otherteams2)-1)/2))
            awayteam = otherteams2.pop(int((len(otherteams2)+1)/2))
            sfmatchday.append((hometeam, awayteam))
            print(hometeam.name, " v ", awayteam.name)
            time.sleep(1)
            i += 1
        lasthometeam = otherteams2.pop()
        lastawayteam = otherteams2.pop()
        sfmatchday.append((lasthometeam, lastawayteam))
        print(lasthometeam.name, " v ", lastawayteam.name)
        teamt1, teamt2 = zip(*sfmatchday)
        team1 = list(teamt1)
        team2 = list(teamt2)
        q = 0
        print()
        print("Results:")
        while q < 3:
            time.sleep(1)
            print()
            quarterfinalgames = CPU_match(team1.pop(), team2.pop())
            semifinalists.append(quarterfinalgames)
            q += 1
    else:
        # NOTE(review): state cleared before raising — see round_of_16_draw.
        semifinalists.clear()
        raise ValueError('You already have the Semi-finalists')
def create_finalists():
    """Return a fresh, empty list that will hold the finalist teams."""
    return []
# Module-level tournament state, filled by semi_finals_draw().
finalists = create_finalists()
def semi_finals_draw(team):
    """Draw the semi-finals: the player's team versus a random semi-finalist,
    plus one CPU tie whose winner is appended to ``finalists``.

    Raises ValueError if finalists already exist.
    """
    if len(finalists) < 1:
        semifinalteams = list(semifinalists)
        fmatchday = []
        opponent = random.choice(semifinalteams)
        semifinalteams.pop(semifinalteams.index(opponent))
        otherteams3 = sorted(semifinalteams, key=lambda team: team.name)
        i = 0  # NOTE(review): unused; kept from the pairing loops above.
        time.sleep(1)
        print(team.name, " v ", opponent.name)
        # Only two CPU teams remain at this stage: they form the other tie.
        lasthometeam = otherteams3.pop()
        lastawayteam = otherteams3.pop()
        fmatchday.append((lasthometeam, lastawayteam))
        time.sleep(1)
        print(lasthometeam.name, " v ", lastawayteam.name)
        teamt1, teamt2 = zip(*fmatchday)
        team1 = list(teamt1)
        team2 = list(teamt2)
        print()
        print("Results:")
        print()
        semifinalgames = CPU_match(team1.pop(), team2.pop())
        finalists.append(semifinalgames)
    else:
        # NOTE(review): state cleared before raising — see round_of_16_draw.
        finalists.clear()
        raise ValueError('You already have the results for your finalists')
def commentary(team, otherteam):
    """Print one randomly chosen line of match commentary for an attack by *team*.

    A weighted random player is picked from each side, spliced into one of
    several template sentences, and one of the resulting candidate lines is
    printed (weights in ``probs2``).  Returns None.

    BUG FIX: the original built each pick via
    ``str(random.choices(...)).replace('[','').replace(']','').replace("'",'')``,
    which also stripped apostrophes/brackets out of player names.  Picks now
    use ``random.choices(...)[0]`` directly (same RNG consumption, same
    distribution, names left intact).
    """
    teamplayers = [team.player2.name, team.player3.name, team.player4.name, team.player5.name]
    otherteamplayers = [otherteam.player2.name, otherteam.player3.name, otherteam.player4.name, otherteam.player5.name]
    probs = [0.1, 0.225, 0.225, 0.45]
    probs2 = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1]
    GSFPC = [' with just the keeper to beat!', ' hits it from the top of the D.', ' there to tap it in at the back post.']
    PFCC = [' in possesion at the moment passing it round the back.', ' win a long corner.', ' under pressure now.']
    PFPC = [' plays a long ball forward', ' cuts in from the right.', ' cuts in from the left.']
    PEPPC = [' goes round ', ' intercepts the ball due to a poor pass by ']
    APFPC = [' centres it from the baseline.', ' slaps it to the back post.', ' wins a penalty corner.']
    teamplayer = random.choices(teamplayers, weights=probs, k=1)[0]
    otherteamplayer = random.choices(otherteamplayers, weights=probs, k=1)[0]
    goalscoring1 = random.choices(GSFPC)[0]
    possession2 = random.choices(PFCC)[0]
    possession1 = random.choices(PFPC)[0]
    possession6 = random.choices(APFPC)[0]
    possession5 = random.choices(PEPPC)[0]
    scoringchancecom1 = teamplayer + goalscoring1
    possessioncom3 = 'Lovely bit of skill from ' + teamplayer + ' to get out of a sticky situation.'
    possessioncom2 = team.name + possession2
    possessioncom1 = teamplayer + possession1
    possessioncom4 = 'Great pass from ' + teamplayer
    possessioncom5 = teamplayer + possession5 + otherteamplayer
    possessioncom6 = teamplayer + possession6
    scoringchancecom2 = teamplayer + ' gives away a foul in the D! Penalty corner to ' + otherteam.name
    possessioncomlist = [possessioncom1, possessioncom2, possessioncom3, possessioncom4, possessioncom5, possessioncom6, scoringchancecom1, scoringchancecom2]
    print(" ".join(random.choices(possessioncomlist, weights=probs2, k=1)))
def scoring_chance(team1, team2):
    """Decide which side wins the next attacking opportunity.

    Each team rolls a uniform integer in ``[1, overall]``; the higher roll's
    team name is returned.  On a tied roll, None is returned (no attack).
    """
    roll_one = random.randint(1, team1.overall)
    roll_two = random.randint(1, team2.overall)
    if roll_one == roll_two:
        return None
    return team1.name if roll_one > roll_two else team2.name
def Match(team1, team2):
    """Simulates a match in real-time with teams inputted, if there is a draw at the
    end of the game the result will be decided by a next goal wins format.

    Prints kick-off/half-time/full-time commentary and the score; returns
    None (the result is only printed).

    Fixes over the original:
      * team-name comparisons use ``==`` instead of ``is`` (string identity
        only worked by accident of CPython interning);
      * in golden goal, a *team1* attack now calls ``commentary(team1, team2)``
        (the original copy-pasted the team2 branch's arguments);
      * scorer/comment picks use ``random.choices(...)[0]`` instead of the
        ``str(...).replace(...)`` hack that mangled names with apostrophes.
    """
    minute = 1
    team1players = [team1.player2.name, team1.player3.name, team1.player4.name, team1.player5.name]
    team2players = [team2.player2.name, team2.player3.name, team2.player4.name, team2.player5.name]
    probs = [0.1, 0.225, 0.225, 0.45]
    team1score = []
    team2score = []
    GSFPC = [" with just the keeper to beat!", " hits it from the top of the D.", " there to tap it in at the back post."]
    potentacy = ((team1.attacking+team2.attacking)*(team1.defending+team2.defending))/(10*(team1.overall+team2.overall)**2)
    print("Kick-Off")
    print()
    while minute <= 70:
        attackingteam = scoring_chance(team1, team2)
        if attackingteam == team1.name:
            if minute % 5 == 0:
                # A flavour line every five minutes.
                commentary(team1, team2)
                print()
                time.sleep(1)
            team1goal = random.randint(1, team1.attacking)*potentacy
            team2defend = random.randint(1, team2.defending)
            if team1goal > team2defend:
                team1score.append("team1")
                scorer1 = random.choices(team1players, weights=probs)[0]
                comment1 = random.choices(GSFPC)[0]
                scorercommentary = scorer1 + comment1
                print(scorercommentary)
                print()
                time.sleep(1)
                print(scorer1, minute, "'")
                print()
                time.sleep(1)
        if attackingteam == team2.name:
            if minute % 5 == 0:
                commentary(team2, team1)
                print()
                time.sleep(1)
            team2goal = random.randint(1, team2.attacking)*potentacy
            team1defend = random.randint(1, team1.defending)
            if team2goal > team1defend:
                team2score.append("team2")
                scorer2 = random.choices(team2players, weights=probs)[0]
                comment1 = random.choices(GSFPC)[0]
                scorercommentary = scorer2 + comment1
                print(scorercommentary)
                print()
                time.sleep(1)
                print(" ", minute, "'", scorer2)
                print()
                time.sleep(1)
        minute += 1
        time.sleep(0.5)
        if minute == 35:
            print("Half-time: ", team1.name, len(team1score), " - ", len(team2score), team2.name)
            time.sleep(5)
            print()
            print("We are underway here for the second half of", team1.name, " v ", team2.name)
            time.sleep(2)
            print()
    print("Full-time: ", team1.name, len(team1score), " - ", len(team2score), team2.name)
    print()
    time.sleep(2)
    if len(team1score) == len(team2score):
        print("It's all square here after full time. We are going to golden goal!")
        print()
        while len(team1score) == len(team2score):
            attackingteam = scoring_chance(team1, team2)
            if attackingteam == team1.name:
                if minute % 5 == 0:
                    commentary(team1, team2)  # BUG FIX: was commentary(team2, team1)
                    print()
                    time.sleep(1)
                team1goal = random.randint(1, team1.attacking)*potentacy
                team2defend = random.randint(1, team2.defending)
                if team1goal > team2defend:
                    team1score.append("team1")
                    scorer1 = random.choices(team1players, weights=probs)[0]
                    comment1 = random.choices(GSFPC)[0]
                    scorercommentary = scorer1 + comment1
                    print(scorercommentary)
                    print()
                    time.sleep(1)
                    print(scorer1, minute, "'")
                    print()
                    time.sleep(1)
            if attackingteam == team2.name:
                if minute % 5 == 0:
                    commentary(team2, team1)
                    print()
                    time.sleep(1)
                team2goal = random.randint(1, team2.attacking)*potentacy
                team1defend = random.randint(1, team1.defending)
                if team2goal > team1defend:
                    team2score.append("team2")
                    scorer2 = random.choices(team2players, weights=probs)[0]
                    comment1 = random.choices(GSFPC)[0]
                    scorercommentary = scorer2 + comment1
                    print(scorercommentary)
                    print()
                    time.sleep(1)
                    print(" ", minute, "'", scorer2)
                    print()
                    time.sleep(1)
            minute += 1
            time.sleep(0.5)
        if len(team1score) > len(team2score):
            print(team1.name, "have won it in extra time unbelievable scenes!")
            print()
        if len(team1score) < len(team2score):
            print(team2.name, "have won it in extra time unbelievable scenes!")
            print()
    print("Final Score: ", team1.name, len(team1score), " - ", len(team2score), team2.name)
7956aa35350aad888e82bec3a75bde3dbdcf72a0 | 1,772 | py | Python | test.py | kchalkowski/python-translation-exercise | daed72ad2eb32443f64a84fdcbb1486bb2ba2fe4 | [
"CC-BY-4.0"
] | null | null | null | test.py | kchalkowski/python-translation-exercise | daed72ad2eb32443f64a84fdcbb1486bb2ba2fe4 | [
"CC-BY-4.0"
] | null | null | null | test.py | kchalkowski/python-translation-exercise | daed72ad2eb32443f64a84fdcbb1486bb2ba2fe4 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
import sys
def translate_sequence(rna_sequence, genetic_code):
    """Translates a sequence of RNA into a sequence of amino acids.

    Translates `rna_sequence` into a string of amino acids, according to the
    `genetic_code` given as a dict. Translation begins at the first position
    of `rna_sequence` and continues until the first stop codon (UAA, UAG or
    UGA) is encountered or the end of `rna_sequence` is reached.

    If `rna_sequence` is less than 3 bases long, or starts with a stop codon,
    an empty string is returned.

    Fix over the original: the function previously ignored both parameters,
    shadowed them with a hardcoded example sequence/codon table, printed the
    translation instead of returning it, and never handled stop codons mid
    sequence.  It now implements the documented contract.  *rna_sequence*
    may be a plain base string or an iterable of codon strings.
    """
    stop_codons = ("UAA", "UAG", "UGA")
    # "".join flattens a tuple/list of codons; for a plain string it is a no-op.
    rna = "".join(rna_sequence).upper()
    amino_acids = []
    # Step through complete codons only; a trailing partial codon is ignored.
    for i in range(0, len(rna) - 2, 3):
        codon = rna[i:i + 3]
        if codon in stop_codons:
            break
        amino_acids.append(genetic_code[codon])
    return "".join(amino_acids)
| 38.521739 | 129 | 0.514673 |
7956aa9af132c90477862a416ba73fc736ec14d2 | 11,695 | py | Python | tadacnv/lib/utils.py | jakob-he/TADA | 8db4e0af19b4a32c7071c175a21bbafe3b02cb31 | [
"MIT"
] | 3 | 2021-12-24T18:40:14.000Z | 2022-03-05T22:22:38.000Z | tadacnv/lib/utils.py | jakob-he/TADA | 8db4e0af19b4a32c7071c175a21bbafe3b02cb31 | [
"MIT"
] | 1 | 2020-11-30T09:49:08.000Z | 2021-05-14T12:28:04.000Z | tadacnv/lib/utils.py | jakob-he/TADA | 8db4e0af19b4a32c7071c175a21bbafe3b02cb31 | [
"MIT"
] | 2 | 2021-12-07T17:25:55.000Z | 2021-12-24T18:40:16.000Z | """Helper functions to parse BED files."""
# standart libraries
import pathlib
import json
import gzip
# own libraries
from .bed import Bed
from .bed_class import BedClass
# third party libararies
import numpy as np
import pandas as pd
from scipy import stats, linalg
# sklearn
from sklearn.ensemble._forest import _generate_unsampled_indices
from sklearn.metrics import r2_score
import sys, os
def objects_from_file(path, cls_string, column_names=[], **kwargs):
    """Load a BED file and return a list of Bed objects.

    Args:
        path: Path to the BED file (.bed/.txt/.vcf, optionally gzip-compressed).
        cls_string: A string that matches one of the bed classes (e.g. Gene).
        column_names: Names for the annotation columns (overwritten by a
            '#'-header line if the file contains one).
            NOTE(review): mutable default argument — harmless here because it
            is only reassigned, never mutated, but worth replacing with None.
        **kwargs: Forwarded to the bed class constructor (plus a ``vcf`` flag).
    """
    path = validate_file(path)
    bed_class = BedClass.from_str(cls_string).get_class()

    if path.suffix == '.gz':
        open_path = gzip.open(path.absolute())
    else:
        open_path = path.open()

    bed_objects = []
    vcf = False
    for line in open_path:
        # gzip yields bytes; decode so the parsing below sees text.
        if not type(line) == str:
            line = line.decode('utf-8')
        # handle headers of bed CNV files: '##' marks VCF-style metadata,
        # a single '#' line carries the annotation column names (col 4+).
        if line.startswith('##'):
            vcf = True
            continue
        if line.startswith('#'):
            column_names = line.strip().split('\t')[3:]
            continue
        bed_objects.append(
            bed_class(line, column_names, **dict(kwargs, vcf=vcf)))
    open_path.close()
    return bed_objects
def validate_file(path):
    """Check that *path* points to an existing BED-like file.

    Returns the path wrapped as a ``pathlib.Path``.  Raises if the path does
    not exist or does not carry one of the expected extensions.
    """
    path = pathlib.Path(path)
    if not path.is_file():
        raise Exception(f'{path} is not a valid path')
    # Preliminary check by extension only; the file content is not inspected.
    if path.suffix not in ('.bed', '.txt', '.gz', '.vcf'):
        raise Exception(f'{path} is not a bed,txt or gz file')
    return path
def create_chr_dictionary_from_beds(beds: [Bed]):
    """Group Bed objects by chromosome.

    Returns a dict mapping each chromosome name to the sorted list of the
    Bed objects located on it.
    """
    grouped = {}
    for bed in beds:
        grouped.setdefault(bed.chr, []).append(bed)
    return {chrom: sorted(entries) for chrom, entries in grouped.items()}
def is_in(bed_list, reference_bed):
    """Return True if the first element of *bed_list* starts before
    *reference_bed* ends (i.e. it may still overlap the reference)."""
    if not bed_list:
        # An empty queue cannot overlap anything.
        return False
    return bed_list[0].start < reference_bed.end
def to_bed(bed_elements, output, label=''):
    """Saves the input as bed file.

    The input can either be a dict with chromosomes as keys and lists of bed
    elements as values, or a flat list of bed elements.  Anything else is
    rejected with a printed message.
    """
    kind = type(bed_elements)
    if kind == list:
        bedlist_to_bed(bed_elements, output, label)
    elif kind == dict:
        chrom_dict_to_bed(bed_elements, output, label)
    else:
        print('The input has to be a dictionary or list with Bed elements.')
def bedlist_to_bed(bedlist, output, label):
    """Write each bed element as a TAB-separated ``chr start end [label]`` line."""
    with open(output, 'w') as handle:
        for element in bedlist:
            record = f'{element.chr}\t{element.start}\t{element.end}'
            if label:
                record += f'\t{label}'
            handle.write(record + '\n')
def chrom_dict_to_bed(chrom_dict, output, label):
    """Write every bed element of a chromosome-keyed dict as TAB-separated lines."""
    with open(output, 'w') as handle:
        for chrom in chrom_dict:
            for element in chrom_dict[chrom]:
                record = f'{element.chr}\t{element.start}\t{element.end}'
                if label:
                    record += f'\t{label}'
                handle.write(record + '\n')
def reduce_dict(dictionary, keys):
    """Return a dict restricted to *keys*; missing keys map to empty lists."""
    return {key: dictionary.get(key, []) for key in keys}
def create_annotated_bed_dict(bed_dict, annotation_dicts, annotate=False, gene_annotation=False, feature_type='extended'):
    """Annotates every BED element with the overlapping annotations.

    For each BED element in a chromosome, iterate through the sorted
    annotations while the start of any leading annotation is less than the
    end of the BED element.  Three cases per annotation:

    1. Ends before the BED element starts -> discard (overlaps nothing ahead).
    2. Starts in/before the element and ends within it -> consume it into the
       element's annotations.
    3. Overlaps but extends past the element's end -> record it AND keep it
       queued for the following elements.

    Args:
        bed_dict: chromosome -> sorted list of BED elements.
        annotation_dicts: annotation name -> {chromosome -> sorted elements}.
        annotate: call ``element.annotate(feature_type)`` after collecting.
        gene_annotation: additionally filter exons/interactions (gene input).
        feature_type: forwarded to ``annotate``.
    """
    # Restrict every annotation dict to chromosomes present in bed_dict.
    annotation_dicts = {key: reduce_dict(
        dictionary, bed_dict.keys()) for key, dictionary in annotation_dicts.items()}

    # iterate through chromsomes
    for chrom in bed_dict:
        for bed_element in bed_dict[chrom]:
            # Per-track queue of annotations that extend past this element.
            annotation_queue = {}
            for annotation_name, annotation_dict in annotation_dicts.items():
                bed_element.annotations[annotation_name] = []
                annotation_queue[annotation_name] = []
            while any(is_in(annotation_dict[chrom], bed_element) for annotation_dict in annotation_dicts.values()):
                for annotation_name, annotation_dict in annotation_dicts.items():
                    if is_in(annotation_dict[chrom], bed_element):
                        if annotation_dict[chrom][0].end < bed_element.start:
                            # Case 1: no element ahead can overlap it — drop.
                            annotation_dict[chrom].pop(0)
                        elif annotation_dict[chrom][0].end <= bed_element.end:
                            # Case 2: fully consumed by this element.
                            bed_element.annotations[annotation_name].append(
                                annotation_dict[chrom].pop(0))
                        elif annotation_dict[chrom][0].end > bed_element.end:
                            # Case 3: overlaps but spans past the element.
                            bed_element.annotations[annotation_name].append(
                                annotation_dict[chrom][0])
                            annotation_queue[annotation_name].append(
                                annotation_dict[chrom].pop(0))
            # Requeue spanning annotations for the next element.
            # NOTE(review): indentation was ambiguous in the source paste; as
            # written this only restores the track bound to the last-iterated
            # annotation_name — confirm multi-track behaviour upstream.
            annotation_dict[chrom] = annotation_queue[annotation_name] + \
                annotation_dict[chrom]
            if annotate:
                bed_element.annotate(feature_type)
            if gene_annotation:
                bed_element.filter_exons()
                bed_element.filter_interactions()
    return bed_dict
def annotate_cnvs(tad_dict, cnv_dict):
    """Finds all TADs overlapping with each CNV, then returns a new chromosome
    dictionary with the annotated CNVs.

    Iterates TADs one chromosome at a time.  For each TAD, every CNV whose
    start precedes the TAD's end falls into one of four cases:

    1. CNV ends before the TAD -> not inside any remaining TAD.
    2. CNV ends in the TAD but starts before it -> TAD appended, CNV done.
    3. CNV starts and ends in the TAD -> TAD appended, CNV done.
    4. CNV starts before the TAD but ends after it -> TAD appended, CNV kept
       for the following TADs.
    """
    # reduce the cnvs to chromsomes were tads are available
    cnv_dict = reduce_dict(cnv_dict, tad_dict.keys())

    annotated_cnvs = []
    # iterate through CNVs one chromsome at a time
    for chrom in cnv_dict:
        for tad in tad_dict[chrom]:
            # CNVs spanning past this TAD, to be replayed against later TADs.
            cnv_queue = []
            while is_in(cnv_dict[chrom], tad):
                if cnv_dict[chrom][0].end <= tad.end and cnv_dict[chrom][0].end >= tad.start:
                    # Cases 2/3: the CNV ends inside this TAD.
                    cnv_dict[chrom][0].tads.append(tad)
                    annotated_cnvs.append(cnv_dict[chrom].pop(0))
                elif cnv_dict[chrom][0].end > tad.end:
                    # Case 4: spans beyond this TAD — keep it queued.
                    cnv_dict[chrom][0].tads.append(tad)
                    cnv_queue.append(cnv_dict[chrom].pop(0))
                else:
                    # Case 1: ends before the TAD starts — no overlap here.
                    annotated_cnvs.append(cnv_dict[chrom].pop(0))
            cnv_dict[chrom] = cnv_queue + cnv_dict[chrom]
        # CNVs beyond the last TAD of the chromosome are kept unannotated.
        annotated_cnvs.extend(cnv_dict[chrom])
    return create_chr_dictionary_from_beds(annotated_cnvs)
def getOverlap(interval_a, interval_b):
    """Return the length of the overlap of two intervals (0 when disjoint)."""
    lower = max(interval_a[0], interval_b[0])
    upper = min(interval_a[1], interval_b[1])
    return max(0, upper - lower)
def getDistance(interval_a, interval_b):
    """Return the gap between two intervals (negative when they overlap)."""
    gap_ab = interval_a[0] - interval_b[1]
    gap_ba = interval_b[0] - interval_a[1]
    return max(gap_ab, gap_ba)
def phi_coeff(array_1, array_2):
    """Compute the phi coefficient (mean-square contingency) of two binary arrays.

    Builds the 2x2 contingency table with ``pd.crosstab`` and applies
    phi = (n11*n00 - n01*n10) / sqrt(row/column marginal products).
    Both inputs must contain only the labels 0 and 1 (each present at least
    once in both arrays, or the table indexing below raises).

    Fix over the original: removed a leftover debug ``print(cont_tab)``.
    """
    cont_tab = pd.crosstab(array_1, array_2)
    return (cont_tab[1][1] * cont_tab[0][0] - cont_tab[0][1] * cont_tab[1][0]) / np.sqrt(sum(cont_tab[0]) * sum(cont_tab[1]) * sum(cont_tab[:][1]) * sum(cont_tab[:][0]))
def partial_corr(features, feature_type):
    """
    Returns the sample linear partial correlation coefficients between pairs of
    variables in the feature dataframe, controlling for the remaining variables.
    Implementation based on https://gist.github.com/fabianp/9396204419c7b638d38f.

    Args:
        features: pandas DataFrame or ndarray, shape (n, p).
            Each column is taken as a variable.
        feature_type: unused here; kept for interface compatibility with callers.
    Output:
        P_corr: ndarray, shape (p, p).
            P_corr[i, j] is the partial correlation of column i and column j
            controlling for the remaining columns.

    Fix over the original: ``np.float``/``np.bool`` were removed in NumPy
    1.24; the builtin ``float``/``bool`` are used instead.
    """
    C = features.values if isinstance(features, pd.DataFrame) else features
    p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
    for i in range(p):
        P_corr[i, i] = 1
        for j in range(i + 1, p):
            # Mask selecting the control variables (all columns but i and j).
            idx = np.ones(p, dtype=bool)
            idx[i] = False
            idx[j] = False
            # Regress each of columns i, j on the controls ...
            beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
            # ... and correlate the residuals.
            res_j = C[:, j] - C[:, idx].dot(beta_i)
            res_i = C[:, i] - C[:, idx].dot(beta_j)
            corr = stats.pearsonr(res_i, res_j)[0]
            P_corr[i, j] = corr
            P_corr[j, i] = corr
    return P_corr
def oob_classifier_accuracy(pipeline, X, y):
    """
    Compute out-of-bag (OOB) accuracy for a scikit-learn random forest
    classifier.

    For each tree the samples NOT drawn in its bootstrap are predicted, class
    probabilities are accumulated per sample, and the majority class is
    compared to ``y``.

    NOTE(review): ``_generate_unsampled_indices`` is a *private* sklearn API;
    newer sklearn versions require an additional ``n_samples_bootstrap``
    argument — verify against the pinned sklearn version.
    NOTE(review): ``pipeline[0:2].fit_transform`` re-fits the preprocessing
    steps on each tree's OOB subset — confirm this is intended rather than
    ``transform`` with the already-fitted steps.
    """
    n_samples = len(X)
    n_classes = len(np.unique(y))
    # Accumulated class-probability votes, one row per sample.
    predictions = np.zeros((n_samples, n_classes))
    for tree in pipeline['cls'].estimators_:
        unsampled_indices = _generate_unsampled_indices(
            tree.random_state, n_samples)
        tree_preds = tree.predict_proba(
            pipeline[0:2].fit_transform(X[unsampled_indices, :]))
        predictions[unsampled_indices] += tree_preds
    predicted_class_indexes = np.argmax(predictions, axis=1)
    predicted_classes = [pipeline['cls'].classes_[i]
                         for i in predicted_class_indexes]
    oob_score = np.mean(y == predicted_classes)
    return oob_score
| 40.749129 | 169 | 0.643437 |
7956abdc7386545d70cbf6b0a3cd1ef61182be50 | 2,294 | py | Python | environments/sumo/TrafficLightPhases.py | Denesh1998/pytorch-a2c-ppo-acktr-gail | fc187845a4c562cbf9b2b2b3afb19b4fdda07a90 | [
"MIT"
] | null | null | null | environments/sumo/TrafficLightPhases.py | Denesh1998/pytorch-a2c-ppo-acktr-gail | fc187845a4c562cbf9b2b2b3afb19b4fdda07a90 | [
"MIT"
] | null | null | null | environments/sumo/TrafficLightPhases.py | Denesh1998/pytorch-a2c-ppo-acktr-gail | fc187845a4c562cbf9b2b2b3afb19b4fdda07a90 | [
"MIT"
] | null | null | null | from _pyio import IOBase
import xml.etree.ElementTree as ElementTree
class TrafficLightPhases():
    '''
    Contains all phases of all traffic lights that do not involve yellow.
    Usually read from a file.
    The file follows the SUMO format from
    https://sumo.dlr.de/wiki/Simulation/Traffic_Lights#Defining_New_TLS-Programs

    We search for <tlLogic> elements in the XML and collect all settings.
    Each tlLogic element must have a unique id (traffic light reference).

    NOTE(review): ``findall('tlLogic')`` only matches *direct children* of the
    root element, not arbitrary depth as the original comment claimed — use
    ``.//tlLogic`` if nested elements must be supported.
    '''

    def __init__(self, filename: str):
        '''
        @param filename the file containing XML text. NOTE this really
        should not be a "filename" but a input stream; unfortunately
        ElementTree does not support this.
        @raise Exception if two tlLogic elements share an id.
        '''
        tree = ElementTree.parse(filename)
        self._phases = {}
        for element in tree.getroot().findall('tlLogic'):
            intersectionid = element.get('id')
            if intersectionid in self._phases:
                # BUG FIX: the original concatenated the *builtin* ``id``
                # function here, raising TypeError instead of this message.
                raise Exception('file ' + filename + ' contains multiple tlLogic elements with id=' + intersectionid)
            newphases = []
            for item in element:
                state = item.get('state')
                if 'y' in state or 'Y' in state:
                    continue  # ignore ones with yY: handled by us.
                newphases.append(state)
            self._phases[intersectionid] = newphases

    def getIntersectionIds(self) -> list:
        '''
        @return all intersection ids, sorted (list of str)
        '''
        return sorted(list(self._phases.keys()))

    def getNrPhases(self, intersectionId: str) -> int:
        '''
        @param intersectionId the intersection id
        @return number of available phases (int).
        If n is returned, phases 0..n-1 are available.
        '''
        return len(self._phases[intersectionId])

    def getPhase(self, intersectionId: str, phasenr: int) -> str:
        """
        @param intersectionId the intersection id
        @param phasenr the short number given to this phase
        @return the phase string (eg 'rrGG') for given lightid and phasenr.
        Usually this is the index number in the file, starting at 0.
        """
        return self._phases[intersectionId][phasenr]
| 36.412698 | 105 | 0.614211 |
7956ae3455af843e87dbaaaeec8d3542454e9f0f | 3,746 | py | Python | tests/clvm/coin_store.py | keypool-com/chia-blockchain | 8c96651a78a0ef6694197c0070f4631fc4b1bf45 | [
"Apache-2.0"
] | null | null | null | tests/clvm/coin_store.py | keypool-com/chia-blockchain | 8c96651a78a0ef6694197c0070f4631fc4b1bf45 | [
"Apache-2.0"
] | null | null | null | tests/clvm/coin_store.py | keypool-com/chia-blockchain | 8c96651a78a0ef6694197c0070f4631fc4b1bf45 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict, Iterator, List
from chia.consensus.blockchain_check_conditions import blockchain_check_conditions_dict
from chia.types.announcement import Announcement
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.spend_bundle import SpendBundle
from chia.util.condition_tools import conditions_dict_for_solution, created_announcements_for_conditions_dict
from chia.util.ints import uint32, uint64
class BadSpendBundleError(Exception):
    """Raised when a spend bundle fails clvm or condition validation."""
    pass
@dataclass
class CoinTimestamp:
    """A point in chain time used when farming/spending test coins."""
    # Timestamp value (seconds) recorded on the CoinRecord.
    seconds: int
    # Block height; also used to derive a farmed coin's parent id.
    height: int
class CoinStore:
    """Minimal in-memory coin database for clvm tests.

    Tracks ``CoinRecord``s by coin name and keeps a puzzle-hash index so
    coins can be looked up by puzzle hash.
    """

    def __init__(self):
        # coin name (bytes32) -> CoinRecord
        self._db: Dict[bytes32, CoinRecord] = dict()
        # puzzle hash -> list of coin names carrying that puzzle hash
        self._ph_index = defaultdict(list)

    def farm_coin(self, puzzle_hash: bytes32, birthday: CoinTimestamp, amount: int = 1024) -> Coin:
        """Mint a new coin; the parent id is derived from the birth height."""
        parent = birthday.height.to_bytes(32, "big")
        coin = Coin(parent, puzzle_hash, uint64(amount))
        self._add_coin_entry(coin, birthday)
        return coin

    def validate_spend_bundle(
        self,
        spend_bundle: SpendBundle,
        now: CoinTimestamp,
    ) -> int:
        """Validate every coin solution in *spend_bundle* against its conditions.

        Raises BadSpendBundleError on clvm or condition failure; returns 0
        on success.
        """
        # this should use blockchain consensus code
        announcements: List[Announcement] = []
        conditions_dicts = []
        # Pass 1: run every solution through clvm and collect announcements.
        for coin_solution in spend_bundle.coin_solutions:
            err, conditions_dict, cost = conditions_dict_for_solution(
                coin_solution.puzzle_reveal, coin_solution.solution
            )
            if conditions_dict is None:
                raise BadSpendBundleError(f"clvm validation failure {err}")
            conditions_dicts.append(conditions_dict)
            announcements.extend(created_announcements_for_conditions_dict(conditions_dict, coin_solution.coin.name()))

        # Pass 2: check each solution's conditions against chain state at *now*.
        for coin_solution, conditions_dict in zip(spend_bundle.coin_solutions, conditions_dicts):
            prev_transaction_block_height = now.height
            timestamp = now.seconds
            coin_record = self._db[coin_solution.coin.name()]
            err = blockchain_check_conditions_dict(
                coin_record, announcements, conditions_dict, uint32(prev_transaction_block_height), uint64(timestamp)
            )
            if err is not None:
                raise BadSpendBundleError(f"condition validation failure {err}")
        return 0

    def update_coin_store_for_spend_bundle(self, spend_bundle: SpendBundle, now: CoinTimestamp):
        """Validate the bundle, then mark removals as spent and add new coins."""
        err = self.validate_spend_bundle(spend_bundle, now)
        if err != 0:
            raise BadSpendBundleError(f"validation failure {err}")
        for spent_coin in spend_bundle.removals():
            coin_name = spent_coin.name()
            coin_record = self._db[coin_name]
            # CoinRecord is immutable; store an updated copy.
            self._db[coin_name] = replace(coin_record, spent_block_index=now.height, spent=True)
        for new_coin in spend_bundle.additions():
            self._add_coin_entry(new_coin, now)

    def coins_for_puzzle_hash(self, puzzle_hash: bytes32) -> Iterator[Coin]:
        """Yield every coin whose puzzle hash equals *puzzle_hash*."""
        for coin_name in self._ph_index[puzzle_hash]:
            coin_entry = self._db[coin_name]
            assert coin_entry.coin.puzzle_hash == puzzle_hash
            yield coin_entry.coin

    def all_coins(self) -> Iterator[Coin]:
        """Yield every coin ever added to the store (spent or unspent)."""
        for coin_entry in self._db.values():
            yield coin_entry.coin

    def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
        """Insert a fresh CoinRecord for *coin* and index it by puzzle hash."""
        name = coin.name()
        assert name not in self._db
        self._db[name] = CoinRecord(coin, uint32(birthday.height), uint32(0), False, False, uint64(birthday.seconds))
        self._ph_index[coin.puzzle_hash].append(name)
| 40.717391 | 119 | 0.701281 |
7956aee93ebd8c3c12475be3f53d9b5c92283c81 | 5,184 | py | Python | flask_redoc/__init__.py | whitebarry/flask-redoc | d2ecc82e106a298ba8cdfab221e7c7d75ddfc76e | [
"MIT"
] | 18 | 2020-03-16T14:01:30.000Z | 2021-08-24T01:12:11.000Z | flask_redoc/__init__.py | whitebarry/flask-redoc | d2ecc82e106a298ba8cdfab221e7c7d75ddfc76e | [
"MIT"
] | 3 | 2020-10-13T18:56:20.000Z | 2021-11-22T21:45:10.000Z | flask_redoc/__init__.py | whitebarry/flask-redoc | d2ecc82e106a298ba8cdfab221e7c7d75ddfc76e | [
"MIT"
] | 6 | 2020-05-01T00:45:40.000Z | 2022-03-25T05:02:40.000Z | """flask_redoc Module."""
import copy
import json
import os
import yaml
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
from flask import Blueprint, render_template
from jsonmerge import merge
from .version import __version__
class Redoc(object):
    """Flask extension serving ReDoc API documentation.

    Configuration is merged from DEFAULT_CONFIG, the Flask app's ``REDOC``
    config key, and (optionally) a YAML/JSON spec file.
    """

    # Defaults; any key can be overridden via app.config['REDOC'].
    DEFAULT_CONFIG = {
        'endpoint': 'docs',
        'spec_route': '/docs',
        'static_url_path': '/redoc_static',
        'title': 'ReDoc',
        'version': '1.0.0',
        'openapi_version': '3.0.2',
        'use_cdn': True,
        'info': dict(),
        'plugins': [FlaskPlugin()],
        'marshmallow_schemas': list()
    }

    def __init__(self, app=None, spec_file=None, config=None):
        """Init the Redoc object.

        :param spec_file: spec file path
        :param app: Flask app
        :param config: dictionary with Redoc configuration
        """
        self.app = app
        self.spec_file = spec_file
        # NOTE(review): a falsy config ({}) silently falls back to the
        # defaults because of ``or`` — confirm intended.
        self.config = config or self.DEFAULT_CONFIG.copy()
        self.spec = None
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Initialize the Redoc extension on *app*.

        :param app: Flask app
        """
        self.app = app
        self.config.update(self.app.config.get('REDOC', {}))

        if len(self.config['marshmallow_schemas']) > 0:
            self.config['plugins'].append(MarshmallowPlugin())

        if self.spec_file is not None:
            self.spec_file = self.load_spec_file(self.spec_file)
            # Title/version/openapi metadata come from the spec file when given.
            self.config['title'] = self.spec_file['info']['title']
            self.config['version'] = self.spec_file['info']['version']
            self.config['openapi_version'] = self.spec_file['openapi']
            self.config['info'] = self.spec_file['info']

        self.spec = APISpec(title=self.config['title'],
                            version=self.config['version'],
                            openapi_version=self.config['openapi_version'],
                            info=self.config['info'],
                            plugins=self.config['plugins'])

        # View docstrings can only be harvested once all routes exist.
        self.app.before_first_request(self.docstrings_to_openapi)

        bp = Blueprint(self.config.get('endpoint', 'redoc'),
                       __name__,
                       url_prefix=self.config.get('url_prefix', None),
                       template_folder=self.config.get(
                           'template_folder', 'templates'),
                       static_folder=self.config.get(
                           'static_folder', 'static'),
                       static_url_path=self.config.get('static_url_path'))

        bp.add_url_rule(self.config.get('spec_route'),
                        'docs', view_func=self.docs_view)
        bp.add_url_rule(self.config.get('spec_route') + '/json',
                        'docs_json', view_func=self.docs_json)
        self.app.register_blueprint(bp)

    def docs_view(self):
        """Render the docs.html template."""
        return render_template('docs.html',
                               spec_file=self.spec_file,
                               endpoint=self.config.get('endpoint', 'redoc'),
                               title=self.config.get('title', 'ReDoc'),
                               use_cdn=self.config.get('use_cdn', True))

    def docs_json(self):
        # Raw spec as a JSON-serializable dict.
        return self.spec_file

    def load_spec_file(self, filename):
        """Load the spec file.

        :param filename: spec filename (relative paths resolve under app.root_path)
        :return: spec as dict
        """
        if not filename.startswith('/'):
            filename = os.path.join(
                self.app.root_path,
                filename
            )
        with open(filename) as file:
            # Extension decides the parser: YAML for .yml/.yaml, else JSON.
            if filename.endswith(".yml") or filename.endswith(".yaml"):
                retval = yaml.safe_load(file)
            else:
                retval = json.load(file)
        return retval

    def docstrings_to_openapi(self):
        """Transform Flask docstring documentation to openapi spec."""
        for schema in self.config['marshmallow_schemas']:
            self.spec.components.schema(schema.__name__, schema=schema)
        for view_name, view_func in self.app.view_functions.items():
            if view_func.__doc__ is not None:
                self.spec.path(view=view_func)
        # Merge harvested paths into any file-provided spec; drop empties.
        self.spec_file = strip_empties_from_dict(merge(self.spec_file, self.spec.to_dict()))
def strip_empties_from_list(data):
    """Return a copy of *data* with empty values removed, recursing into
    nested lists and dicts. Values equal to None, '', [] or {} are dropped;
    0 and False are kept.
    """
    cleaned = []
    for item in data:
        if isinstance(item, dict):
            item = strip_empties_from_dict(item)
        elif isinstance(item, list):
            item = strip_empties_from_list(item)
        if item not in (None, '', [], {}):
            cleaned.append(item)
    return cleaned
def strip_empties_from_dict(data):
    """Return a copy of *data* with empty values removed, recursing into
    nested dicts and lists. Values equal to None, '', [] or {} are dropped;
    0 and False are kept.
    """
    cleaned = {}
    for key, value in data.items():
        if isinstance(value, dict):
            value = strip_empties_from_dict(value)
        elif isinstance(value, list):
            value = strip_empties_from_list(value)
        if value not in (None, '', [], {}):
            cleaned[key] = value
    return cleaned
| 32.198758 | 92 | 0.563465 |
7956aef896f98ee6c14c58adefdd2bee7190aa13 | 3,608 | py | Python | utility.py | hbdat/cvpr20_IMCL | a71b592e006778f5fff05976acd64b3b412c7f4a | [
"MIT"
] | 18 | 2020-07-12T14:19:51.000Z | 2022-03-28T12:04:28.000Z | utility.py | hbdat/cvpr20_IMCL | a71b592e006778f5fff05976acd64b3b412c7f4a | [
"MIT"
] | 5 | 2020-12-08T05:04:33.000Z | 2022-01-15T06:17:11.000Z | utility.py | hbdat/cvpr20_IMCL | a71b592e006778f5fff05976acd64b3b412c7f4a | [
"MIT"
] | 3 | 2020-07-24T14:37:30.000Z | 2021-07-20T05:39:26.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 12:34:41 2019
@author: badat
"""
from sklearn.metrics import average_precision_score
import numpy as np
import tensorflow as tf
#%%
def compute_AP(Prediction, Label, names):
    """Per-class average precision over the labelled entries.

    Only samples whose label is +1/-1 for a class are scored; classes with
    no positive sample keep an AP of 0.  ``names`` is accepted for API
    compatibility but unused here.
    """
    n_classes = Prediction.shape[1]
    ap = np.zeros(n_classes)
    for cls_idx in range(n_classes):
        scores = np.squeeze(Prediction[:, cls_idx])
        label = np.squeeze(Label[:, cls_idx])
        labelled = np.abs(label) == 1
        if np.sum(label > 0) == 0:
            # No positives for this class: its AP stays 0.
            continue
        binary_label = np.clip(label[labelled], 0, 1)
        ap[cls_idx] = average_precision_score(binary_label, scores[labelled])
    return ap
#%%
def LoadLabelMap(labelmap_path, dict_path):
    """Load the label list and the id -> description dictionary.

    ``labelmap_path`` holds one label per line; ``dict_path`` holds
    comma-separated ``id,description`` lines (quotes/whitespace stripped).
    """
    labelmap = [line.rstrip() for line in tf.gfile.GFile(labelmap_path)]
    label_dict = {}
    for line in tf.gfile.GFile(dict_path):
        parts = [part.strip(' "\n') for part in line.split(',', 1)]
        label_dict[parts[0]] = parts[1]
    return labelmap, label_dict
#%% Dataset
# Input resolution taken from the ResNet-v1-101 definition; height/width are
# module-level aliases used by PreprocessImage below.
image_size = resnet_v1.resnet_v1_101.default_image_size
height = image_size
width = image_size
def PreprocessImage(image, network='resnet_v1_101'):
    """Apply the slim evaluation-time preprocessing for *network* and fix the
    output shape to (image_size, image_size, 3)."""
    # If resolution is larger than 224 we need to adjust some internal resizing
    # parameters for vgg preprocessing.
    preprocessing_kwargs = {}
    preprocess = preprocessing_factory.get_preprocessing(name=network, is_training=False)
    processed = preprocess(image, image_size, image_size, **preprocessing_kwargs)
    processed.set_shape([image_size, image_size, 3])
    return processed
def read_img(img_id, data_path):
    """Read, decode and preprocess the JPEG at ``<data_path><img_id>.jpg``."""
    raw = tf.read_file(data_path + img_id + '.jpg', 'rb')
    decoded = tf.image.decode_jpeg(raw, channels=3)
    return PreprocessImage(decoded)
def parser_train(record):
    """Parse one training TFRecord into (img_id, image, label)."""
    schema = {'img_id': tf.FixedLenFeature([], tf.string),
              'label': tf.FixedLenFeature([], tf.string)}
    example = tf.parse_single_example(record, schema)
    img_id = example['img_id']
    label = tf.decode_raw(example['label'], tf.int32)
    return img_id, read_img(img_id, train_data_path), label
def parser_validation(record):
    """Parse one validation TFRecord into (img_id, image, label)."""
    schema = {'img_id': tf.FixedLenFeature([], tf.string),
              'label': tf.FixedLenFeature([], tf.string)}
    example = tf.parse_single_example(record, schema)
    img_id = example['img_id']
    label = tf.decode_raw(example['label'], tf.int32)
    return img_id, read_img(img_id, validation_data_path), label
#%%
def construct_dictionary(batch_attribute,batch_id,sparse_dict_Attribute_f,sparse_dict_img,n_neighbour):
    # For each row of batch_attribute, select n_neighbour entries of the
    # sparse dictionary by a clipped-attribute dot-product score, returning
    # the aligned attribute and image slices.
    # NOTE(review): `c` is a module-level constant not defined in this file
    # section -- confirm it is set before this is called. `batch_id` is
    # accepted but unused here.
    similar_score=np.matmul(np.clip(batch_attribute,-1/c,1),np.clip(np.transpose(sparse_dict_Attribute_f),-1/c,1))
    # np.argsort is ascending, so this takes the n_neighbour LOWEST scores.
    # NOTE(review): if "most similar" entries were intended, this may need a
    # descending sort -- confirm against the training code.
    m_similar_index=np.argsort(similar_score,axis=1)[:,0:n_neighbour]
    # Flatten so both returned arrays share one index list (row-major order).
    index_dict = m_similar_index.flatten()
    return sparse_dict_Attribute_f[index_dict,:],sparse_dict_img[index_dict,:,:]
def compute_feature_prediction_large_batch(img):
    # Run the TF graph over `img` in slices of `partition_size` to bound
    # memory use, then concatenate the partial predictions/features.
    # NOTE(review): relies on module-level globals (sess, Prediction,
    # features_concat, img_input_ph, partition_size) defined elsewhere.
    prediction_l = []
    feature_l = []
    # `// partition_size + 1` always issues one extra slice; when shape[0] is
    # a multiple of partition_size that last slice is empty -- confirm the
    # graph tolerates an empty batch.
    for idx_partition in range(img.shape[0]//partition_size+1):
        prediction_partition,feature_partition = sess.run([Prediction,features_concat],{img_input_ph:img[idx_partition*partition_size:(idx_partition+1)*partition_size,:,:,:]})
        prediction_l.append(prediction_partition)
        feature_l.append(feature_partition)
    prediction = np.concatenate(prediction_l)
    feature = np.concatenate(feature_l)
    return prediction,feature | 38.795699 | 176 | 0.693459 |
7956b08d2033ff275b83e1338b95b1e1ad9b0db5 | 2,425 | py | Python | Notebooks/SVM/zh-cn/Util.py | wangjiangtao-NJPI/MachineLearning | 78124b56a26ec68efb3c517a4a2420860b6e4a75 | [
"MIT"
] | 1,107 | 2016-09-21T02:18:36.000Z | 2022-03-29T02:52:12.000Z | Notebooks/SVM/zh-cn/Util.py | wangjiangtao-NJPI/MachineLearning | 78124b56a26ec68efb3c517a4a2420860b6e4a75 | [
"MIT"
] | 18 | 2016-12-22T10:24:47.000Z | 2022-03-11T23:18:43.000Z | Notebooks/SVM/zh-cn/Util.py | wangjiangtao-NJPI/MachineLearning | 78124b56a26ec68efb3c517a4a2420860b6e4a75 | [
"MIT"
] | 776 | 2016-12-21T12:08:08.000Z | 2022-03-21T06:12:08.000Z | import numpy as np
import matplotlib.pyplot as plt
from math import pi
# Fix the global NumPy RNG so gen_two_clusters/gen_spiral are reproducible.
np.random.seed(142857)
# Generate a simple two-cluster test dataset
def gen_two_clusters(size=100, center=0, scale=1, dis=2):
    """Two Gaussian blobs (labels +1 and -1), shuffled together.

    Returns (data, labels) where data is float32 of shape (2*size, 2).
    """
    offset_pos = (np.random.random(2) + center - 0.5) * scale + dis
    offset_neg = (np.random.random(2) + center - 0.5) * scale - dis
    blob_pos = (np.random.randn(size, 2) + offset_pos) * scale
    blob_neg = (np.random.randn(size, 2) + offset_neg) * scale
    data = np.vstack((blob_pos, blob_neg)).astype(np.float32)
    labels = np.array([1] * size + [-1] * size)
    perm = np.random.permutation(size * 2)
    return data[perm], labels[perm]
# Generate a spiral dataset
def gen_spiral(size=50, n=4, scale=2):
    """n interleaved spiral arms with alternating -1/+1 labels.

    Returns (xs, ys): xs is float32 of shape (size*n, 2), ys is int8.
    """
    xs = np.zeros((size * n, 2), dtype=np.float32)
    ys = np.zeros(size * n, dtype=np.int8)
    for arm in range(n):
        rows = range(size * arm, size * (arm + 1))
        radius = np.linspace(0.0, 1, size + 1)[1:]
        theta = np.linspace(2 * arm * pi / n, 2 * (arm + scale) * pi / n, size)
        theta = theta + np.random.random(size=size) * 0.1
        xs[rows] = np.c_[radius * np.sin(theta), radius * np.cos(theta)]
        ys[rows] = 2 * (arm % 2) - 1
    return xs, ys
# Plot the decision boundary; skip this section if you only care about the algorithm itself
def visualize2d(clf, x, y, draw_background=False):
    """Plot clf's decision regions over the 2-D samples (x, y).

    Evaluates ``clf.predict`` on a 400x400 grid padded 20% beyond the data
    range, then either shades the regions (draw_background=True) or draws
    the zero-level decision contour, with the samples scattered on top.
    """
    axis, labels = np.array(x).T, np.array(y)
    decision_function = lambda xx: clf.predict(xx)
    nx, ny, padding = 400, 400, 0.2
    x_min, x_max = np.min(axis[0]), np.max(axis[0])
    y_min, y_max = np.min(axis[1]), np.max(axis[1])
    x_padding = max(abs(x_min), abs(x_max)) * padding
    y_padding = max(abs(y_min), abs(y_max)) * padding
    x_min -= x_padding
    x_max += x_padding
    y_min -= y_padding
    y_max += y_padding
    def get_base(nx, ny):
        # Grid axes plus the flattened (nx*ny, 2) list of grid points.
        xf = np.linspace(x_min, x_max, nx)
        yf = np.linspace(y_min, y_max, ny)
        n_xf, n_yf = np.meshgrid(xf, yf)
        return xf, yf, np.c_[n_xf.ravel(), n_yf.ravel()]
    xf, yf, base_matrix = get_base(nx, ny)
    # NOTE(review): reshape((nx, ny)) only matches meshgrid's (ny, nx) layout
    # because nx == ny here; confirm before making the grid non-square.
    z = decision_function(base_matrix).reshape((nx, ny))
    labels[labels == -1] = 0
    n_label = len(np.unique(labels))
    xy_xf, xy_yf = np.meshgrid(xf, yf, sparse=True)
    colors = plt.cm.rainbow([i / n_label for i in range(n_label)])[labels]
    plt.figure()
    if draw_background:
        plt.pcolormesh(xy_xf, xy_yf, z, cmap=plt.cm.Paired)
    else:
        # `colors` (not `c`) is the keyword Axes.contour accepts for a fixed
        # line colour; the previous `c='k-'` is not a valid contour kwarg.
        plt.contour(xf, yf, z, colors='k', levels=[0])
    plt.scatter(axis[0], axis[1], c=colors)
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.show()
| 34.15493 | 107 | 0.602887 |
7956b10346741045e6c9f5d2f9cdf89ff447dd08 | 3,922 | py | Python | isi_sdk_8_2_2/test/test_namespace_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/test/test_namespace_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/test/test_namespace_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.api.namespace_api import NamespaceApi # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestNamespaceApi(unittest.TestCase):
    """NamespaceApi unit test stubs.

    swagger-codegen generated placeholders: each test currently just passes
    without calling the API. Fill in request/response assertions per
    endpoint as needed.
    """
    def setUp(self):
        self.api = isi_sdk_8_2_2.api.namespace_api.NamespaceApi() # noqa: E501
    def tearDown(self):
        pass
    def test_copy_directory(self):
        """Test case for copy_directory
        """
        pass
    def test_copy_file(self):
        """Test case for copy_file
        """
        pass
    def test_create_access_point(self):
        """Test case for create_access_point
        """
        pass
    def test_create_directory(self):
        """Test case for create_directory
        """
        pass
    def test_create_directory_with_access_point_container_path(self):
        """Test case for create_directory_with_access_point_container_path
        """
        pass
    def test_create_file(self):
        """Test case for create_file
        """
        pass
    def test_delete_access_point(self):
        """Test case for delete_access_point
        """
        pass
    def test_delete_directory(self):
        """Test case for delete_directory
        """
        pass
    def test_delete_directory_with_access_point_container_path(self):
        """Test case for delete_directory_with_access_point_container_path
        """
        pass
    def test_delete_file(self):
        """Test case for delete_file
        """
        pass
    def test_get_acl(self):
        """Test case for get_acl
        """
        pass
    def test_get_directory_attributes(self):
        """Test case for get_directory_attributes
        """
        pass
    def test_get_directory_contents(self):
        """Test case for get_directory_contents
        """
        pass
    def test_get_directory_metadata(self):
        """Test case for get_directory_metadata
        """
        pass
    def test_get_directory_with_access_point_container_path(self):
        """Test case for get_directory_with_access_point_container_path
        """
        pass
    def test_get_file_attributes(self):
        """Test case for get_file_attributes
        """
        pass
    def test_get_file_contents(self):
        """Test case for get_file_contents
        """
        pass
    def test_get_file_metadata(self):
        """Test case for get_file_metadata
        """
        pass
    def test_get_worm_properties(self):
        """Test case for get_worm_properties
        """
        pass
    def test_list_access_points(self):
        """Test case for list_access_points
        """
        pass
    def test_move_directory(self):
        """Test case for move_directory
        """
        pass
    def test_move_directory_with_access_point_container_path(self):
        """Test case for move_directory_with_access_point_container_path
        """
        pass
    def test_move_file(self):
        """Test case for move_file
        """
        pass
    def test_query_directory(self):
        """Test case for query_directory
        """
        pass
    def test_set_acl(self):
        """Test case for set_acl
        """
        pass
    def test_set_directory_metadata(self):
        """Test case for set_directory_metadata
        """
        pass
    def test_set_file_metadata(self):
        """Test case for set_file_metadata
        """
        pass
    def test_set_worm_properties(self):
        """Test case for set_worm_properties
        """
        pass
# Allow running this stub suite directly (python test_namespace_api.py).
if __name__ == '__main__':
    unittest.main()
| 19.320197 | 79 | 0.613208 |
7956b12f7c6d75bf989f7b5b9545962e96c1d2f5 | 14,604 | py | Python | models/ConfigBuilder.py | GGarcia93/pycryptobot | 74b7d2dcbafd47419d02f179cbdc2d8086399b0f | [
"Apache-2.0"
] | null | null | null | models/ConfigBuilder.py | GGarcia93/pycryptobot | 74b7d2dcbafd47419d02f179cbdc2d8086399b0f | [
"Apache-2.0"
] | null | null | null | models/ConfigBuilder.py | GGarcia93/pycryptobot | 74b7d2dcbafd47419d02f179cbdc2d8086399b0f | [
"Apache-2.0"
] | 1 | 2022-02-02T02:55:14.000Z | 2022-02-02T02:55:14.000Z | """config.json Configuration Builder"""
from os.path import isfile
from json import dump
from re import compile as re_compile
from sys import exit as sys_exit
class ConfigBuilder():
    """Interactive builder that writes a config.json for the trading bot.

    Walks the user through enabling the Coinbase Pro, Binance and Kucoin
    exchanges (API keys, currency pair, granularity, live/test mode),
    optional Telegram notifications, and the global trading switches, then
    saves the result to ./config.json.
    """
    def __init__(self) -> None:
        # Flags recording which exchanges (and Telegram) the user enabled.
        self._b = 0
        self._c = 0
        self._k = 0
        self._t = 0
    def init(self) -> None:
        """Run the interactive prompts and write config.json.

        Exits immediately if config.json already exists.  Returns None in
        all cases; when no exchange is selected nothing is written.
        """
        print("*** config.json Configuration Builder ***")
        if isfile('config.json'):
            print("config.json already exists.")
            sys_exit()
        config = {}
        choice = input("Do you want to use the Coinbase Pro exchange (1=yes, 2=no:default)? ")
        if choice == '1':
            self._c = 1
            config['coinbasepro'] = {}
            config['coinbasepro']['api_url'] = 'https://api.pro.coinbase.com'
            choice = input("Do you have API keys for the Coinbase Pro exchange (1=yes, 2=no:default)? ")
            if choice == '1':
                while 'api_key' not in config['coinbasepro']:
                    api_key = input("What is your Coinbase Pro API Key? ")
                    p = re_compile(r"^[a-f0-9]{32,32}$")
                    if p.match(api_key):
                        config['coinbasepro']['api_key'] = api_key
                while 'api_secret' not in config['coinbasepro']:
                    api_secret = input("What is your Coinbase Pro API Secret? ")
                    p = re_compile(r"^[A-z0-9+\/]+==$")
                    if p.match(api_secret):
                        config['coinbasepro']['api_secret'] = api_secret
                while 'api_passphrase' not in config['coinbasepro']:
                    api_passphrase = input("What is your Coinbase Pro API Passphrase? ")
                    p = re_compile(r"^[a-z0-9]{10,11}$")
                    if p.match(api_passphrase):
                        config['coinbasepro']['api_passphrase'] = api_passphrase
            else:
                # Leave placeholders so the user can edit config.json later.
                config['coinbasepro']['api_key'] = '<fill in>'
                config['coinbasepro']['api_secret'] = '<fill in>'
                config['coinbasepro']['api_passphrase'] = '<fill in>'
            config['coinbasepro']['config'] = {}
            while 'base_currency' not in config['coinbasepro']['config']:
                base_currency = input("What is your Coinbase Pro base currency (what you are buying) E.g. BTC? ")
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(base_currency):
                    config['coinbasepro']['config']['base_currency'] = base_currency
            while 'quote_currency' not in config['coinbasepro']['config']:
                quote_currency = input("What is your Coinbase Pro quote currency (what you are buying with) E.g. GBP? ")
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(quote_currency):
                    config['coinbasepro']['config']['quote_currency'] = quote_currency
            choice = input("Do you want to smart switch between 1 hour and 15 minute intervals (1=yes:default, 2=no)? ")
            if choice == '2':
                while 'granularity' not in config['coinbasepro']['config']:
                    choice = input("What granularity do you want to trade? (60, 300, 900, 3600, 10800, 21600, 86400)? ")
                    if int(choice) in [60, 300, 900, 3600, 10800, 21600, 86400]:
                        config['coinbasepro']['config']['granularity'] = int(choice)
            choice = input("Do you want to start live trading? (1=live, 2=test:default)? ")
            if choice == '1':
                config['coinbasepro']['config']['live'] = 1
            else:
                config['coinbasepro']['config']['live'] = 0
        choice = input("Do you want to use the Binance exchange (1=yes, 2=no:default)? ")
        if choice == '1':
            self._b = 1
            config['binance'] = {}
            config['binance']['api_url'] = 'https://api.binance.com'
            choice = input("Do you have API keys for the Binance exchange (1=yes, 2=no:default)? ")
            if choice == '1':
                while 'api_key' not in config['binance']:
                    api_key = input("What is your Binance API Key? ")
                    p = re_compile(r"^[A-z0-9]{64,64}$")
                    if p.match(api_key):
                        config['binance']['api_key'] = api_key
                while 'api_secret' not in config['binance']:
                    api_secret = input("What is your Binance API Secret? ")
                    p = re_compile(r"^[A-z0-9]{64,64}$")
                    if p.match(api_secret):
                        config['binance']['api_secret'] = api_secret
            else:
                config['binance']['api_key'] = '<fill in>'
                config['binance']['api_secret'] = '<fill in>'
            config['binance']['config'] = {}
            while 'base_currency' not in config['binance']['config']:
                base_currency = input("What is your Binance base currency (what you are buying) E.g. BTC? ")
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(base_currency):
                    config['binance']['config']['base_currency'] = base_currency
            while 'quote_currency' not in config['binance']['config']:
                quote_currency = input("What is your Binance quote currency (what you are buying with) E.g. GBP? ")
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(quote_currency):
                    config['binance']['config']['quote_currency'] = quote_currency
            choice = input("Do you want to smart switch between 1 hour and 15 minute intervals (1=yes:default, 2=no)? ")
            if choice == '2':
                while 'granularity' not in config['binance']['config']:
                    choice = input("What granularity do you want to trade? (1m, 5m, 15m, 1h, 3h, 6h, 1d)? ")
                    if choice in ['1m', '5m', '15m', '1h', '3h','6h', '1d']:
                        config['binance']['config']['granularity'] = choice
            choice = input("Do you want to start live trading? (1=live, 2=test:default)? ")
            if choice == '1':
                config['binance']['config']['live'] = 1
            else:
                config['binance']['config']['live'] = 0
        choice = input("Do you want to use the Kucoin exchange (1=yes, 2=no:default)? ")
        if choice == '1':
            # Bug fix: this previously set self._K (capital K), leaving
            # self._k at 0 so a Kucoin-only selection fell through to the
            # "You have to select the exchange" branch and never saved.
            self._k = 1
            config['kucoin'] = {}
            config['kucoin']['api_url'] = 'https://api.kucoin.com'
            choice = input("Do you have API keys for the Kucoin exchange (1=yes, 2=no:default)? ")
            if choice == '1':
                while 'api_key' not in config['kucoin']:
                    api_key = input("What is your Kucoin API Key? ")
                    p = re_compile(r"^[a-f0-9]{24,24}$")
                    if p.match(api_key):
                        config['kucoin']['api_key'] = api_key
                while 'api_secret' not in config['kucoin']:
                    api_secret = input("What is your Kucoin API Secret? ")
                    p = re_compile(r"^[A-z0-9-]{36,36}$")
                    if p.match(api_secret):
                        config['kucoin']['api_secret'] = api_secret
                while 'api_passphrase' not in config['kucoin']:
                    api_passphrase = input("What is your Kucoin API Passphrase? ")
                    p = re_compile(r"^[A-z0-9#$%=@!{},`~&*()<>?.:;_|^/+\[\]]{8,32}$")
                    if p.match(api_passphrase):
                        config['kucoin']['api_passphrase'] = api_passphrase
            else:
                config['kucoin']['api_key'] = '<fill in>'
                config['kucoin']['api_secret'] = '<fill in>'
                config['kucoin']['api_passphrase'] = '<fill in>'
            config['kucoin']['config'] = {}
            while 'base_currency' not in config['kucoin']['config']:
                base_currency = input("What is your Kucoin base currency (what you are buying) E.g. BTC? ")
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(base_currency):
                    config['kucoin']['config']['base_currency'] = base_currency
            while 'quote_currency' not in config['kucoin']['config']:
                quote_currency = input("What is your Kucoin quote currency (what you are buying with) E.g. GBP? ")
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(quote_currency):
                    config['kucoin']['config']['quote_currency'] = quote_currency
            choice = input("Do you want to smart switch between 1 hour and 15 minute intervals (1=yes:default, 2=no)? ")
            if choice == '2':
                while 'granularity' not in config['kucoin']['config']:
                    choice = input("What granularity do you want to trade? (60, 300, 900, 3600, 10800, 21600, 86400)? ")
                    if int(choice) in [60, 300, 900, 3600, 10800, 21600, 86400]:
                        config['kucoin']['config']['granularity'] = int(choice)
            choice = input("Do you want to start live trading? (1=live, 2=test:default)? ")
            if choice == '1':
                config['kucoin']['config']['live'] = 1
            else:
                config['kucoin']['config']['live'] = 0
        # The remaining options (and the save) only apply when at least one
        # exchange was enabled above.
        if self._b == 1 or self._c == 1 or self._k == 1:
            choice = input("Do you have a Telegram Token and Client ID (1=yes, 2=no:default)? ")
            if choice == '1':
                self._t = 1
                config['telegram'] = {}
                while 'token' not in config['telegram']:
                    token = input("What is your Telegram token? ")
                    p = re_compile(r"^\d{1,10}:[A-z0-9-_]{35,35}$")
                    if p.match(token):
                        config['telegram']['token'] = token
                while 'client_id' not in config['telegram']:
                    client_id = input("What is your Telegram client ID? ")
                    p = re_compile(r"^-*\d{7,10}$")
                    if p.match(client_id):
                        config['telegram']['client_id'] = client_id
            choice = input("Do you want to ever sell at a loss even to minimise losses (1:yes, 2=no:default)? ")
            if choice == '1':
                if self._c == 1:
                    config['coinbasepro']['config']['sellatloss'] = 1
                if self._b == 1:
                    config['binance']['config']['sellatloss'] = 1
                if self._k == 1:
                    config['kucoin']['config']['sellatloss'] = 1
            choice = input("Do you want to sell at the next resistance? (1:yes:default, 2=no)? ")
            if choice != '2':
                if self._c == 1:
                    config['coinbasepro']['config']['sellatresistance'] = 1
                if self._b == 1:
                    config['binance']['config']['sellatresistance'] = 1
                if self._k == 1:
                    config['kucoin']['config']['sellatresistance'] = 1
            choice = input("Do you only want to trade in a bull market SMA50 > SMA200? (1:yes, 2=no:default)? ")
            if choice != '1':
                if self._c == 1:
                    config['coinbasepro']['config']['disablebullonly'] = 1
                if self._b == 1:
                    config['binance']['config']['disablebullonly'] = 1
                if self._k == 1:
                    config['kucoin']['config']['disablebullonly'] = 1
            choice = input("Do you want to avoid buying when the price is too high? (1:yes:default, 2=no)? ")
            if choice != '2':
                if self._c == 1:
                    config['coinbasepro']['config']['disablebuynearhigh'] = 1
                if self._b == 1:
                    config['binance']['config']['disablebuynearhigh'] = 1
                if self._k == 1:
                    config['kucoin']['config']['disablebuynearhigh'] = 1
            choice = input("Do you want to disable the On-Balance Volume (OBV) technical indicator on buys? (1:yes:default, 2=no)? ")
            if choice != '2':
                if self._c == 1:
                    config['coinbasepro']['config']['disablebuyobv'] = 1
                if self._b == 1:
                    config['binance']['config']['disablebuyobv'] = 1
                if self._k == 1:
                    config['kucoin']['config']['disablebuyobv'] = 1
            choice = input("Do you want to disable the Elder-Ray Index on buys? (1:yes:default, 2=no)? ")
            if choice != '2':
                if self._c == 1:
                    config['coinbasepro']['config']['disablebuyelderray'] = 1
                if self._b == 1:
                    config['binance']['config']['disablebuyelderray'] = 1
                if self._k == 1:
                    config['kucoin']['config']['disablebuyelderray'] = 1
            choice = input("Do you want to disable saving the CSV tracker on buy and sell events? (1:yes:default, 2=no)? ")
            if choice != '2':
                if self._c == 1:
                    config['coinbasepro']['config']['disabletracker'] = 1
                if self._b == 1:
                    config['binance']['config']['disabletracker'] = 1
                if self._k == 1:
                    config['kucoin']['config']['disabletracker'] = 1
            choice = input("Do you want to disable writing to the log file? (1:yes, 2=no:default)? ")
            if choice != '2':
                if self._c == 1:
                    config['coinbasepro']['config']['disablelog'] = 1
                if self._b == 1:
                    config['binance']['config']['disablelog'] = 1
                if self._k == 1:
                    config['kucoin']['config']['disablelog'] = 1
            choice = input("Do you want the bot to auto restart itself on failure? (1:yes:default, 2=no)? ")
            if choice != '2':
                if self._c == 1:
                    config['coinbasepro']['config']['autorestart'] = 1
                if self._b == 1:
                    config['binance']['config']['autorestart'] = 1
                if self._k == 1:
                    config['kucoin']['config']['autorestart'] = 1
            try:
                with open('./config.json', 'w') as fout :
                    dump(config, fout, indent=4)
                print("config.json saved!")
            except Exception as err:
                print(err)
        else:
            print('You have to select the exchange you want to use.')
            return None
| 48.68 | 133 | 0.492331 |
7956b1695efd2308012e32d4182ab5c8b2b39690 | 83,625 | py | Python | django/db/models/query.py | arthurgit1/django | a47134af754fbf8dd923eb5e0cc162e3b7c0e1c5 | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 2 | 2020-11-24T02:26:43.000Z | 2020-11-24T02:26:45.000Z | django/db/models/query.py | arthurgit1/django | a47134af754fbf8dd923eb5e0cc162e3b7c0e1c5 | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 1 | 2021-01-15T17:49:48.000Z | 2021-01-15T17:49:48.000Z | django/db/models/query.py | arthurgit1/django | a47134af754fbf8dd923eb5e0cc162e3b7c0e1c5 | [
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 1 | 2021-07-18T04:00:14.000Z | 2021-07-18T04:00:14.000Z | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from itertools import chain
import django
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, NotSupportedError, connections,
router, transaction,
)
from django.db.models import AutoField, DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, Expression, F, Ref, Value, When
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.models.utils import create_namedtuple_class, resolve_callables
from django.utils import timezone
from django.utils.functional import cached_property, partition
# The maximum number of results to fetch in a get() query.
MAX_GET_RESULTS = 21
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class BaseIterable:
    """Common state for the queryset result iterables below.

    Holds the queryset plus the fetch options (server-side cursor use and
    chunk size) forwarded to the SQL compiler by subclasses.
    """
    def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
        self.queryset = queryset
        self.chunked_fetch = chunked_fetch
        self.chunk_size = chunk_size
class ModelIterable(BaseIterable):
    """Iterable that yields a model instance for each row."""
    def __iter__(self):
        queryset = self.queryset
        db = queryset.db
        compiler = queryset.query.get_compiler(using=db)
        # Execute the query. This will also fill compiler.select, klass_info,
        # and annotations.
        results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
        select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                                  compiler.annotation_col_map)
        model_cls = klass_info['model']
        select_fields = klass_info['select_fields']
        # Columns [model_fields_start, model_fields_end) hold the concrete
        # model fields; anything outside is annotations/related selects.
        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
        init_list = [f[0].target.attname
                     for f in select[model_fields_start:model_fields_end]]
        related_populators = get_related_populators(klass_info, select, db)
        # Pre-build one attrgetter per known related field so the per-row
        # loop below can resolve the related object's key cheaply.
        known_related_objects = [
            (field, related_objs, operator.attrgetter(*[
                field.attname
                if from_field == 'self' else
                queryset.model._meta.get_field(from_field).attname
                for from_field in field.from_fields
            ])) for field, related_objs in queryset._known_related_objects.items()
        ]
        for row in compiler.results_iter(results):
            obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
            if annotation_col_map:
                for attr_name, col_pos in annotation_col_map.items():
                    setattr(obj, attr_name, row[col_pos])
            # Add the known related objects to the model.
            for field, rel_objs, rel_getter in known_related_objects:
                # Avoid overwriting objects loaded by, e.g., select_related().
                if field.is_cached(obj):
                    continue
                rel_obj_id = rel_getter(obj)
                try:
                    rel_obj = rel_objs[rel_obj_id]
                except KeyError:
                    pass  # May happen in qs1 | qs2 scenarios.
                else:
                    setattr(obj, field.name, rel_obj)
            yield obj
class ValuesIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values() that yields a dict for each row.
    """
    def __iter__(self):
        qs = self.queryset
        compiler = qs.query.get_compiler(qs.db)
        # extra(select=...) cols are always at the start of the row.
        col_names = [
            *qs.query.extra_select,
            *qs.query.values_select,
            *qs.query.annotation_select,
        ]
        rows = compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
        for row in rows:
            # zip truncates to the named columns, matching the old
            # range(len(names)) indexing.
            yield dict(zip(col_names, row))
class ValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
    for each row.
    """
    def __iter__(self):
        queryset = self.queryset
        query = queryset.query
        compiler = query.get_compiler(queryset.db)
        if queryset._fields:
            # extra(select=...) cols are always at the start of the row.
            names = [
                *query.extra_select,
                *query.values_select,
                *query.annotation_select,
            ]
            # Requested field order, with any extra annotations appended.
            fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)]
            if fields != names:
                # Reorder according to fields.
                index_map = {name: idx for idx, name in enumerate(names)}
                rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
                return map(
                    rowfactory,
                    compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
                )
        # Fast path: the compiler already yields tuples in the right order.
        return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
class NamedValuesListIterable(ValuesListIterable):
    """
    Iterable returned by QuerySet.values_list(named=True) that yields a
    namedtuple for each row.
    """
    def __iter__(self):
        qs = self.queryset
        if qs._fields:
            field_names = qs._fields
        else:
            q = qs.query
            field_names = [*q.extra_select, *q.values_select, *q.annotation_select]
        row_class = create_namedtuple_class(*field_names)
        # Bypass the namedtuple constructor's re-validation per row.
        make_row = tuple.__new__
        for values in super().__iter__():
            yield make_row(row_class, values)
class FlatValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=True) that yields single
    values.
    """
    def __iter__(self):
        qs = self.queryset
        compiler = qs.query.get_compiler(qs.db)
        rows = compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
        # Each row is a one-or-more column tuple; flat mode keeps column 0.
        yield from map(operator.itemgetter(0), rows)
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
    def __init__(self, model=None, query=None, using=None, hints=None):
        self.model = model
        self._db = using
        self._hints = hints or {}
        self._query = query or sql.Query(self.model)
        self._result_cache = None  # Populated lazily by _fetch_all().
        self._sticky_filter = False
        self._for_write = False
        self._prefetch_related_lookups = ()
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field: {pk: rel_obj}}
        # How rows are materialised: model instances by default; values()/
        # values_list() swap in another iterable class.
        self._iterable_class = ModelIterable
        self._fields = None
        # Support for deferring filter application (see the query property).
        self._defer_next_filter = False
        self._deferred_filter = None
    @property
    def query(self):
        # Apply any deferred filter (stored as (negate, args, kwargs)) before
        # exposing the underlying sql.Query.
        if self._deferred_filter:
            negate, args, kwargs = self._deferred_filter
            self._filter_or_exclude_inplace(negate, args, kwargs)
            self._deferred_filter = None
        return self._query
    @query.setter
    def query(self, value):
        # A query selecting values must be iterated as dicts rather than
        # model instances.
        if value.values_select:
            self._iterable_class = ValuesIterable
        self._query = value
    def as_manager(cls):
        # Address the circular dependency between `Queryset` and `Manager`.
        from django.db.models.manager import Manager
        manager = Manager.from_queryset(cls)()
        manager._built_with_as_manager = True
        return manager
    # Mark the method and wrap it as a classmethod by hand so the attribute
    # can be set on the plain function first.
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}
def __setstate__(self, state):
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
if pickled_version != django.__version__:
warnings.warn(
"Pickled queryset instance's Django version %s does not "
"match the current version %s."
% (pickled_version, django.__version__),
RuntimeWarning,
stacklevel=2,
)
else:
warnings.warn(
"Pickled queryset instance's Django version is not specified.",
RuntimeWarning,
stacklevel=2,
)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<%s %r>' % (self.__class__.__name__, data)
    def __len__(self):
        # len() forces full evaluation of the queryset.
        self._fetch_all()
        return len(self._result_cache)
    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql.compiler.execute_sql()
               - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql.compiler.results_iter()
               - Returns one row at time. At this point the rows are still just
                 tuples. In some cases the return values are converted to
                 Python values at this location.
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        self._fetch_all()
        # Iterate the (now fully populated) result cache.
        return iter(self._result_cache)
    def __bool__(self):
        # Truth testing evaluates the whole queryset, like __len__.
        self._fetch_all()
        return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError(
'QuerySet indices must be integers or slices, not %s.'
% type(k).__name__
)
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
    def __class_getitem__(cls, *args, **kwargs):
        # Support generic subscription (e.g. QuerySet[SomeModel]) purely for
        # typing purposes; the parameters are ignored at runtime.
        return cls
    def __and__(self, other):
        # Combine with SQL AND; anything ANDed with an empty queryset is empty.
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            return other
        if isinstance(self, EmptyQuerySet):
            return self
        combined = self._chain()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.AND)
        return combined
    def __or__(self, other):
        # Combine with SQL OR; an empty queryset is the identity element.
        self._merge_sanity_check(other)
        if isinstance(self, EmptyQuerySet):
            return other
        if isinstance(other, EmptyQuerySet):
            return self
        # A query that can no longer be filtered (per query.can_filter()) is
        # replaced by a pk__in lookup over its own primary keys.
        query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk'))
        combined = query._chain()
        combined._merge_known_related_objects(other)
        if not other.query.can_filter():
            other = other.model._base_manager.filter(pk__in=other.values('pk'))
        combined.query.combine(other.query, sql.OR)
        return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)
def iterator(self, chunk_size=2000):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be strictly positive.')
use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')
return self._iterator(use_chunked_fetch, chunk_size)
    def aggregate(self, *args, **kwargs):
        """
        Return a dictionary containing the calculations (aggregation)
        over the current queryset.
        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate')
        for arg in args:
            # The default_alias property raises TypeError if default_alias
            # can't be set automatically or AttributeError if it isn't an
            # attribute.
            try:
                arg.default_alias
            except (AttributeError, TypeError):
                raise TypeError("Complex aggregates require an alias")
            kwargs[arg.default_alias] = arg
        # Work on a chained copy so the summary annotations don't leak back
        # into this queryset's query.
        query = self.query.chain()
        for (alias, aggregate_expr) in kwargs.items():
            query.add_annotation(aggregate_expr, alias, is_summary=True)
            annotation = query.annotations[alias]
            if not annotation.contains_aggregate:
                raise TypeError("%s is not an aggregate expression" % alias)
            # Reject an aggregate that references another aggregate requested
            # in the same call: that alias has no computed value yet.
            for expr in annotation.get_source_expressions():
                if expr.contains_aggregate and isinstance(expr, Ref) and expr.refs in kwargs:
                    name = expr.refs
                    raise exceptions.FieldError(
                        "Cannot compute %s('%s'): '%s' is an aggregate"
                        % (annotation.name, name, name)
                    )
        return query.get_aggregation(self.db, kwargs)
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
    def get(self, *args, **kwargs):
        """
        Perform the query and return a single object matching the given
        keyword arguments.
        """
        if self.query.combinator and (args or kwargs):
            raise NotSupportedError(
                'Calling QuerySet.get(...) with filters after %s() is not '
                'supported.' % self.query.combinator
            )
        clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
        if self.query.can_filter() and not self.query.distinct_fields:
            # Ordering is irrelevant for a single-row lookup; drop it.
            clone = clone.order_by()
        limit = None
        if not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit:
            # Cap the row count so a broadly-matching get() doesn't fetch the
            # whole table; hitting the cap still signals multiple matches.
            limit = MAX_GET_RESULTS
            clone.query.set_limits(high=limit)
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist(
                "%s matching query does not exist." %
                self.model._meta.object_name
            )
        raise self.model.MultipleObjectsReturned(
            'get() returned more than one %s -- it returned %s!' % (
                self.model._meta.object_name,
                num if not limit or num < limit else 'more than %s' % (limit - 1),
            )
        )
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
    def _prepare_for_bulk_create(self, objs):
        # Assign pre-save PK values (where the field provides them) and
        # validate related-field state before the bulk INSERT.
        for obj in objs:
            if obj.pk is None:
                # Populate new PK values.
                obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
            obj._prepare_related_fields_for_save(operation_name='bulk_create')
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_rows_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
# Check that the parents share the same concrete model with the our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
opts = self.model._meta
fields = opts.concrete_fields
objs = list(objs)
self._prepare_for_bulk_create(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
returned_columns = self._batched_insert(
objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
)
for obj_with_pk, results in zip(objs_with_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
if field != opts.pk:
setattr(obj_with_pk, field.attname, result)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
returned_columns = self._batched_insert(
objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
)
if connection.features.can_return_rows_from_bulk_insert and not ignore_conflicts:
assert len(returned_columns) == len(objs_without_pk)
for obj_without_pk, results in zip(objs_without_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
setattr(obj_without_pk, field.attname, result)
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
if batch_size is not None and batch_size < 0:
raise ValueError('Batch size must be a positive integer.')
if not fields:
raise ValueError('Field names must be given to bulk_update().')
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError('All bulk_update() objects must have a primary key set.')
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError('bulk_update() can only be used with concrete fields.')
if any(f.primary_key for f in fields):
raise ValueError('bulk_update() cannot be used with primary key fields.')
if not objs:
return
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connections[self.db].features.requires_casted_case_in_updates
batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not isinstance(attr, Expression):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
self.filter(pk__in=pks).update(**update_kwargs)
bulk_update.alters_data = True
    def get_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, creating one if necessary.
        Return a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        # The get() needs to be targeted at the write database in order
        # to avoid potential transaction consistency problems.
        self._for_write = True
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            params = self._extract_model_params(defaults, **kwargs)
            # Try to create an object using passed params.
            try:
                with transaction.atomic(using=self.db):
                    params = dict(resolve_callables(params))
                    return self.create(**params), True
            except IntegrityError:
                # A concurrent writer may have created the row between the
                # get() and the create(); retry the get before re-raising.
                try:
                    return self.get(**kwargs), False
                except self.model.DoesNotExist:
                    pass
                raise
    def update_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, updating one with defaults
        if it exists, otherwise create a new one.
        Return a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        defaults = defaults or {}
        self._for_write = True
        with transaction.atomic(using=self.db):
            # Lock the row so that a concurrent update is blocked until
            # update_or_create() has performed its save.
            obj, created = self.select_for_update().get_or_create(defaults, **kwargs)
            if created:
                return obj, created
            # resolve_callables() yields (key, value) pairs, evaluating any
            # callable defaults.
            for k, v in resolve_callables(defaults):
                setattr(obj, k, v)
            obj.save(using=self.db)
        return obj, False
    def _extract_model_params(self, defaults, **kwargs):
        """
        Prepare `params` for creating a model instance based on the given
        kwargs; for use by get_or_create().
        """
        defaults = defaults or {}
        # Drop lookup expressions (anything containing LOOKUP_SEP) - only
        # plain field names can be passed to the model constructor.
        params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
        params.update(defaults)
        property_names = self.model._meta._property_names
        invalid_params = []
        for param in params:
            try:
                self.model._meta.get_field(param)
            except exceptions.FieldDoesNotExist:
                # It's okay to use a model's property if it has a setter.
                if not (param in property_names and getattr(self.model, param).fset):
                    invalid_params.append(param)
        if invalid_params:
            raise exceptions.FieldError(
                "Invalid field name(s) for model %s: '%s'." % (
                    self.model._meta.object_name,
                    "', '".join(sorted(invalid_params)),
                ))
        return params
    def _earliest(self, *fields):
        """
        Return the earliest object according to fields (if given) or by the
        model's Meta.get_latest_by.
        """
        if fields:
            order_by = fields
        else:
            order_by = getattr(self.model._meta, 'get_latest_by')
            # Meta.get_latest_by may be a single field name; normalize to a
            # tuple for add_ordering().
            if order_by and not isinstance(order_by, (tuple, list)):
                order_by = (order_by,)
        if order_by is None:
            raise ValueError(
                "earliest() and latest() require either fields as positional "
                "arguments or 'get_latest_by' in the model's Meta."
            )
        assert not self.query.is_sliced, \
            "Cannot change a query once a slice has been taken."
        # Fetch a single row under the requested ordering.
        obj = self._chain()
        obj.query.set_limits(high=1)
        obj.query.clear_ordering(force_empty=True)
        obj.query.add_ordering(*order_by)
        return obj.get()
def earliest(self, *fields):
return self._earliest(*fields)
def latest(self, *fields):
return self.reverse()._earliest(*fields)
def first(self):
"""Return the first object of a query or None if no match is found."""
for obj in (self if self.ordered else self.order_by('pk'))[:1]:
return obj
def last(self):
"""Return the last object of a query or None if no match is found."""
for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]:
return obj
    def in_bulk(self, id_list=None, *, field_name='pk'):
        """
        Return a dictionary mapping each of the given IDs to the object with
        that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
        """
        assert not self.query.is_sliced, \
            "Cannot use 'limit' or 'offset' with in_bulk"
        opts = self.model._meta
        # Single-column UNIQUE constraints also qualify as unique fields.
        unique_fields = [
            constraint.fields[0]
            for constraint in opts.total_unique_constraints
            if len(constraint.fields) == 1
        ]
        if (
            field_name != 'pk' and
            not opts.get_field(field_name).unique and
            field_name not in unique_fields and
            self.query.distinct_fields != (field_name,)
        ):
            raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name)
        if id_list is not None:
            if not id_list:
                return {}
            filter_key = '{}__in'.format(field_name)
            batch_size = connections[self.db].features.max_query_params
            id_list = tuple(id_list)
            # If the database has a limit on the number of query parameters
            # (e.g. SQLite), retrieve objects in batches if necessary.
            if batch_size and batch_size < len(id_list):
                qs = ()
                for offset in range(0, len(id_list), batch_size):
                    batch = id_list[offset:offset + batch_size]
                    qs += tuple(self.filter(**{filter_key: batch}).order_by())
            else:
                qs = self.filter(**{filter_key: id_list}).order_by()
        else:
            # No ids given: evaluate the whole queryset.
            qs = self._chain()
        return {getattr(obj, field_name): obj for obj in qs}
    def delete(self):
        """Delete the records in the current QuerySet."""
        self._not_support_combined_queries('delete')
        assert not self.query.is_sliced, \
            "Cannot use 'limit' or 'offset' with delete."
        if self._fields is not None:
            raise TypeError("Cannot call delete() after .values() or .values_list()")
        del_query = self._chain()
        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True
        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)
        # Collector handles cascades and signals for related objects.
        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        deleted, _rows_count = collector.delete()
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
        return deleted, _rows_count
    delete.alters_data = True
    delete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
query = self.query.clone()
query.__class__ = sql.DeleteQuery
cursor = query.get_compiler(using).execute_sql(CURSOR)
if cursor:
with cursor:
return cursor.rowcount
return 0
_raw_delete.alters_data = True
    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        self._not_support_combined_queries('update')
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        with transaction.mark_for_rollback_on_error(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        # Rows changed underneath us; drop the stale cache.
        self._result_cache = None
        return rows
    update.alters_data = True
    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert not self.query.is_sliced, \
            "Cannot update a query once a slice has been taken."
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_fields(values)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
    def _prefetch_related_objects(self):
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        # Guard against re-running the prefetch on subsequent evaluations.
        self._prefetch_done = True
    def explain(self, *, format=None, **options):
        """Return the database's EXPLAIN output for this queryset's query."""
        return self.query.explain(using=self.db, format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
    def raw(self, raw_query, params=None, translations=None, using=None):
        """Execute a raw SQL query and return a RawQuerySet over its rows."""
        if using is None:
            using = self.db
        qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
        # Carry over any prefetches configured on this queryset.
        qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
        return qs
    def _values(self, *fields, **expressions):
        """Shared implementation behind values() and values_list()."""
        clone = self._chain()
        if expressions:
            # Expressions become annotations selected alongside the fields.
            clone = clone.annotate(**expressions)
        clone._fields = fields
        clone.query.set_values(fields)
        return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
    def values_list(self, *fields, flat=False, named=False):
        """Return a queryset that yields tuples (or flat values/namedtuples)."""
        if flat and named:
            raise TypeError("'flat' and 'named' can't be used together.")
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        field_names = {f for f in fields if not hasattr(f, 'resolve_expression')}
        _fields = []
        expressions = {}
        counter = 1
        for field in fields:
            if hasattr(field, 'resolve_expression'):
                # Expressions need an alias; generate one that doesn't clash
                # with any explicitly named field.
                field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower())
                while True:
                    field_id = field_id_prefix + str(counter)
                    counter += 1
                    if field_id not in field_names:
                        break
                expressions[field_id] = field
                _fields.append(field_id)
            else:
                _fields.append(field)
        clone = self._values(*_fields, **expressions)
        clone._iterable_class = (
            NamedValuesListIterable if named
            else FlatValuesListIterable if flat
            else ValuesListIterable
        )
        return clone
    def dates(self, field_name, kind, order='ASC'):
        """
        Return a list of date objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ('year', 'month', 'week', 'day'), \
            "'kind' must be one of 'year', 'month', 'week', or 'day'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        # plain_field is annotated only to filter out NULL source values.
        return self.annotate(
            datefield=Trunc(field_name, kind, output_field=DateField()),
            plain_field=F(field_name)
        ).values_list(
            'datefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
    def datetimes(self, field_name, kind, order='ASC', tzinfo=None, is_dst=None):
        """
        Return a list of datetime objects representing all available
        datetimes for the given field_name, scoped to 'kind'.
        """
        assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \
            "'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        if settings.USE_TZ:
            if tzinfo is None:
                # Truncation happens in the active timezone by default.
                tzinfo = timezone.get_current_timezone()
        else:
            tzinfo = None
        # plain_field is annotated only to filter out NULL source values.
        return self.annotate(
            datetimefield=Trunc(
                field_name,
                kind,
                output_field=DateTimeField(),
                tzinfo=tzinfo,
                is_dst=is_dst,
            ),
            plain_field=F(field_name)
        ).values_list(
            'datetimefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
    def all(self):
        """
        Return a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        # A plain chained copy - no filtering changes.
        return self._chain()
    def filter(self, *args, **kwargs):
        """
        Return a new QuerySet instance with the args ANDed to the existing
        set.
        """
        self._not_support_combined_queries('filter')
        # negate=False: keep only rows matching the condition.
        return self._filter_or_exclude(False, args, kwargs)
    def exclude(self, *args, **kwargs):
        """
        Return a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        self._not_support_combined_queries('exclude')
        # negate=True: keep only rows NOT matching the condition.
        return self._filter_or_exclude(True, args, kwargs)
    def _filter_or_exclude(self, negate, args, kwargs):
        """Return a clone with the condition applied (or deferred)."""
        if args or kwargs:
            assert not self.query.is_sliced, \
                "Cannot filter a query once a slice has been taken."
        clone = self._chain()
        if self._defer_next_filter:
            # Store the condition for later application instead of applying
            # it now; the flag is one-shot.
            self._defer_next_filter = False
            clone._deferred_filter = negate, args, kwargs
        else:
            clone._filter_or_exclude_inplace(negate, args, kwargs)
        return clone
def _filter_or_exclude_inplace(self, negate, args, kwargs):
if negate:
self._query.add_q(~Q(*args, **kwargs))
else:
self._query.add_q(Q(*args, **kwargs))
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
    def _combinator_query(self, combinator, *other_qs, all=False):
        """Build a UNION/INTERSECT/EXCEPT query over self and other_qs."""
        # Clone the query to inherit the select list and everything
        clone = self._chain()
        # Clear limits and ordering so they can be reapplied
        clone.query.clear_ordering(True)
        clone.query.clear_limits()
        clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
        clone.query.combinator = combinator
        clone.query.combinator_all = all
        return clone
    def union(self, *other_qs, all=False):
        """Combine with SQL UNION (UNION ALL when all=True)."""
        # If the query is an EmptyQuerySet, combine all nonempty querysets.
        if isinstance(self, EmptyQuerySet):
            qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
            if not qs:
                return self
            if len(qs) == 1:
                return qs[0]
            # Use the first non-empty queryset as the base of the union.
            return qs[0]._combinator_query('union', *qs[1:], all=all)
        return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query('difference', *other_qs)
    def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
        """
        Return a new QuerySet instance that will select objects with a
        FOR UPDATE lock.
        """
        if nowait and skip_locked:
            # The two options are mutually exclusive lock-conflict behaviors.
            raise ValueError('The nowait option cannot be used with skip_locked.')
        obj = self._chain()
        # Locking implies a write; route through the write database.
        obj._for_write = True
        obj.query.select_for_update = True
        obj.query.select_for_update_nowait = nowait
        obj.query.select_for_update_skip_locked = skip_locked
        obj.query.select_for_update_of = of
        obj.query.select_for_no_key_update = no_key
        return obj
    def select_related(self, *fields):
        """
        Return a new QuerySet instance that will select related objects.
        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.
        If select_related(None) is called, clear the list.
        """
        self._not_support_combined_queries('select_related')
        if self._fields is not None:
            raise TypeError("Cannot call select_related() after .values() or .values_list()")
        obj = self._chain()
        if fields == (None,):
            # Explicit reset.
            obj.query.select_related = False
        elif fields:
            obj.query.add_select_related(fields)
        else:
            # No arguments: follow all non-null forward relations.
            obj.query.select_related = True
        return obj
    def prefetch_related(self, *lookups):
        """
        Return a new QuerySet instance that will prefetch the specified
        Many-To-One and Many-To-Many related objects when the QuerySet is
        evaluated.
        When prefetch_related() is called more than once, append to the list of
        prefetch lookups. If prefetch_related(None) is called, clear the list.
        """
        self._not_support_combined_queries('prefetch_related')
        clone = self._chain()
        if lookups == (None,):
            clone._prefetch_related_lookups = ()
        else:
            for lookup in lookups:
                if isinstance(lookup, Prefetch):
                    lookup = lookup.prefetch_to
                # Only the first segment of the lookup path can name a
                # filtered relation, so that's all that needs checking.
                lookup = lookup.split(LOOKUP_SEP, 1)[0]
                if lookup in self.query._filtered_relations:
                    raise ValueError('prefetch_related() is not supported with FilteredRelation.')
            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
        return clone
    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with extra data or aggregations.
        """
        self._not_support_combined_queries('annotate')
        # select=True: annotations appear in the SELECT clause.
        return self._annotate(args, kwargs, select=True)
    def alias(self, *args, **kwargs):
        """
        Return a query set with added aliases for extra data or aggregations.
        """
        self._not_support_combined_queries('alias')
        # select=False: aliases are usable in later filters but not selected.
        return self._annotate(args, kwargs, select=False)
    def _annotate(self, args, kwargs, select=True):
        """Shared implementation behind annotate() and alias()."""
        self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate')
        annotations = {}
        for arg in args:
            # The default_alias property may raise a TypeError.
            try:
                if arg.default_alias in kwargs:
                    raise ValueError("The named annotation '%s' conflicts with the "
                                     "default name for another annotation."
                                     % arg.default_alias)
            except TypeError:
                raise TypeError("Complex annotations require an alias")
            annotations[arg.default_alias] = arg
        annotations.update(kwargs)
        clone = self._chain()
        names = self._fields
        if names is None:
            # Collect every name the model exposes so annotation aliases
            # can't shadow a real field.
            names = set(chain.from_iterable(
                (field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
                for field in self.model._meta.get_fields()
            ))
        for alias, annotation in annotations.items():
            if alias in names:
                raise ValueError("The annotation '%s' conflicts with a field on "
                                 "the model." % alias)
            if isinstance(annotation, FilteredRelation):
                clone.query.add_filtered_relation(annotation, alias)
            else:
                clone.query.add_annotation(
                    annotation, alias, is_summary=False, select=select,
                )
        # An aggregate annotation turns the query into a grouped query.
        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break
        return clone
    def order_by(self, *field_names):
        """Return a new QuerySet instance with the ordering changed."""
        assert not self.query.is_sliced, \
            "Cannot reorder a query once a slice has been taken."
        obj = self._chain()
        # Replace (not extend) any existing ordering.
        obj.query.clear_ordering(force_empty=False)
        obj.query.add_ordering(*field_names)
        return obj
    def distinct(self, *field_names):
        """
        Return a new QuerySet instance that will select only distinct results.
        """
        self._not_support_combined_queries('distinct')
        assert not self.query.is_sliced, \
            "Cannot create distinct fields once a slice has been taken."
        obj = self._chain()
        # With field_names this becomes DISTINCT ON (backend permitting).
        obj.query.add_distinct_fields(*field_names)
        return obj
    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """Add extra SQL fragments to the query."""
        self._not_support_combined_queries('extra')
        assert not self.query.is_sliced, \
            "Cannot change a query once a slice has been taken"
        clone = self._chain()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if self.query.is_sliced:
raise TypeError('Cannot reverse a query once a slice has been taken.')
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
    def defer(self, *fields):
        """
        Defer the loading of data for certain fields until they are accessed.
        Add the set of deferred fields to any existing set of deferred fields.
        The only exception to this is if None is passed in as the only
        parameter, in which case removal all deferrals.
        """
        self._not_support_combined_queries('defer')
        if self._fields is not None:
            raise TypeError("Cannot call defer() after .values() or .values_list()")
        clone = self._chain()
        if fields == (None,):
            # defer(None) resets all deferred loading.
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone
    def only(self, *fields):
        """
        Essentially, the opposite of defer(). Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        self._not_support_combined_queries('only')
        if self._fields is not None:
            raise TypeError("Cannot call only() after .values() or .values_list()")
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        for field in fields:
            # Only the first path segment can name a filtered relation.
            field = field.split(LOOKUP_SEP, 1)[0]
            if field in self.query._filtered_relations:
                raise ValueError('only() is not supported with FilteredRelation.')
        clone = self._chain()
        clone.query.add_immediate_loading(fields)
        return clone
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif (
self.query.default_ordering and
self.query.get_meta().ordering and
# A default ordering doesn't affect GROUP BY queries.
not self.query.group_by
):
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
    def _insert(self, objs, fields, returning_fields=None, raw=False, using=None, ignore_conflicts=False):
        """
        Insert a new record for the given model. This provides an interface to
        the InsertQuery class and is how Model.save() is implemented.
        """
        self._for_write = True
        if using is None:
            using = self.db
        query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts)
        query.insert_values(fields, objs, raw=raw)
        # returning_fields drives RETURNING support where the backend has it.
        return query.get_compiler(using=using).execute_sql(returning_fields)
    _insert.alters_data = True
    _insert.queryset_only = False
    def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False):
        """
        Helper method for bulk_create() to insert objs one batch at a time.
        """
        if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts:
            raise NotSupportedError('This database backend does not support ignoring conflicts.')
        ops = connections[self.db].ops
        # Respect the backend's parameter limit; never drop below one row.
        max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
        batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
        inserted_rows = []
        bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert
        for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
            if bulk_return and not ignore_conflicts:
                inserted_rows.extend(self._insert(
                    item, fields=fields, using=self.db,
                    returning_fields=self.model._meta.db_returning_fields,
                    ignore_conflicts=ignore_conflicts,
                ))
            else:
                self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts)
        return inserted_rows
def _chain(self, **kwargs):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
obj.__dict__.update(kwargs)
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
raise TypeError('Cannot use multi-field values as a filter value.')
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
equivalent with checking if all objects are present in results, for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression'))
if invalid_args:
raise TypeError(
'QuerySet.%s() received non-expression(s): %s.' % (
method_name,
', '.join(invalid_args),
)
)
def _not_support_combined_queries(self, operation_name):
if self.query.combinator:
raise NotSupportedError(
'Calling QuerySet.%s() after %s() is not supported.'
% (operation_name, self.query.combinator)
)
class InstanceCheckMeta(type):
    """Metaclass whose isinstance() check matches only empty QuerySets."""
    def __instancecheck__(self, instance):
        if not isinstance(instance, QuerySet):
            return False
        return instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
    """
    Marker class for checking whether a queryset is empty via .none():
    isinstance(qs.none(), EmptyQuerySet) -> True
    """
    def __init__(self, *args, **kwargs):
        # Only ever used with isinstance(); never meant to be constructed.
        raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
    """
    Provide an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """
    def __init__(self, raw_query, model=None, query=None, params=None,
                 translations=None, using=None, hints=None):
        # raw_query: the raw SQL string; params: query parameters;
        # translations: {query column name -> model field name} overrides.
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self._hints = hints or {}
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        self.translations = translations or {}
        self._result_cache = None
        self._prefetch_related_lookups = ()
        self._prefetch_done = False
    def resolve_model_init_order(self):
        """Resolve the init field names and value positions."""
        converter = connections[self.db].introspection.identifier_converter
        # Fields whose columns are actually present in the query results.
        model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns]
        # Columns that don't map to any model field become annotations.
        annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
                             if column not in self.model_fields]
        model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields]
        model_init_names = [f.attname for f in model_init_fields]
        return model_init_names, model_init_order, annotation_fields
    def prefetch_related(self, *lookups):
        """Same as QuerySet.prefetch_related()"""
        clone = self._clone()
        if lookups == (None,):
            # prefetch_related(None) clears any pending lookups.
            clone._prefetch_related_lookups = ()
        else:
            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
        return clone
    def _prefetch_related_objects(self):
        """Run pending prefetch lookups against the cached results."""
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        self._prefetch_done = True
    def _clone(self):
        """Same as QuerySet._clone()"""
        c = self.__class__(
            self.raw_query, model=self.model, query=self.query, params=self.params,
            translations=self.translations, using=self._db, hints=self._hints
        )
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        return c
    def _fetch_all(self):
        """Evaluate the raw query (once) and run any pending prefetches."""
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()
    def __len__(self):
        self._fetch_all()
        return len(self._result_cache)
    def __bool__(self):
        self._fetch_all()
        return bool(self._result_cache)
    def __iter__(self):
        self._fetch_all()
        return iter(self._result_cache)
    def iterator(self):
        """Yield model instances built from the raw query's rows."""
        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        query = iter(self.query)
        try:
            model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
            if self.model._meta.pk.attname not in model_init_names:
                raise exceptions.FieldDoesNotExist(
                    'Raw query must include the primary key'
                )
            model_cls = self.model
            fields = [self.model_fields.get(c) for c in self.columns]
            # Backend/field converters (e.g. type coercions) applied per row.
            converters = compiler.get_converters([
                f.get_col(f.model._meta.db_table) if f else None for f in fields
            ])
            if converters:
                query = compiler.apply_converters(query, converters)
            for values in query:
                # Associate fields to values
                model_init_values = [values[pos] for pos in model_init_pos]
                instance = model_cls.from_db(db, model_init_names, model_init_values)
                if annotation_fields:
                    # Extra selected columns become plain attributes.
                    for column, pos in annotation_fields:
                        setattr(instance, column, values[pos])
                yield instance
        finally:
            # Done iterating the Query. If it has its own cursor, close it.
            if hasattr(self.query, 'cursor') and self.query.cursor:
                self.query.cursor.close()
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.query)
    def __getitem__(self, k):
        # Note: evaluates the whole queryset; no lazy slicing for raw SQL.
        return list(self)[k]
    @property
    def db(self):
        """Return the database used if this query is executed now."""
        return self._db or router.db_for_read(self.model, **self._hints)
    def using(self, alias):
        """Select the database this RawQuerySet should execute against."""
        return RawQuerySet(
            self.raw_query, model=self.model,
            query=self.query.chain(using=alias),
            params=self.params, translations=self.translations,
            using=alias,
        )
    @cached_property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.
        """
        columns = self.query.get_columns()
        # Adjust any column names which don't match field names
        for (query_name, model_name) in self.translations.items():
            # Ignore translations for nonexistent column names
            try:
                index = columns.index(query_name)
            except ValueError:
                pass
            else:
                columns[index] = model_name
        return columns
    @cached_property
    def model_fields(self):
        """A dict mapping column names to model field names."""
        converter = connections[self.db].introspection.identifier_converter
        model_fields = {}
        for field in self.model._meta.fields:
            name, column = field.get_attname_column()
            model_fields[converter(column)] = field
        return model_fields
class Prefetch:
    """
    Describe a single prefetch_related() lookup, optionally with a custom
    queryset and/or a ``to_attr`` destination attribute.
    """
    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        # `prefetch_to` is the path to the attribute that stores the result.
        self.prefetch_to = lookup
        if queryset is not None and (
            isinstance(queryset, RawQuerySet) or (
                hasattr(queryset, '_iterable_class') and
                not issubclass(queryset._iterable_class, ModelIterable)
            )
        ):
            raise ValueError(
                'Prefetch querysets cannot use raw(), values(), and '
                'values_list().'
            )
        if to_attr:
            # Store under to_attr instead of the final lookup component.
            self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
        self.queryset = queryset
        self.to_attr = to_attr
    def __getstate__(self):
        """Support pickling without evaluating the attached queryset."""
        obj_dict = self.__dict__.copy()
        if self.queryset is not None:
            # Prevent the QuerySet from being evaluated
            obj_dict['queryset'] = self.queryset._chain(
                _result_cache=[],
                _prefetch_done=True,
            )
        return obj_dict
    def add_prefix(self, prefix):
        """Prepend ``prefix`` to both the traversal and the storage paths."""
        self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
        self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
    def get_current_prefetch_to(self, level):
        """Return the storage path truncated to ``level + 1`` components."""
        return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
    def get_current_to_attr(self, level):
        """
        Return (attribute name at ``level``, whether it is an explicit
        to_attr destination, i.e. the last level of a to_attr prefetch).
        """
        parts = self.prefetch_to.split(LOOKUP_SEP)
        to_attr = parts[level]
        as_attr = self.to_attr and level == len(parts) - 1
        return to_attr, as_attr
    def get_current_queryset(self, level):
        """Return the custom queryset, but only at the final level."""
        if self.get_current_prefetch_to(level) == self.prefetch_to:
            return self.queryset
        return None
    def __eq__(self, other):
        # NOTE(review): equality considers only prefetch_to -- two Prefetch
        # objects with different querysets but the same destination compare
        # equal.
        if not isinstance(other, Prefetch):
            return NotImplemented
        return self.prefetch_to == other.prefetch_to
    def __hash__(self):
        return hash((self.__class__, self.prefetch_to))
def normalize_prefetch_lookups(lookups, prefix=None):
    """
    Normalize each lookup into a Prefetch object, optionally prepending
    ``prefix`` to its paths.
    """
    normalized = []
    for lookup in lookups:
        if not isinstance(lookup, Prefetch):
            lookup = Prefetch(lookup)
        if prefix:
            lookup.add_prefix(prefix)
        normalized.append(lookup)
    return normalized
def prefetch_related_objects(model_instances, *related_lookups):
    """
    Populate prefetched object caches for a list of model instances based on
    the lookups/Prefetch instances given.
    """
    if not model_instances:
        return # nothing to do
    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some book keeping to
    # ensure we don't do duplicate work.
    done_queries = {} # dictionary of things like 'foo__bar': [results]
    auto_lookups = set() # we add to this as we go through.
    followed_descriptors = set() # recursion protection
    # Reversed so that lookups can be popped off the end in original order.
    all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
    while all_lookups:
        lookup = all_lookups.pop()
        if lookup.prefetch_to in done_queries:
            if lookup.queryset is not None:
                raise ValueError("'%s' lookup was already seen with a different queryset. "
                                 "You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
            continue
        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = model_instances
        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if not obj_list:
                break
            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue
            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, '_prefetched_objects_cache'):
                    try:
                        obj._prefetched_objects_cache = {}
                    except (AttributeError, TypeError):
                        # Must be an immutable object from
                        # values_list(flat=True), for example (TypeError) or
                        # a QuerySet subclass that isn't returning Model
                        # instances (AttributeError), either in Django or a 3rd
                        # party. prefetch_related() doesn't make sense, so quit.
                        good_objects = False
                        break
            if not good_objects:
                break
            # Descend down tree
            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to first object applies to all.
            first_obj = obj_list[0]
            to_attr = lookup.get_current_to_attr(level)[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
            if not attr_found:
                raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
                                     "parameter to prefetch_related()" %
                                     (through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError("'%s' does not resolve to an item that supports "
                                 "prefetching - this is an invalid parameter to "
                                 "prefetch_related()." % lookup.prefetch_through)
            if prefetcher is not None and not is_fetched:
                # Run one query for this level and record the results.
                obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to)
                    auto_lookups.update(new_lookups)
                    all_lookups.extend(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.
                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    if through_attr in getattr(obj, '_prefetched_objects_cache', ()):
                        # If related objects have been prefetched, use the
                        # cache rather than the object's through_attr.
                        new_obj = list(obj._prefetched_objects_cache.get(through_attr))
                    else:
                        try:
                            new_obj = getattr(obj, through_attr)
                        except exceptions.ObjectDoesNotExist:
                            continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally match
                    # user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
    """
    For the attribute 'through_attr' on the given instance, find
    an object that has a get_prefetch_queryset().
    Return a 4 tuple containing:
    (the object with get_prefetch_queryset (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a boolean that is True if the attribute has already been fetched)
    """
    prefetcher = None
    is_fetched = False
    # For singly related objects, we have to avoid getting the attribute
    # from the object, as this will trigger the query. So we first try
    # on the class, in order to get the descriptor object.
    rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
    if rel_obj_descriptor is None:
        # Not a descriptor; fall back to a plain attribute lookup.
        attr_found = hasattr(instance, through_attr)
    else:
        attr_found = True
        if rel_obj_descriptor:
            # singly related object, descriptor object has the
            # get_prefetch_queryset() method.
            if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
                prefetcher = rel_obj_descriptor
                if rel_obj_descriptor.is_cached(instance):
                    is_fetched = True
            else:
                # descriptor doesn't support prefetching, so we go ahead and get
                # the attribute on the instance rather than the class to
                # support many related managers
                rel_obj = getattr(instance, through_attr)
                if hasattr(rel_obj, 'get_prefetch_queryset'):
                    prefetcher = rel_obj
                if through_attr != to_attr:
                    # Special case cached_property instances because hasattr
                    # triggers attribute computation and assignment.
                    if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
                        is_fetched = to_attr in instance.__dict__
                    else:
                        is_fetched = hasattr(instance, to_attr)
                else:
                    is_fetched = through_attr in instance._prefetched_objects_cache
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects().
    Run prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.
    Return the prefetched objects along with any additional prefetches that
    must be done due to prefetch_related lookups found from default managers.
    """
    # prefetcher must have a method get_prefetch_queryset() which takes a list
    # of instances, and returns a tuple:
    # (queryset of instances of self.model that are related to passed in instances,
    #  callable that gets value to be matched for returned instances,
    #  callable that gets value to be matched for passed in instances,
    #  boolean that is True for singly related objects,
    #  cache or field name to assign to,
    #  boolean that is True when the previous argument is a cache name vs a field name).
    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.
    rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = (
        prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
    # We have to handle the possibility that the QuerySet we just got back
    # contains some prefetch_related lookups. We don't want to trigger the
    # prefetch_related functionality by evaluating the query. Rather, we need
    # to merge in the prefetch_related lookups.
    # Copy the lookups in case it is a Prefetch object which could be reused
    # later (happens in nested prefetch_related).
    additional_lookups = [
        copy.copy(additional_lookup) for additional_lookup
        in getattr(rel_qs, '_prefetch_related_lookups', ())
    ]
    if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal instead of using public interface
        # for performance reasons.
        rel_qs._prefetch_related_lookups = ()
    all_related_objects = list(rel_qs)
    # Index related objects by their match value for O(1) per-instance lookup.
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
    to_attr, as_attr = lookup.get_current_to_attr(level)
    # Make sure `to_attr` does not conflict with a field.
    if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to first object applies to all.
        model = instances[0].__class__
        try:
            model._meta.get_field(to_attr)
        except exceptions.FieldDoesNotExist:
            pass
        else:
            msg = 'to_attr={} conflicts with a field on the {} model.'
            raise ValueError(msg.format(to_attr, model.__name__))
    # Whether or not we're prefetching the last part of the lookup.
    leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])
        if single:
            val = vals[0] if vals else None
            if as_attr:
                # A to_attr has been given for the prefetch.
                setattr(obj, to_attr, val)
            elif is_descriptor:
                # cache_name points to a field name in obj.
                # This field is a descriptor for a related object.
                setattr(obj, cache_name, val)
            else:
                # No to_attr has been given for this prefetch operation and the
                # cache_name does not point to a descriptor. Store the value of
                # the field in the object's field cache.
                obj._state.fields_cache[cache_name] = val
        else:
            if as_attr:
                setattr(obj, to_attr, vals)
            else:
                manager = getattr(obj, to_attr)
                if leaf and lookup.queryset is not None:
                    # Apply the custom queryset's filters to the manager.
                    qs = manager._apply_rel_filters(lookup.queryset)
                else:
                    qs = manager.get_queryset()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups
class RelatedPopulator:
    """
    RelatedPopulator is used for select_related() object instantiation.
    The idea is that each select_related() model will be populated by a
    different RelatedPopulator instance. The RelatedPopulator instances get
    klass_info and select (computed in SQLCompiler) plus the used db as
    input for initialization. That data is used to compute which columns
    to use, how to instantiate the model, and how to populate the links
    between the objects.
    The actual creation of the objects is done in populate() method. This
    method gets row and from_obj as input and populates the select_related()
    model instance.
    """
    def __init__(self, klass_info, select, db):
        self.db = db
        # Pre-compute needed attributes. The attributes are:
        #  - model_cls: the possibly deferred model class to instantiate
        #  - either:
        #    - cols_start, cols_end: usually the columns in the row are
        #      in the same order model_cls.__init__ expects them, so we
        #      can instantiate by model_cls(*row[cols_start:cols_end])
        #    - reorder_for_init: When select_related descends to a child
        #      class, then we want to reuse the already selected parent
        #      data. However, in this case the parent data isn't necessarily
        #      in the same order that Model.__init__ expects it to be, so
        #      we have to reorder the parent data. The reorder_for_init
        #      attribute contains a function used to reorder the field data
        #      in the order __init__ expects it.
        #  - pk_idx: the index of the primary key field in the reordered
        #    model data. Used to check if a related object exists at all.
        #  - init_list: the field attnames fetched from the database. For
        #    deferred models this isn't the same as all attnames of the
        #    model's fields.
        #  - related_populators: a list of RelatedPopulator instances if
        #    select_related() descends to related models from this model.
        #  - local_setter, remote_setter: Methods to set cached values on
        #    the object being populated and on the remote object. Usually
        #    these are Field.set_cached_value() methods.
        select_fields = klass_info['select_fields']
        from_parent = klass_info['from_parent']
        if not from_parent:
            # Columns are contiguous and already in __init__ order.
            self.cols_start = select_fields[0]
            self.cols_end = select_fields[-1] + 1
            self.init_list = [
                f[0].target.attname for f in select[self.cols_start:self.cols_end]
            ]
            self.reorder_for_init = None
        else:
            # Child of a parent already selected: reorder the parent's data.
            attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields}
            model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields)
            self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes]
            self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list])
        self.model_cls = klass_info['model']
        self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
        self.related_populators = get_related_populators(klass_info, select, self.db)
        self.local_setter = klass_info['local_setter']
        self.remote_setter = klass_info['remote_setter']
    def populate(self, row, from_obj):
        """Build the related instance from ``row`` and link it to ``from_obj``."""
        if self.reorder_for_init:
            obj_data = self.reorder_for_init(row)
        else:
            obj_data = row[self.cols_start:self.cols_end]
        if obj_data[self.pk_idx] is None:
            # A NULL primary key means there is no related object.
            obj = None
        else:
            obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
            for rel_iter in self.related_populators:
                rel_iter.populate(row, obj)
        self.local_setter(from_obj, obj)
        if obj is not None:
            self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
    """Build a RelatedPopulator for each related klass_info entry."""
    return [
        RelatedPopulator(rel_info, select, db)
        for rel_info in klass_info.get('related_klass_infos', [])
    ]
| 41.875313 | 119 | 0.612436 |
7956b2169c3a5cdee99ef738ab7595be889417a3 | 23,285 | py | Python | Stock/BackTesting/Engine/Strategy/DyStockBackTestingCtaEngine.py | wangchen1ren/DevilYuan | dd7c4746134c56eaf3955c706a931cf9bf479941 | [
"MIT"
] | 49 | 2018-07-17T16:17:27.000Z | 2022-03-14T14:01:46.000Z | Stock/BackTesting/Engine/Strategy/DyStockBackTestingCtaEngine.py | yuxiaojian01/DevilYuan | 81ec51b3ada74538aa41d0a42fa8cc958d5fabf1 | [
"MIT"
] | null | null | null | Stock/BackTesting/Engine/Strategy/DyStockBackTestingCtaEngine.py | yuxiaojian01/DevilYuan | 81ec51b3ada74538aa41d0a42fa8cc958d5fabf1 | [
"MIT"
] | 28 | 2018-10-11T02:50:07.000Z | 2022-02-26T12:57:21.000Z | """
本文件中包含的是CTA模块的回测引擎,回测引擎的API和CTA引擎一致,
可以使用和实盘相同的代码进行回测。
"""
import types
import copy
from collections import OrderedDict
from DyCommon.DyCommon import *
from ....Data.Engine.DyStockDataEngine import *
from ....Trade.DyStockStrategyBase import *
from ....Trade.Strategy.DyStockCtaBase import *
from ....Trade.Strategy.DyStockCtaEngineExtra import *
from ....Trade.Market.DyStockMarketFilter import *
from ..DyStockBackTestingAccountManager import *
from ....Common.DyStockCommon import *
class DyStockBackTestingCtaEngine(object):
"""
CTA回测引擎
函数接口和策略引擎保持一样,从而实现同一套代码从回测到实盘。
一天回测完,回测UI才会显示当日的交易相关的数据。
!!!只支持单策略的回测。
"""
    def __init__(self, eventEngine, info, dataEngine, reqData):
        """
        CTA back-testing engine constructor.

        @eventEngine: event engine instance
        @info: logging/info object
        @dataEngine: stock data engine
        @reqData: back-testing request carrying strategy class, settings,
                  codes, parameters and the trade-day period
        """
        # unpack
        strategyCls = reqData.strategyCls
        settings = reqData.settings # back-testing settings
        self._testCodes = reqData.codes
        self._strategyParam = reqData.param
        self._strategyParamGroupNo = reqData.paramGroupNo
        self._strategyPeriod = [reqData.tDays[0], reqData.tDays[-1]] # back-testing period
        self._eventEngine = eventEngine
        self._info = info
        self._dataEngine = dataEngine
        # print the strategy's class-level parameters
        self._printStrategyClsParams(strategyCls)
        # initialize the account manager and the strategy instance
        self._accountManager = DyStockBackTestingAccountManager(self._eventEngine, self._info, self._dataEngine, settings)
        self._accountManager.setParamGroupNoAndPeriod(self._strategyParamGroupNo, self._strategyPeriod)
        self._strategy = strategyCls(self, self._info, DyStockStrategyState(DyStockStrategyState.backTesting), self._strategyParam)
        self._strategySavedData = None # data the strategy saved after market close
        self._strategyMarketFilter = DyStockMarketFilter()
        # configure slippage
        self.setSlippage(settings['slippage'])
        self._info.print('滑点(‰): {}'.format(settings['slippage']), DyLogData.ind2)
        self._progress = DyProgress(self._info)
        # for easy access
        self._daysEngine = self._dataEngine.daysEngine
        self._ticksEngine = self._dataEngine.ticksEngine
        # error DataEngine
        # Sometimes a strategy's @prepare loads lots of individual stock
        # data; use a silent engine to avoid flooding the log.
        errorInfo = DyErrorInfo(eventEngine)
        self._errorDataEngine = DyStockDataEngine(eventEngine, errorInfo, registerEvent=False)
        self._errorDaysEngine = self._errorDataEngine.daysEngine
        self._curInit()
def _printStrategyClsParams(self, strategyCls):
"""
print策略参数(类参数),不是用户通过界面配置的参数。界面配置的参数,通过界面显示。
"""
self._info.print('回测策略[{}],参数如下-->'.format(strategyCls.chName), DyLogData.ind1)
for k, v in strategyCls.__dict__.items():
if type(v) is types.FunctionType or type(v) is types.MethodType or type(v) is classmethod:
continue
if k[:2] == '__' and k[-2:] == '__': # python internal attributes
continue
if k in strategyCls.__base__.__dict__ and k != 'backTestingMode':
continue
self._info.print('{}: {}'.format(k, v), DyLogData.ind1)
    def _curInit(self, tDay=None):
        """Initialize per-trading-day state."""
        self._curTDay = tDay
        self._curTicks = {} # all ticks of monitored stocks for the day, {code: {time: DyStockCtaTickData}}
        self._curLatestTicks = {} # latest tick of each monitored stock for the day, {code: DyStockCtaTickData}
        self._curBars = {} # all bars of monitored stocks for the day: intraday {code: {time: DyStockCtaBarData}}, daily {code: DyStockCtaBarData}
        self.__etf300Tick = None
        self.__etf500Tick = None
def _loadPreCloseOpen(self, code):
self._info.enable(False)
# 为了获取前日收盘价,向前一日获取股票日线数据,前复权方式基于当日
# !!!回测不考虑股票上市日起
if not self._daysEngine.loadCode(code, [self._curTDay, -1], latestAdjFactorInDb=False):
self._info.enable(True)
return None, None
# get previous close
df = self._daysEngine.getDataFrame(code)
preTDay = self._daysEngine.codeTDayOffset(code, self._curTDay, -1)
if preTDay is None: # 股票首次上市日
self._info.enable(True)
return None, None
preClose = df.ix[preTDay, 'close']
# 获取当日开盘价
try:
open = df.ix[self._curTDay, 'open']
except:
self._info.enable(True)
self._info.print('{}:{}没有{}交易数据'.format(code, self._daysEngine.stockAllCodesFunds[code], self._curTDay), DyLogData.warning)
return None, None
self._info.enable(True)
return preClose, open
    def _loadTicks(self, codes):
        """
        Load the current trading day's tick data for the monitored stocks
        into memory (self._curTicks). Always returns True; individual
        failures only produce warnings.
        """
        self._info.print('开始载入{0}只监控股票Ticks数据[{1}]...'.format(len(codes), self._curTDay))
        self._progress.init(len(codes), 100)
        count = 0
        for code in codes:
            # fetch previous close and the day's open from daily data
            preClose, open = self._loadPreCloseOpen(code)
            if open is None:
                self._progress.update()
                continue
            # load ticks
            if not self._ticksEngine.loadCode(code, self._curTDay):
                # Sina tick data is sometimes missing, so don't treat this
                # as an error -- just warn and skip the stock.
                self._info.print('{0}:{1}Ticks数据[{2}]载入失败'.format(code, self._daysEngine.stockAllCodesFunds[code], self._curTDay), DyLogData.warning)
                self._progress.update()
                continue
            # to dict for fast access
            df = self._ticksEngine.getDataFrame(code)
            # Cumulative values, because the realtime data fetched from Sina
            # is cumulative.
            df['volume'] = df['volume'].cumsum()*100
            df['amount'] = df['amount'].cumsum()
            data = df.reset_index().values.tolist()
            # load to memory
            ticks = {}
            high, low = None, None
            # NOTE(review): the loop variable `datetime` shadows the
            # datetime class inside this loop.
            for datetime, price, volume, amount, type in data:
                tick = DyStockCtaTickData()
                tick.code = code
                tick.name = self._daysEngine.stockAllCodesFunds[code]
                tick.date = self._curTDay
                tick.time = datetime.strftime('%H:%M:%S')
                tick.datetime = datetime
                tick.price = price
                tick.volume = volume # cumulative value
                tick.amount = amount # cumulative value
                # from Days data
                tick.preClose = preClose
                tick.open = open
                # running intraday low/high up to this tick
                low = tick.price if low is None else min(low, tick.price)
                high = tick.price if high is None else max(high, tick.price)
                tick.low = low
                tick.high = high
                # set
                ticks[tick.time] = tick
            self._curTicks[code] = ticks
            count += 1
            self._progress.update()
        self._info.print('{0}只监控股票Ticks数据[{1}]载入完成'.format(count, self._curTDay))
        return True
def _getTime(self, seconds):
h = seconds // 3600
m = (seconds % 3600 ) // 60
s = (seconds % 3600 ) % 60
return h, m, s
def _getCtaTicks(self, h, m, s):
"""
获取推送到策略的Ticks
"""
time = '{0}:{1}:{2}'.format(h if h > 9 else ('0' + str(h)), m if m > 9 else ('0' + str(m)), s if s > 9 else ('0' + str(s)))
ticks = {}
for code in self._curTicks:
if time in self._curTicks[code]:
tick = self._curTicks[code][time]
ticks[code] = tick
# save copy of latest tick of current trade day
self._curLatestTicks[code] = copy.copy(tick)
else:
if code in self._curLatestTicks:
# copy it and change to latest time
tick = copy.copy(self._curLatestTicks[code])
tick.time = time
tick.datetime = datetime.strptime(self._curTDay + ' ' + time, '%Y-%m-%d %H:%M:%S')
ticks[code] = tick
return ticks
def _getCtamBars(self, h, m):
"""
获取推送到策略的分钟Bars
缺失的Bar将不会推送
"""
time = '{0}:{1}:{2}'.format(h if h > 9 else ('0' + str(h)), m if m > 9 else ('0' + str(m)), '00')
bars = {}
for code in self._curBars:
if time in self._curBars[code]:
bar = self._curBars[code][time]
bars[code] = bar
return bars
    def _onPushAccount(self):
        """
        Push account related data (entrusts, deals, positions) to the
        strategy.
        """
        # entrust (order) reports
        entrusts = self._accountManager.popCurWaitingPushEntrusts()
        for entrust in entrusts:
            self._strategy.onEntrust(entrust)
        # deal (fill) reports
        deals = self._accountManager.popCurWaitingPushDeals()
        for deal in deals:
            self._strategy.onDeal(deal)
        # Position report: pushed on every call. This differs from the live
        # trading engine, which pushes only when positions change.
        if self._accountManager.curPos:
            self._strategy.onPos(self._accountManager.curPos)
def _setTicks(self, ticks):
"""
设置ETF300和ETF500 Tick,也适用于bars
"""
etf300Tick = ticks.get(DyStockCommon.etf300)
if etf300Tick:
self.__etf300Tick = etf300Tick
etf500Tick = ticks.get(DyStockCommon.etf500)
if etf500Tick:
self.__etf500Tick = etf500Tick
def _runTicks(self):
    """
    Replay the loaded tick data of the current trade day through the
    strategy, second by second, over the morning and afternoon sessions.
    """
    self._info.print('开始回测Ticks数据[{0}]...'.format(self._curTDay))

    # progress over the 4 trading hours (in seconds)
    self._progress.init(4*60*60, 100, 20)

    # call auction
    # session start times: morning and afternoon
    startTime = [(9,30), (13,0)]
    for startTimeH, startTimeM in startTime:
        for i in range(60*60*2 + 1): # right boundary of each 2h session is inclusive
            h, m, s = self._getTime(startTimeM*60 + i) # plus minute offset for calculation
            h += startTimeH

            ticks = self._getCtaTicks(h, m, s)

            # onTicks: the engine does not catch exceptions raised by the
            # strategy; the strategy must handle its own errors.
            if ticks:
                self._accountManager.onTicks(ticks) # update current positions firstly
                self._accountManager.syncStrategyPos(self._strategy) # sync strategy positions

                self._setTicks(ticks)

                filteredTicks = self._strategyMarketFilter.filter(ticks)
                if filteredTicks:
                    self._strategy.onTicks(filteredTicks)

                self._onPushAccount()

            self._progress.update()

    self._info.print('Ticks数据[{0}]回测完成'.format(self._curTDay))
def _load1dBars(self, codes):
    """
    Load the daily bar of the current trade day for each monitored stock.

    Daily bars are based on the current day's adjust factor, so the logic
    matches the intraday and tick back-testing paths. Codes without data,
    or suspended on the current day, are skipped.

    @codes: list of stock codes to load.
    @return: True when loading finished (skipped codes allowed), False if
             the daily data engine itself failed to load.
    """
    self._info.print('开始载入{0}只监控股票日线数据[{1}]...'.format(len(codes), self._curTDay))

    # current day's adjust factor, not the latest one in DB
    if not self._daysEngine.load([self._curTDay, -1], codes=codes, latestAdjFactorInDb=False):
        return False

    count = 0
    for code in codes:
        df = self._daysEngine.getDataFrame(code)
        if df is None:
            continue

        # need preClose; if the previous day was suspended there is only
        # one row, so reload via the error days engine
        if df.shape[0] < 2:
            # suspended on the current day itself -> skip
            if self._curTDay != df.index[0].strftime("%Y-%m-%d"):
                continue

            # load the current day plus the previous trading day
            if not self._errorDaysEngine.loadCode(code, [self._curTDay, -1], latestAdjFactorInDb=False):
                continue

            df = self._errorDaysEngine.getDataFrame(code)
            if df.shape[0] < 2:
                continue

        # convert to BarData
        barData = DyStockCtaBarData('1d')
        barData.code = code
        barData.name = self._daysEngine.stockAllCodesFunds[code]

        # OHLCV of the current day (last row). The deprecated `.ix`
        # indexer was removed from pandas; use positional `.iloc` access.
        barData.open = df['open'].iloc[-1]
        barData.high = df['high'].iloc[-1]
        barData.low = df['low'].iloc[-1]
        barData.close = df['close'].iloc[-1]
        barData.volume = df['volume'].iloc[-1]

        barData.curOpen = barData.open
        barData.curHigh = barData.high
        barData.curLow = barData.low

        # previous day's close (first of the two rows)
        barData.preClose = df['close'].iloc[0]

        # datetime stamped at the close of the day
        barData.date = self._curTDay
        barData.time = '15:00:00'
        barData.datetime = datetime.strptime(self._curTDay + ' 15:00:00', '%Y-%m-%d %H:%M:%S')

        self._curBars[code] = barData
        count += 1

    self._info.print('{0}只监控股票日线数据[{1}]载入完成'.format(count, self._curTDay))
    return True
def _loadmBars(self, codes, m):
    """
    Load tick data of the current trade day and aggregate it into
    m-minute bars for each monitored stock.

    @codes: list of stock codes to load.
    @m: bar period in minutes.
    @return: True always; codes whose tick data fails to load are skipped
             with a warning (Sina tick data is sometimes incomplete).
    """
    self._info.print('开始载入{0}只监控股票{1}分钟K线数据[{2}]...'.format(len(codes), m, self._curTDay))
    self._progress.init(len(codes), 100)

    count = 0
    for code in codes:
        # previous close and today's open
        preClose, curOpen = self._loadPreCloseOpen(code)
        if curOpen is None: # suspended or not listed yet
            self._progress.update()
            continue

        # load ticks
        if not self._ticksEngine.loadCode(code, self._curTDay):
            # Sina tick data can be missing, so only warn, don't fail
            self._info.print('{0}:{1}Ticks数据[{2}]载入失败'.format(code, self._daysEngine.stockAllCodesFunds[code], self._curTDay), DyLogData.warning)
            self._progress.update()
            continue

        df = self._ticksEngine.getDataFrame(code)

        # aggregate into m-minute bars, right-closed; bars with no ticks
        # come out as NaN and are dropped
        df = df.resample(str(m) + 'min', closed='right', label='right')[['price', 'volume']].agg(OrderedDict([('price', 'ohlc'), ('volume', 'sum')]))
        df.dropna(inplace=True)

        data = df.reset_index().values.tolist()

        # load to memory. Loop variables renamed so they no longer shadow
        # the builtin `open` and the `datetime` class.
        bars = {}
        curHigh, curLow = None, None
        for barDatetime, openPrice, highPrice, lowPrice, closePrice, volume in data: # DF is MultiIndex
            # convert to BarData
            barData = DyStockCtaBarData('%sm'%m)
            barData.code = code
            barData.name = self._daysEngine.stockAllCodesFunds[code]

            # OHLCV (tick volume is in lots of 100 shares)
            barData.open = openPrice
            barData.high = highPrice
            barData.low = lowPrice
            barData.close = closePrice
            barData.volume = int(volume*100)
            barData.preClose = preClose
            barData.curOpen = curOpen

            # running intraday high/low up to and including this bar
            curLow = lowPrice if curLow is None else min(curLow, lowPrice)
            curHigh = highPrice if curHigh is None else max(curHigh, highPrice)
            barData.curHigh = curHigh
            barData.curLow = curLow

            # datetime
            barData.date = self._curTDay
            barData.time = barDatetime.strftime('%H:%M:%S')
            barData.datetime = barDatetime

            bars[barData.time] = barData

        self._curBars[code] = bars

        count += 1
        self._progress.update()

    self._info.print('{0}只监控股票{1}分钟K线数据[{2}]载入完成'.format(count, m, self._curTDay))
    return True
def _loadBars(self, barMode, codes):
if barMode == 'bar1d':
ret = self._load1dBars(codes)
else: # 分钟Bar, like 'bar1m', 'bar5m', ...
ret = self._loadmBars(codes, int(barMode[3:-1]))
if not ret:
self._info.print('策略载入[{0}]数据[{1}]失败'.format(barMode, self._curTDay), DyLogData.error)
return ret
def _loadData(self, codes):
if 'bar' in self._strategy.backTestingMode:
if not self._loadBars(self._strategy.backTestingMode, codes):
return False
else: # default is Tick Mode
if not self._loadTicks(codes):
self._info.print('策略载入Ticks数据[{0}]失败'.format(self._curTDay), DyLogData.error)
return False
return True
def _run1dBars(self):
    """
    Replay the single daily bar of the current trade day through the
    strategy.
    """
    self._info.print('开始回测日线数据[{0}]...'.format(self._curTDay))

    # onBars: the engine does not catch exceptions raised by the
    # strategy; the strategy must handle its own errors.
    if self._curBars:
        self._accountManager.onBars(self._curBars) # update current positions firstly
        self._accountManager.syncStrategyPos(self._strategy) # sync strategy positions

        self._setTicks(self._curBars)

        filteredBars = self._strategyMarketFilter.filter(self._curBars)
        if filteredBars:
            self._strategy.onBars(filteredBars)

        self._onPushAccount()

    self._info.print('日线数据[{0}]回测完成'.format(self._curTDay))
def _runmBars(self, barM):
    """
    Replay the day's @barM-minute bars through the strategy, one bar
    interval at a time, over the morning and afternoon sessions.
    """
    self._info.print('开始回测{0}分钟K线数据[{1}]...'.format(barM, self._curTDay))

    # progress over the number of bars in the 4 trading hours
    self._progress.init(int(4*60/barM), 100, 20)

    # call auction
    # session start times: morning and afternoon
    startTime = [(9, 30), (13, 0)]
    for startTimeH, startTimeM in startTime:
        for i in range(0, 60*60*2 + 1, barM*60):
            h, m, s = self._getTime(startTimeM*60 + i) # plus minute offset for calculation
            h += startTimeH

            bars = self._getCtamBars(h, m)

            # onBars: the engine does not catch exceptions raised by the
            # strategy; the strategy must handle its own errors.
            if bars:
                self._accountManager.onBars(bars) # update current positions firstly
                self._accountManager.syncStrategyPos(self._strategy) # sync strategy positions

                self._setTicks(bars)

                filteredBars = self._strategyMarketFilter.filter(bars)
                if filteredBars:
                    self._strategy.onBars(filteredBars)

                self._onPushAccount()

            self._progress.update()

    self._info.print('{0}分钟K线数据[{1}]回测完成'.format(barM, self._curTDay))
def _runBars(self, barMode):
if barMode == 'bar1d':
self._run1dBars()
else:
self._runmBars(int(barMode[3:-1]))
def _runData(self):
if 'bar' in self._strategy.backTestingMode:
self._runBars(self._strategy.backTestingMode)
else: # default is Tick Mode
self._runTicks()
def _verifyParams(self, tDay):
if 'bar' in self._strategy.backTestingMode:
if self._strategy.backTestingMode != 'bar1d':
m = int(self._strategy.backTestingMode[3:-1])
if math.ceil(4*60/m) != int(4*60/m):
return False
return True
def run(self, tDay):
    """
    Run the back-test for the specified trade day @tDay.
    Returns True on success, False on any validation or setup failure.
    """
    # validate parameters (minute-bar period must fit the trading day)
    if not self._verifyParams(tDay):
        self._info.print('策略[{0}]分钟Bar模式错误: {1}'.format(tDay, self._strategy.backTestingMode), DyLogData.error)
        return False

    # per-day initialization
    self._curInit(tDay)

    # strategy pre-open preparation
    onOpenCodes = self._strategy.onOpenCodes()
    if onOpenCodes is None: # strategy-specified open codes take priority over test codes
        onOpenCodes = self._testCodes

    if not self._strategy.onOpen(tDay, onOpenCodes):
        self._info.print('策略[{0}]开盘前准备失败'.format(tDay), DyLogData.error)
        return False

    # account manager pre-open preparation (logging muted around it)
    self._info.enable(False)
    ret = self._accountManager.onOpen(tDay)
    self._info.enable(True)
    if not ret:
        self._info.print('账户管理[{0}]开盘前准备失败'.format(tDay), DyLogData.error)
        return False

    # install the strategy's market filter
    self._strategyMarketFilter.addFilter(self._strategy.onMonitor())

    # monitored stock pool: strategy + account + the two index ETFs
    monitoredCodes = self._strategy.onMonitor() + self._accountManager.onMonitor() + [DyStockCommon.etf300, DyStockCommon.etf500]
    monitoredCodes = set(monitoredCodes) # drop duplicated codes
    monitoredCodes -= set(DyStockCommon.indexes.keys()) # Sina has no tick data for indexes
    monitoredCodes = list(monitoredCodes)

    # load back-testing data for the monitored pool
    if not self._loadData(monitoredCodes):
        return False

    # replay the data
    self._runData()

    # after-close processing
    self._strategy.onClose()
    self._accountManager.onClose()

    return True
def setSlippage(self, slippage):
    """ Set slippage (in thousandths of the deal price) on both data types. """
    DyStockCtaTickData.slippage = slippage
    DyStockCtaBarData.slippage = slippage

def loadPreparedData(self, date, strategyCls):
    # no prepared data is persisted in this engine
    return None

def loadPreparedPosData(self, date, strategyCls):
    # no prepared position data is persisted in this engine
    return None

def loadOnClose(self, date, strategyCls):
    # return the strategy state saved by saveOnClose (in-memory only)
    return self._strategySavedData

def saveOnClose(self, date, strategyCls, savedData=None):
    # keep the strategy's close-time state in memory only
    self._strategySavedData = savedData

def tLatestDayInDb(self):
    # latest trade day present in the database
    return self._dataEngine.daysEngine.tLatestDayInDb()

def tDaysOffsetInDb(self, base, n=0):
    # trade day at offset @n from @base, per the database trade calendar
    return self._dataEngine.daysEngine.tDaysOffsetInDb(base, n)

def putStockMarketMonitorUiEvent(self, strategyCls, data=None, newData=False, op=None, signalDetails=None, datetime_=None):
    # intentionally a no-op: no UI in this engine
    pass

def putStockMarketStrengthUpdateEvent(self, strategyCls, time, marketStrengthInfo):
    # intentionally a no-op: no UI in this engine
    pass

def putEvent(self, type, data):
    # intentionally a no-op: no event bus in this engine
    pass
@property
def marketTime(self):
    # time string of the latest ETF300 tick, or None before any tick
    return self.__etf300Tick.time if self.__etf300Tick else None

@property
def marketDatetime(self):
    # datetime of the latest ETF300 tick, or None before any tick
    return self.__etf300Tick.datetime if self.__etf300Tick else None

@property
def indexTick(self):
    # the ETF300 tick doubles as the market index tick
    return self.__etf300Tick

@property
def etf300Tick(self):
    return self.__etf300Tick

@property
def etf500Tick(self):
    return self.__etf500Tick

@property
def dataEngine(self):
    return self._dataEngine

@property
def errorDataEngine(self):
    return self._errorDataEngine
# Account accessors. @strategyCls is accepted for interface compatibility
# but ignored here — presumably a single account is managed; confirm
# against the live engine.
def getCurPos(self, strategyCls):
    # current positions of the managed account
    return self._accountManager.curPos

def getCurCash(self, strategyCls):
    # current available cash
    return self._accountManager.curCash

def getCurCapital(self, strategyCls):
    # total capital (cash + position market value, per account manager)
    return self._accountManager.getCurCapital()

def getCurCodePosMarketValue(self, strategyCls, code):
    # market value of the position held in @code
    return self._accountManager.getCurCodePosMarketValue(code)

def getCurPosMarketValue(self, strategyCls):
    # market value of all positions
    return self._accountManager.getCurPosMarketValue()

def getCurAckData(self):
    # acknowledgement data for the bound strategy's class
    return self._accountManager.getCurAckData(self._strategy.__class__)

def getBuyVol(self, cash, code, price):
    # volume purchasable with @cash at @price, per common trade rules
    return DyStockTradeCommon.getBuyVol(cash, code, price)
def buy(self, strategyCls, tick, volume, signalInfo=None):
    """Place a buy order at the strategy's configured buy-price field of @tick."""
    datetime = tick.datetime
    code = tick.code
    name = tick.name
    price = getattr(tick, strategyCls.buyPrice)

    return self._accountManager.buy(datetime, strategyCls, code, name, price, volume, signalInfo, tick)

def sell(self, strategyCls, tick, volume, sellReason=None, signalInfo=None):
    """Place a sell order at the strategy's configured sell-price field of @tick."""
    datetime = tick.datetime
    code = tick.code
    price = getattr(tick, strategyCls.sellPrice)

    return self._accountManager.sell(datetime, strategyCls, code, price, volume, sellReason, signalInfo, tick)

def buyByRatio(self, strategyCls, tick, ratio, ratioMode, signalInfo=None):
    # delegate ratio-based buying to the shared engine helper
    return DyStockCtaEngineExtra.buyByRatio(self, self._accountManager, strategyCls, tick, ratio, ratioMode, signalInfo)

def sellByRatio(self, strategy, tick, ratio, ratioMode, sellReason=None, signalInfo=None):
    # delegate ratio-based selling to the shared engine helper
    return DyStockCtaEngineExtra.sellByRatio(self, self._accountManager, strategy, tick, ratio, ratioMode, sellReason, signalInfo)

def closePos(self, strategyCls, tick, volume, sellReason, signalInfo=None):
    """
    Close a position. See DyStockCtaEngine for the full commentary;
    implemented here as a plain sell.
    """
    datetime = tick.datetime
    code = tick.code
    price = getattr(tick, strategyCls.sellPrice)

    return self._accountManager.sell(datetime, strategyCls, code, price, volume, sellReason, signalInfo, tick)

def cancel(self, strategyCls, cancelEntrust):
    """
    Cancel an entrust placed by the strategy. Always fails in this
    engine (no pending-order book to cancel from).
    """
    return False
7956b23e993ee0e9ddcf3ae793ace2fc534b484e | 439 | py | Python | product/migrations/0017_alter_category_category_name.py | SergueiNK/oc_projet_p8 | a2a3e3df7ecd048207f342343163a3aecf246423 | [
"Unlicense"
] | null | null | null | product/migrations/0017_alter_category_category_name.py | SergueiNK/oc_projet_p8 | a2a3e3df7ecd048207f342343163a3aecf246423 | [
"Unlicense"
] | 3 | 2021-12-04T16:47:44.000Z | 2021-12-04T19:25:31.000Z | product/migrations/0017_alter_category_category_name.py | SergueiNK/oc_projet_p8 | a2a3e3df7ecd048207f342343163a3aecf246423 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2.6 on 2021-10-29 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0016_auto_20211029_1210'),
]
operations = [
migrations.AlterField(
model_name='category',
name='category_name',
field=models.CharField(max_length=500, unique=True, verbose_name='category name'),
),
]
| 23.105263 | 94 | 0.626424 |
7956b3a6233334391737ccd58b76467a04784ff3 | 22,611 | py | Python | pandas/core/indexes/category.py | trentpark8800/pandas | 4edf3351b0bfef551a01adf3ca2e6e657448ed80 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/category.py | trentpark8800/pandas | 4edf3351b0bfef551a01adf3ca2e6e657448ed80 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/category.py | trentpark8800/pandas | 4edf3351b0bfef551a01adf3ca2e6e657448ed80 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from typing import Any, List, Optional
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import ArrayLike, Label
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
is_categorical_dtype,
is_scalar,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna
from pandas.core import accessor
from pandas.core.arrays.categorical import Categorical, contains
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs, maybe_extract_name
from pandas.core.indexes.extension import NDArrayBackedExtensionIndex, inherit_names
import pandas.core.missing as missing
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "CategoricalIndex"})
@inherit_names(
[
"argsort",
"_internal_get_values",
"tolist",
"codes",
"categories",
"ordered",
"_reverse_indexer",
"searchsorted",
"is_dtype_equal",
"min",
"max",
],
Categorical,
)
@accessor.delegate_names(
delegate=Categorical,
accessors=[
"rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered",
"as_unordered",
],
typ="method",
overwrite=True,
)
class CategoricalIndex(NDArrayBackedExtensionIndex, accessor.PandasDelegate):
"""
Index based on an underlying :class:`Categorical`.
CategoricalIndex, like Categorical, can only take on a limited,
and usually fixed, number of possible values (`categories`). Also,
like Categorical, it might have an order, but numerical operations
(additions, divisions, ...) are not possible.
Parameters
----------
data : array-like (1-dimensional)
The values of the categorical. If `categories` are given, values not in
`categories` will be replaced with NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here (and also not in `dtype`), they
will be inferred from the `data`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
copy : bool, default False
Make a copy of input ndarray.
name : object, optional
Name to be stored in the index.
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
Index : The base pandas Index type.
Categorical : A categorical array.
CategoricalDtype : Type for categorical data.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`_
for more.
Examples
--------
>>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['a', 'b', 'c'], ordered=False, dtype='category')
``CategoricalIndex`` can also be instantiated from a ``Categorical``:
>>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
>>> pd.CategoricalIndex(c)
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['a', 'b', 'c'], ordered=False, dtype='category')
Ordered ``CategoricalIndex`` can have a min and max value.
>>> ci = pd.CategoricalIndex(
... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
... )
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['c', 'b', 'a'], ordered=True, dtype='category')
>>> ci.min()
'c'
"""
_typ = "categoricalindex"
@property
def _can_hold_strings(self):
return self.categories._can_hold_strings
codes: np.ndarray
categories: Index
_data: Categorical
_values: Categorical
@property
def _engine_type(self):
# self.codes can have dtype int8, int16, int32 or int64, so we need
# to return the corresponding engine type (libindex.Int8Engine, etc.).
return {
np.int8: libindex.Int8Engine,
np.int16: libindex.Int16Engine,
np.int32: libindex.Int32Engine,
np.int64: libindex.Int64Engine,
}[self.codes.dtype.type]
_attributes = ["name"]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None
):
dtype = CategoricalDtype._from_values_or_dtype(data, categories, ordered, dtype)
name = maybe_extract_name(name, data, cls)
if not is_categorical_dtype(data):
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
raise cls._scalar_data_error(data)
data = []
assert isinstance(dtype, CategoricalDtype), dtype
data = extract_array(data, extract_numpy=True)
if not isinstance(data, Categorical):
data = Categorical(data, dtype=dtype)
elif isinstance(dtype, CategoricalDtype) and dtype != data.dtype:
# we want to silently ignore dtype='category'
data = data._set_dtype(dtype)
data = data.copy() if copy else data
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: Categorical, name: Label = None):
assert isinstance(values, Categorical), type(values)
result = object.__new__(cls)
result._data = values
result.name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
# error: Argument 1 of "_shallow_copy" is incompatible with supertype
# "ExtensionIndex"; supertype defines the argument type as
# "Optional[ExtensionArray]" [override]
@doc(Index._shallow_copy)
def _shallow_copy( # type:ignore[override]
self,
values: Optional[Categorical] = None,
name: Label = no_default,
):
name = self.name if name is no_default else name
if values is not None:
# In tests we only get here with Categorical objects that
# have matching .ordered, and values.categories a subset of
# our own. However we do _not_ have a dtype match in general.
values = Categorical(values, dtype=self.dtype)
return super()._shallow_copy(values=values, name=name)
def _is_dtype_compat(self, other) -> Categorical:
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Parameters
----------
other : Index
Returns
-------
Categorical
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
other = extract_array(other)
if not other._categories_match_up_to_permutation(self):
raise TypeError(
"categories must match existing categories when appending"
)
else:
values = other
cat = Categorical(other, dtype=self.dtype)
other = CategoricalIndex(cat)
if not other.isin(values).all():
raise TypeError(
"cannot append a non-category item to a CategoricalIndex"
)
other = other._values
if not ((other == values) | (isna(other) & isna(values))).all():
# GH#37667 see test_equals_non_category
raise TypeError(
"categories must match existing categories when appending"
)
return other
    def equals(self, other: object) -> bool:
        """
        Determine if two CategoricalIndex objects contain the same elements.

        Returns
        -------
        bool
            If two CategoricalIndex objects have equal elements True,
            otherwise False.
        """
        if self.is_(other):
            # identical object: trivially equal
            return True
        if not isinstance(other, Index):
            return False

        try:
            # coerce `other` to a Categorical with our dtype; raises when
            # its values cannot be represented in our categories
            other = self._is_dtype_compat(other)
        except (TypeError, ValueError):
            return False

        return self._data.equals(other)
# --------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
return self.categories._formatter_func
    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value)
        """
        # a "display.max_categories" of 0 means use the default of 10
        max_categories = (
            10
            if get_option("display.max_categories") == 0
            else get_option("display.max_categories")
        )
        attrs = [
            (
                "categories",
                ibase.default_pprint(self.categories, max_seq_items=max_categories),
            ),
            # mypy cannot see the `ordered` attribute provided via
            # `inherit_names`, hence the suppression
            ("ordered", self.ordered),  # type: ignore[attr-defined]
        ]
        if self.name is not None:
            attrs.append(("name", ibase.default_pprint(self.name)))
        attrs.append(("dtype", f"'{self.dtype.name}'"))
        # only show the length when it exceeds what would be displayed
        max_seq_items = get_option("display.max_seq_items") or len(self)
        if len(self) > max_seq_items:
            attrs.append(("length", len(self)))
        return attrs
def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]:
from pandas.io.formats.printing import pprint_thing
result = [
pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep
for x in self._values
]
return header + result
# --------------------------------------------------------------------
@property
def inferred_type(self) -> str:
return "categorical"
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_valid_nat_for_dtype(key, self.categories.dtype):
return self.hasnans
return contains(self, key, container=self._engine)
@doc(Index.astype)
def astype(self, dtype, copy=True):
res_data = self._data.astype(dtype, copy=copy)
return Index(res_data, name=self.name)
@doc(Index.fillna)
def fillna(self, value, downcast=None):
value = self._require_scalar(value)
cat = self._data.fillna(value)
return type(self)._simple_new(cat, name=self.name)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves.
# To avoid a reference cycle, bind `codes` to a local variable, so
# `self` is not passed into the lambda.
codes = self.codes
return self._engine_type(lambda: codes, len(self))
@doc(Index.unique)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self._values.unique()
# Use _simple_new instead of _shallow_copy to ensure we keep dtype
# of result, not self.
return type(self)._simple_new(result, name=self.name)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype("object")
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError(
"argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
"argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
"argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase.ensure_index(target)
missing: List[int]
if self.equals(target):
indexer = None
missing = []
else:
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes) and indexer is not None:
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
cat = self._data._from_backing_data(codes)
new_target = type(self)._simple_new(cat, name=self.name)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = Categorical(new_target, dtype=target.dtype)
new_target = type(self)._simple_new(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
"""
reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = Categorical(new_target, dtype=self.dtype)
new_target = type(self)._simple_new(new_target, name=self.name)
return new_target, indexer, new_indexer
# --------------------------------------------------------------------
# Indexing Methods
def _maybe_cast_indexer(self, key) -> int:
return self._data._unbox_scalar(key)
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ibase.ensure_index(target)
self._check_indexing_method(method)
if self.is_unique and self.equals(target):
return np.arange(len(self), dtype="intp")
return self._get_indexer_non_unique(target._values)[0]
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase.ensure_index(target)
return self._get_indexer_non_unique(target._values)
def _get_indexer_non_unique(self, values: ArrayLike):
"""
get_indexer_non_unique but after unrapping the target Index object.
"""
# Note: we use engine.get_indexer_non_unique for get_indexer in addition
# to get_indexer_non_unique because, even if `target` is unique, any
# non-category entries in it will be encoded as -1 so `codes` may
# not be unique.
if isinstance(values, Categorical):
# Indexing on codes is more efficient if categories are the same,
# so we can apply some optimizations based on the degree of
# dtype-matching.
cat = self._data._encode_with_my_categories(values)
codes = cat._codes
else:
codes = self.categories.get_indexer(values)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
@doc(Index._convert_list_indexer)
def _convert_list_indexer(self, keyarr):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
# See tests.indexing.interval.test_interval:test_loc_getitem_frame
indexer = self.categories._convert_list_indexer(keyarr)
return Index(self.codes).get_indexer_for(indexer)
return self.get_indexer_for(keyarr)
@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str, kind):
if kind == "loc":
return label
return super()._maybe_cast_slice_bound(label, side, kind)
# --------------------------------------------------------------------
    def take_nd(self, *args, **kwargs):
        """Deprecated alias for `take`; emits a FutureWarning."""
        warnings.warn(
            "CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take instead",
            FutureWarning,
            stacklevel=2,
        )
        return self.take(*args, **kwargs)
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
mapped = self._values.map(mapper)
return Index(mapped, name=self.name)
    def _concat(self, to_concat: List["Index"], name: Label) -> "CategoricalIndex":
        # if calling index is category, don't check dtype of others
        # each input is coerced to our categories, then the integer codes
        # are concatenated and re-wrapped in a Categorical
        codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
        cat = self._data._from_backing_data(codes)
        return type(self)._simple_new(cat, name=name)
    def _delegate_method(self, name: str, *args, **kwargs):
        """ method delegation to the ._values """
        method = getattr(self._values, name)
        # in-place mutation would bypass Index immutability guarantees
        if "inplace" in kwargs:
            raise ValueError("cannot use inplace with CategoricalIndex")
        res = method(*args, **kwargs)
        # scalars pass through; array-likes are re-wrapped as an index
        if is_scalar(res):
            return res
        return CategoricalIndex(res, name=self.name)
| 34.520611 | 94 | 0.595064 |
7956b3cca36622762023e8eed883294e99b3f814 | 1,026 | py | Python | backend/backend/urls.py | Hosermes/critterco-dev | 963e17f53c162e899353b8468064b67482db9f60 | [
"MIT"
] | 4 | 2020-05-21T08:34:54.000Z | 2020-08-01T11:57:40.000Z | backend/backend/urls.py | HosseinMirjalali/critterco-dev | 909fabf1dca2fb7a50bd163db0ecee67c252c4ac | [
"MIT"
] | 5 | 2020-06-29T11:12:27.000Z | 2020-08-13T11:53:20.000Z | backend/backend/urls.py | Hosermes/critterco-dev | 963e17f53c162e899353b8468064b67482db9f60 | [
"MIT"
] | 2 | 2020-04-30T11:16:22.000Z | 2020-05-15T17:14:12.000Z | from django.contrib import admin
from django.urls import path, include
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
# URL routing for the project; media files are served from MEDIA_ROOT.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("", include("apps.biz.urls")),
    path("", include("apps.comments.urls")),
    path("api/user/", include("apps.user.urls")),
    # JWT auth endpoints (simplejwt)
    path("api/token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
    path("api/token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
    url(
        r"^api/password_reset/",
        include("django_rest_passwordreset.urls", namespace="password_reset"),
    ),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# silk profiling endpoints
urlpatterns += [url(r"^api/silk/", include("silk.urls", namespace="silk"))]

if settings.DEBUG:
    # debug toolbar only when DEBUG is on; imported lazily here
    import debug_toolbar

    urlpatterns = [path("__debug__/", include(debug_toolbar.urls)), ] + urlpatterns
| 35.37931 | 83 | 0.712476 |
7956b3fa955f64315ca11b2fc0dbc51695307639 | 1,803 | py | Python | test/test_get_address_details_from_callback_e400.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | test/test_get_address_details_from_callback_e400.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | test/test_get_address_details_from_callback_e400.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.banned_ip_address_details import BannedIpAddressDetails
from cryptoapis.model.invalid_pagination import InvalidPagination
from cryptoapis.model.limit_greater_than_allowed import LimitGreaterThanAllowed
from cryptoapis.model.uri_not_found import UriNotFound
globals()['BannedIpAddressDetails'] = BannedIpAddressDetails
globals()['InvalidPagination'] = InvalidPagination
globals()['LimitGreaterThanAllowed'] = LimitGreaterThanAllowed
globals()['UriNotFound'] = UriNotFound
from cryptoapis.model.get_address_details_from_callback_e400 import GetAddressDetailsFromCallbackE400
class TestGetAddressDetailsFromCallbackE400(unittest.TestCase):
"""GetAddressDetailsFromCallbackE400 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetAddressDetailsFromCallbackE400(self):
"""Test GetAddressDetailsFromCallbackE400"""
# FIXME: construct object with mandatory attributes with example values
# model = GetAddressDetailsFromCallbackE400() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 40.066667 | 484 | 0.793677 |
7956b522a5f8016bc4a462e4a6402a1c7314971d | 325 | py | Python | app/user/migrations/0011_remove_user_username.py | lenaunderwood22/django-forum | 9d739166029197dcd7256d1250641928cff01251 | [
"MIT"
] | null | null | null | app/user/migrations/0011_remove_user_username.py | lenaunderwood22/django-forum | 9d739166029197dcd7256d1250641928cff01251 | [
"MIT"
] | null | null | null | app/user/migrations/0011_remove_user_username.py | lenaunderwood22/django-forum | 9d739166029197dcd7256d1250641928cff01251 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-10-12 08:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0010_auto_20211012_0847'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='username',
),
]
| 18.055556 | 47 | 0.587692 |
7956b582ae0f6160babd4b8e7b8865a51218d932 | 24 | py | Python | SerumWriter/Utils/Constants.py | serumstudio/SerumWriter | 5e212b49e8d3da3890cd685a985438d298db5e26 | [
"MIT"
] | 2 | 2022-03-24T05:29:02.000Z | 2022-03-24T11:01:44.000Z | SerumWriter/Utils/Constants.py | serumstudio/SerumWriter | 5e212b49e8d3da3890cd685a985438d298db5e26 | [
"MIT"
] | null | null | null | SerumWriter/Utils/Constants.py | serumstudio/SerumWriter | 5e212b49e8d3da3890cd685a985438d298db5e26 | [
"MIT"
] | null | null | null |
LIB_KEY = 'sidlibv1.0' | 12 | 22 | 0.666667 |
7956b5b1c0bec33226f130b9a068af06272f4e96 | 528 | py | Python | windflow/web/helpers.py | hartym/windflow | 69a2aeb3ab9d5bdfcd1d2ce8d5c9af14876cba23 | [
"Apache-2.0"
] | 2 | 2016-09-22T17:17:59.000Z | 2017-08-27T15:40:16.000Z | windflow/web/helpers.py | hartym/windflow | 69a2aeb3ab9d5bdfcd1d2ce8d5c9af14876cba23 | [
"Apache-2.0"
] | 1 | 2019-10-21T14:58:12.000Z | 2019-10-21T14:58:13.000Z | windflow/web/helpers.py | hartym/windflow | 69a2aeb3ab9d5bdfcd1d2ce8d5c9af14876cba23 | [
"Apache-2.0"
] | null | null | null | def create_url_for_helper(name, *parts, **defaults):
def url_for_helper(self, *args, absolute=False, **kwargs):
params = dict(defaults)
params.update(kwargs)
_parts = params.pop('parts', {})
_parts.update(dict(zip(parts, map(str, args))))
if _parts:
params['parts'] = _parts
url = self.request.app.router[name].url(**params)
return self.make_url_absolute(url) if absolute else url
url_for_helper.__name__ = 'url_for_' + name
return url_for_helper
| 37.714286 | 63 | 0.638258 |
7956b6710900747ae4aa86a307f0e45e7a18bb28 | 3,933 | py | Python | tests/test_versioning.py | balabit-deps/balabit-os-7-sphinx | 4e18ca37f4ddddf346c0b30835a544db20887259 | [
"BSD-2-Clause"
] | null | null | null | tests/test_versioning.py | balabit-deps/balabit-os-7-sphinx | 4e18ca37f4ddddf346c0b30835a544db20887259 | [
"BSD-2-Clause"
] | null | null | null | tests/test_versioning.py | balabit-deps/balabit-os-7-sphinx | 4e18ca37f4ddddf346c0b30835a544db20887259 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test_versioning
~~~~~~~~~~~~~~~
Test the versioning implementation.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pickle
import pytest
from docutils.parsers.rst.directives.html import MetaBody
from sphinx import addnodes
from sphinx.versioning import add_uids, merge_doctrees, get_ratio
from sphinx.testing.util import SphinxTestApp
app = original = original_uids = None
@pytest.fixture(scope='module', autouse=True)
def setup_module(rootdir, sphinx_test_tempdir):
global app, original, original_uids
srcdir = sphinx_test_tempdir / 'test-versioning'
if not srcdir.exists():
(rootdir/'test-versioning').copytree(srcdir)
app = SphinxTestApp(srcdir=srcdir)
app.builder.env.app = app
app.connect('doctree-resolved', on_doctree_resolved)
app.build()
original = doctrees['original']
original_uids = [n.uid for n in add_uids(original, is_paragraph)]
yield
app.cleanup()
doctrees = {}
def on_doctree_resolved(app, doctree, docname):
doctrees[docname] = doctree
def is_paragraph(node):
return node.__class__.__name__ == 'paragraph'
def test_get_ratio():
assert get_ratio('', 'a')
assert get_ratio('a', '')
def test_add_uids():
assert len(original_uids) == 3
def test_picklablility():
# we have to modify the doctree so we can pickle it
copy = original.copy()
copy.reporter = None
copy.transformer = None
copy.settings.warning_stream = None
copy.settings.env = None
copy.settings.record_dependencies = None
for metanode in copy.traverse(MetaBody.meta):
metanode.__class__ = addnodes.meta
loaded = pickle.loads(pickle.dumps(copy, pickle.HIGHEST_PROTOCOL))
assert all(getattr(n, 'uid', False) for n in loaded.traverse(is_paragraph))
def test_modified():
modified = doctrees['modified']
new_nodes = list(merge_doctrees(original, modified, is_paragraph))
uids = [n.uid for n in modified.traverse(is_paragraph)]
assert not new_nodes
assert original_uids == uids
def test_added():
added = doctrees['added']
new_nodes = list(merge_doctrees(original, added, is_paragraph))
uids = [n.uid for n in added.traverse(is_paragraph)]
assert len(new_nodes) == 1
assert original_uids == uids[:-1]
def test_deleted():
deleted = doctrees['deleted']
new_nodes = list(merge_doctrees(original, deleted, is_paragraph))
uids = [n.uid for n in deleted.traverse(is_paragraph)]
assert not new_nodes
assert original_uids[::2] == uids
def test_deleted_end():
deleted_end = doctrees['deleted_end']
new_nodes = list(merge_doctrees(original, deleted_end, is_paragraph))
uids = [n.uid for n in deleted_end.traverse(is_paragraph)]
assert not new_nodes
assert original_uids[:-1] == uids
def test_insert():
insert = doctrees['insert']
new_nodes = list(merge_doctrees(original, insert, is_paragraph))
uids = [n.uid for n in insert.traverse(is_paragraph)]
assert len(new_nodes) == 1
assert original_uids[0] == uids[0]
assert original_uids[1:] == uids[2:]
def test_insert_beginning():
insert_beginning = doctrees['insert_beginning']
new_nodes = list(merge_doctrees(original, insert_beginning, is_paragraph))
uids = [n.uid for n in insert_beginning.traverse(is_paragraph)]
assert len(new_nodes) == 1
assert len(uids) == 4
assert original_uids == uids[1:]
assert original_uids[0] != uids[0]
def test_insert_similar():
insert_similar = doctrees['insert_similar']
new_nodes = list(merge_doctrees(original, insert_similar, is_paragraph))
uids = [n.uid for n in insert_similar.traverse(is_paragraph)]
assert len(new_nodes) == 1
assert new_nodes[0].rawsource == u'Anyway I need more'
assert original_uids[0] == uids[0]
assert original_uids[1:] == uids[2:]
| 29.133333 | 79 | 0.70506 |
7956b738afd059ba0c67f9fc38a44500f4fc5ba3 | 1,359 | py | Python | setup.py | SummitESP/login-service-backend | a99f53069837f32d95be0bd46a11d54cc83544d9 | [
"MIT"
] | null | null | null | setup.py | SummitESP/login-service-backend | a99f53069837f32d95be0bd46a11d54cc83544d9 | [
"MIT"
] | 1 | 2019-03-07T22:54:41.000Z | 2019-03-07T22:54:41.000Z | setup.py | SummitESP/login-service-backend | a99f53069837f32d95be0bd46a11d54cc83544d9 | [
"MIT"
] | 1 | 2021-04-14T16:25:52.000Z | 2021-04-14T16:25:52.000Z | #!/usr/bin/env python
import login_backend
long_description = open('README.md').read()
setup_args = dict(
name='login-service-backend',
version=login_backend.__version__,
description='Django authentication backend for Summit tools unified Login Service.',
long_description=long_description,
author='Jeremy Satterfield',
author_email='jsatterfield@summitesp.com',
license='MIT License',
packages=['login_backend'],
install_requires=[
'django>=1.6',
],
classifers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Systems Administration :: Authentication/Directory',
],
)
if __name__ == '__main__':
from distutils.core import setup
setup(**setup_args)
| 32.357143 | 88 | 0.627667 |
7956b756dcb3ca5240fae43cf7b63ed0c74b73a1 | 16,352 | py | Python | tests/callbacks/test_lr_monitor.py | calebrob6/pytorch-lightning | 4c79b3a5b343866217784c66d122819c59a92c1d | [
"Apache-2.0"
] | 4 | 2021-07-27T14:39:02.000Z | 2022-03-07T10:57:13.000Z | tests/callbacks/test_lr_monitor.py | calebrob6/pytorch-lightning | 4c79b3a5b343866217784c66d122819c59a92c1d | [
"Apache-2.0"
] | 1 | 2021-03-25T18:10:25.000Z | 2021-04-06T18:54:04.000Z | tests/callbacks/test_lr_monitor.py | calebrob6/pytorch-lightning | 4c79b3a5b343866217784c66d122819c59a92c1d | [
"Apache-2.0"
] | 1 | 2021-03-25T17:09:28.000Z | 2021-03-25T17:09:28.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import optim
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.finetuning import BackboneFinetuning
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.simple_models import ClassificationModel
def test_lr_monitor_single_lr(tmpdir):
""" Test that learning rates are extracted and logged for single lr scheduler. """
tutils.reset_seed()
model = BoringModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert lr_monitor.lrs, 'No learning rates logged'
assert all(v is None for v in lr_monitor.last_momentum_values.values()), \
'Momentum should not be logged by default'
assert len(lr_monitor.lrs) == len(trainer.lr_schedulers), \
'Number of learning rates logged does not match number of lr schedulers'
assert lr_monitor.lr_sch_names == list(lr_monitor.lrs.keys()) == ['lr-SGD'], \
'Names of learning rates not set correctly'
@pytest.mark.parametrize('opt', ['SGD', 'Adam'])
def test_lr_monitor_single_lr_with_momentum(tmpdir, opt: str):
"""Test that learning rates and momentum are extracted and logged for single lr scheduler."""
class LogMomentumModel(BoringModel):
def __init__(self, opt):
super().__init__()
self.opt = opt
def configure_optimizers(self):
if self.opt == 'SGD':
opt_kwargs = {'momentum': 0.9}
elif self.opt == 'Adam':
opt_kwargs = {'betas': (0.9, 0.999)}
optimizer = getattr(optim, self.opt)(self.parameters(), lr=1e-2, **opt_kwargs)
lr_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-2, total_steps=10_000)
return [optimizer], [lr_scheduler]
model = LogMomentumModel(opt=opt)
lr_monitor = LearningRateMonitor(log_momentum=True)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=2,
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert all(v is not None for v in lr_monitor.last_momentum_values.values()), \
'Expected momentum to be logged'
assert len(lr_monitor.last_momentum_values) == len(trainer.lr_schedulers), \
'Number of momentum values logged does not match number of lr schedulers'
assert all(k == f'lr-{opt}-momentum' for k in lr_monitor.last_momentum_values.keys()), \
'Names of momentum values not set correctly'
def test_log_momentum_no_momentum_optimizer(tmpdir):
"""
Test that if optimizer doesn't have momentum then a warning is raised with log_momentum=True.
"""
class LogMomentumModel(BoringModel):
def configure_optimizers(self):
optimizer = optim.ASGD(self.parameters(), lr=1e-2)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = LogMomentumModel()
lr_monitor = LearningRateMonitor(log_momentum=True)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=2,
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
)
with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert all(v == 0 for v in lr_monitor.last_momentum_values.values()), \
'Expected momentum to be logged'
assert len(lr_monitor.last_momentum_values) == len(trainer.lr_schedulers), \
'Number of momentum values logged does not match number of lr schedulers'
assert all(k == 'lr-ASGD-momentum' for k in lr_monitor.last_momentum_values.keys()), \
'Names of momentum values not set correctly'
def test_lr_monitor_no_lr_scheduler(tmpdir):
tutils.reset_seed()
class CustomBoringModel(BoringModel):
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=0.1)
return optimizer
model = CustomBoringModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
)
with pytest.warns(RuntimeWarning, match='have no learning rate schedulers'):
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_lr_monitor_no_logger(tmpdir):
tutils.reset_seed()
model = BoringModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
callbacks=[lr_monitor],
logger=False,
)
with pytest.raises(MisconfigurationException, match='`Trainer` that has no logger'):
trainer.fit(model)
@pytest.mark.parametrize("logging_interval", ['step', 'epoch'])
def test_lr_monitor_multi_lrs(tmpdir, logging_interval: str):
""" Test that learning rates are extracted and logged for multi lr schedulers. """
tutils.reset_seed()
class CustomBoringModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=1e-2)
optimizer2 = optim.Adam(self.parameters(), lr=1e-2)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
model = CustomBoringModel()
model.training_epoch_end = None
lr_monitor = LearningRateMonitor(logging_interval=logging_interval)
log_every_n_steps = 2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
log_every_n_steps=log_every_n_steps,
limit_train_batches=7,
limit_val_batches=0.1,
callbacks=[lr_monitor],
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert lr_monitor.lrs, 'No learning rates logged'
assert len(lr_monitor.lrs) == len(trainer.lr_schedulers), \
'Number of learning rates logged does not match number of lr schedulers'
assert lr_monitor.lr_sch_names == ['lr-Adam', 'lr-Adam-1'], \
'Names of learning rates not set correctly'
if logging_interval == 'step':
expected_number_logged = trainer.global_step // log_every_n_steps
if logging_interval == 'epoch':
expected_number_logged = trainer.max_epochs
assert all(len(lr) == expected_number_logged for lr in lr_monitor.lrs.values()), \
'Length of logged learning rates do not match the expected number'
def test_lr_monitor_param_groups(tmpdir):
""" Test that learning rates are extracted and logged for single lr scheduler. """
tutils.reset_seed()
class CustomClassificationModel(ClassificationModel):
def configure_optimizers(self):
param_groups = [{
'params': list(self.parameters())[:2],
'lr': self.lr * 0.1
}, {
'params': list(self.parameters())[2:],
'lr': self.lr
}]
optimizer = optim.Adam(param_groups)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
return [optimizer], [lr_scheduler]
model = CustomClassificationModel()
dm = ClassifDataModule()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
)
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert lr_monitor.lrs, 'No learning rates logged'
assert len(lr_monitor.lrs) == 2 * len(trainer.lr_schedulers), \
'Number of learning rates logged does not match number of param groups'
assert lr_monitor.lr_sch_names == ['lr-Adam']
assert list(lr_monitor.lrs.keys()) == ['lr-Adam/pg1', 'lr-Adam/pg2'], \
'Names of learning rates not set correctly'
def test_lr_monitor_custom_name(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer, [scheduler] = super().configure_optimizers()
lr_scheduler = {'scheduler': scheduler, 'name': 'my_logging_name'}
return optimizer, [lr_scheduler]
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
progress_bar_refresh_rate=0,
weights_summary=None,
)
trainer.fit(TestModel())
assert lr_monitor.lr_sch_names == list(lr_monitor.lrs.keys()) == ['my_logging_name']
def test_lr_monitor_custom_pg_name(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD([{'params': list(self.layer.parameters()), 'name': 'linear'}], lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=2,
limit_train_batches=2,
callbacks=[lr_monitor],
progress_bar_refresh_rate=0,
weights_summary=None,
)
trainer.fit(TestModel())
assert lr_monitor.lr_sch_names == ['lr-SGD']
assert list(lr_monitor.lrs) == ['lr-SGD/linear']
def test_lr_monitor_duplicate_custom_pg_names(tmpdir):
tutils.reset_seed()
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.linear_a = torch.nn.Linear(32, 16)
self.linear_b = torch.nn.Linear(16, 2)
def forward(self, x):
x = self.linear_a(x)
x = self.linear_b(x)
return x
def configure_optimizers(self):
param_groups = [
{
'params': list(self.linear_a.parameters()),
'name': 'linear'
},
{
'params': list(self.linear_b.parameters()),
'name': 'linear'
},
]
optimizer = torch.optim.SGD(param_groups, lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=2,
limit_train_batches=2,
callbacks=[lr_monitor],
progress_bar_refresh_rate=0,
weights_summary=None,
)
with pytest.raises(
MisconfigurationException, match='A single `Optimizer` cannot have multiple parameter groups with identical'
):
trainer.fit(TestModel())
def test_multiple_optimizers_basefinetuning(tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.backbone = torch.nn.Sequential(
torch.nn.Linear(32, 32),
torch.nn.Linear(32, 32),
torch.nn.Linear(32, 32),
torch.nn.ReLU(True),
)
self.layer = torch.nn.Linear(32, 2)
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def forward(self, x):
return self.layer(self.backbone(x))
def configure_optimizers(self):
parameters = list(filter(lambda p: p.requires_grad, self.parameters()))
opt = optim.Adam(parameters, lr=0.1)
opt_2 = optim.Adam(parameters, lr=0.1)
opt_3 = optim.Adam(parameters, lr=0.1)
optimizers = [opt, opt_2, opt_3]
schedulers = [
optim.lr_scheduler.StepLR(opt, step_size=1, gamma=0.5),
optim.lr_scheduler.StepLR(opt_2, step_size=1, gamma=0.5),
]
return optimizers, schedulers
class Check(Callback):
def on_train_epoch_start(self, trainer, pl_module) -> None:
num_param_groups = sum([len(opt.param_groups) for opt in trainer.optimizers])
assert lr_monitor.lr_sch_names == ['lr-Adam', 'lr-Adam-1']
if trainer.current_epoch == 0:
assert num_param_groups == 3
elif trainer.current_epoch == 1:
assert num_param_groups == 4
assert list(lr_monitor.lrs) == ['lr-Adam-1', 'lr-Adam/pg1', 'lr-Adam/pg2']
elif trainer.current_epoch == 2:
assert num_param_groups == 5
assert list(lr_monitor.lrs) == ['lr-Adam/pg1', 'lr-Adam/pg2', 'lr-Adam-1/pg1', 'lr-Adam-1/pg2']
else:
expected = ['lr-Adam/pg1', 'lr-Adam/pg2', 'lr-Adam-1/pg1', 'lr-Adam-1/pg2', 'lr-Adam-1/pg3']
assert list(lr_monitor.lrs) == expected
class TestFinetuning(BackboneFinetuning):
def freeze_before_training(self, pl_module):
self.freeze(pl_module.backbone[0])
self.freeze(pl_module.backbone[1])
self.freeze(pl_module.layer)
def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int):
"""Called when the epoch begins."""
if epoch == 1 and opt_idx == 0:
self.unfreeze_and_add_param_group(pl_module.backbone[0], optimizer, lr=0.1)
if epoch == 2 and opt_idx == 1:
self.unfreeze_and_add_param_group(pl_module.layer, optimizer, lr=0.1)
if epoch == 3 and opt_idx == 1:
assert len(optimizer.param_groups) == 2
self.unfreeze_and_add_param_group(pl_module.backbone[1], optimizer, lr=0.1)
assert len(optimizer.param_groups) == 3
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=5,
limit_val_batches=0,
limit_train_batches=2,
callbacks=[TestFinetuning(), lr_monitor, Check()],
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False
)
model = TestModel()
model.training_epoch_end = None
trainer.fit(model)
expected = [0.1, 0.05, 0.025, 0.0125, 0.00625]
assert lr_monitor.lrs['lr-Adam/pg1'] == expected
expected = [0.1, 0.05, 0.025, 0.0125]
assert lr_monitor.lrs['lr-Adam/pg2'] == expected
expected = [0.1, 0.05, 0.025, 0.0125, 0.00625]
assert lr_monitor.lrs['lr-Adam-1/pg1'] == expected
expected = [0.1, 0.05, 0.025]
assert lr_monitor.lrs['lr-Adam-1/pg2'] == expected
expected = [0.1, 0.05]
assert lr_monitor.lrs['lr-Adam-1/pg3'] == expected
| 36.017621 | 116 | 0.6483 |
7956b7b285e0a67f8eaf8d6205b5e24f071da8ef | 30,380 | py | Python | DSP_Task1/newestUpdate2.py | Haidy-sayed/SBME-3rd-year-DSP-Tasks | 938d4b64d2debf2fcd0387796aa530d8c25c0777 | [
"MIT"
] | null | null | null | DSP_Task1/newestUpdate2.py | Haidy-sayed/SBME-3rd-year-DSP-Tasks | 938d4b64d2debf2fcd0387796aa530d8c25c0777 | [
"MIT"
] | null | null | null | DSP_Task1/newestUpdate2.py | Haidy-sayed/SBME-3rd-year-DSP-Tasks | 938d4b64d2debf2fcd0387796aa530d8c25c0777 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUIgodhelpus.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from pyqtgraph import PlotWidget
import pyqtgraph
from PyQt5 import QtCore, QtGui, QtWidgets
#from matplotlib.pyplot import draw
import pandas as pd
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QApplication, QColorDialog, QFileDialog, QFrame, QWidget, QInputDialog, QLineEdit,QComboBox
import os
import numpy as np
from PyQt5.QtWidgets import QMessageBox
import sys
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QColorDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QColor
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from pyqtgraph.graphicsItems.ImageItem import ImageItem
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import cv2
import io
from scipy.signal.spectral import spectrogram
class SpecCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize = (width , height) , dpi=dpi)
self.axes = fig.add_subplot(111)
super(SpecCanvas, self).__init__(fig)
fig.tight_layout()
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1384, 696)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1422, 693)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.signalComboBox = QtWidgets.QComboBox(self.centralwidget)
self.signalComboBox.setGeometry(QtCore.QRect(370, 570, 221, 22))
self.signalComboBox.setObjectName("SignalComboBox")
self.signalComboBox.addItem("")
self.signalComboBox.addItem("")
self.signalComboBox.addItem("")
self.signalComboBox.addItem("")
self.pauseButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.pauseButtonCh.setGeometry(QtCore.QRect(190, 520, 75, 23))
self.pauseButtonCh.setObjectName("pauseButtonCh")
self.slowerButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.slowerButtonCh.setGeometry(QtCore.QRect(510, 520, 75, 23))
self.slowerButtonCh.setObjectName("slowerButtonCh")
self.zoomOutButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.zoomOutButtonCh.setGeometry(QtCore.QRect(350, 520, 75, 23))
self.zoomOutButtonCh.setObjectName("zoomOutButtonCh")
self.showButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.showButtonCh.setGeometry(QtCore.QRect(670, 520, 75, 23))
self.showButtonCh.setObjectName("showButtonCh")
self.zoomInButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.zoomInButtonCh.setGeometry(QtCore.QRect(270, 520, 75, 23))
self.zoomInButtonCh.setObjectName("zoomInButtonCh")
self.spectroButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.spectroButtonCh.setGeometry(QtCore.QRect(430, 520, 75, 23))
self.spectroButtonCh.setObjectName("spectroButtonCh")
self.hideButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.hideButtonCh.setGeometry(QtCore.QRect(750, 520, 75, 23))
self.hideButtonCh.setObjectName("hideButtonCh")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(30, 520, 61, 21))
font = QtGui.QFont()
font.setPointSize(10)
self.label.setFont(font)
self.label.setObjectName("label")
self.playButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.playButtonCh.setGeometry(QtCore.QRect(110, 520, 75, 23))
self.playButtonCh.setObjectName("playButtonCh")
self.fasterButtonCh = QtWidgets.QPushButton(self.centralwidget)
self.fasterButtonCh.setGeometry(QtCore.QRect(590, 520, 75, 23))
self.fasterButtonCh.setObjectName("fasterButtonCh")
self.addLabelButton = QtWidgets.QPushButton(self.centralwidget)
self.addLabelButton.setGeometry(QtCore.QRect(830, 520, 75, 23))
self.addLabelButton.setObjectName("addLabelButton")
self.spectroMinSlider = QtWidgets.QSlider(self.centralwidget)
self.spectroMinSlider.setGeometry(QtCore.QRect(990, 480, 160, 22))
self.spectroMinSlider.setOrientation(QtCore.Qt.Horizontal)
self.spectroMinSlider.setObjectName("spectroMinSlider")
self.spectroMaxSlider = QtWidgets.QSlider(self.centralwidget)
self.spectroMaxSlider.setGeometry(QtCore.QRect(1220, 480, 160, 22))
self.spectroMaxSlider.setOrientation(QtCore.Qt.Horizontal)
self.spectroMaxSlider.setObjectName("spectroMaxSlider")
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setGeometry(QtCore.QRect(40, 80, 1271, 361))
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.spectroMinSlider.valueChanged.connect(lambda: self.maxSlider())
self.spectroMaxSlider.valueChanged.connect(lambda: self.minSlider())
#def maxSliderValue(self):
# self.minSlider(self.spectroMaxSlider.value())
#def minSliderValue(self):
# self.minSlider(self.spectroMinSlider.value())
#Our plotting widget
self.graphCh1Container=QtWidgets.QWidget(self.splitter)
self.graphCh1=pyqtgraph.GraphicsLayoutWidget(self.graphCh1Container)
self.graphCh1.setGeometry(QtCore.QRect(20, 60, 1271, 221))
self.graphCh1.setObjectName("graphCh1")
#creating a plot (axis ..etc)
p1=self.graphCh1.addPlot()
#to put the limits of our graph .. solved compression problem)
#coz it controls range of axis in the single frame
p1.setXRange(0, 20, padding=0)
p1.setLimits(xMin=0)
#defining our 3 curves
self.curve1 = p1.plot()
self.curve2 = p1.plot()
self.curve3 = p1.plot()
self.spectrogram = PlotWidget(self.splitter)
self.spectrogram.setObjectName("spectrogram")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1422, 26))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuOpen = QtWidgets.QMenu(self.menuFile)
self.menuOpen.setObjectName("menuOpen")
self.menuActions = QtWidgets.QMenu(self.menubar)
self.menuActions.setObjectName("menuActions")
self.menuChannel_1 = QtWidgets.QMenu(self.menuActions)
self.menuChannel_1.setObjectName("menuChannel_1")
self.menuChannel_2 = QtWidgets.QMenu(self.menuActions)
self.menuChannel_2.setObjectName("menuChannel_2")
self.menuChannel_3 = QtWidgets.QMenu(self.menuActions)
self.menuChannel_3.setObjectName("menuChannel_3")
self.menuSpectrogram_Actions = QtWidgets.QMenu(self.menubar)
self.menuSpectrogram_Actions.setObjectName("menuSpectrogram_Actions")
self.menuColor_Palettes = QtWidgets.QMenu(self.menuSpectrogram_Actions)
self.menuColor_Palettes.setObjectName("menuColor_Palettes")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionChannel_1 = QtWidgets.QAction(MainWindow)
self.actionChannel_1.setObjectName("actionChannel_1")
self.actionChannel_2 = QtWidgets.QAction(MainWindow)
self.actionChannel_2.setObjectName("actionChannel_2")
self.actionChannel_3 = QtWidgets.QAction(MainWindow)
self.actionChannel_3.setObjectName("actionChannel_3")
self.actionSave_as_PDF = QtWidgets.QAction(MainWindow)
self.actionSave_as_PDF.setObjectName("actionSave_as_PDF")
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionChange_Color = QtWidgets.QAction(MainWindow)
self.actionChange_Color.setObjectName("actionChange_Color")
self.actionChange_Color_2 = QtWidgets.QAction(MainWindow)
self.actionChange_Color_2.setObjectName("actionChange_Color_2")
self.actionChange_Color_3 = QtWidgets.QAction(MainWindow)
self.actionChange_Color_3.setObjectName("actionChange_Color_3")
self.actionAdd_Title = QtWidgets.QAction(MainWindow)
self.actionAdd_Title.setObjectName("actionAdd_Title")
self.actionAdd_Titlee = QtWidgets.QAction(MainWindow)
self.actionAdd_Titlee.setObjectName("actionAdd_Titlee")
self.actionAdd_Title_2 = QtWidgets.QAction(MainWindow)
self.actionAdd_Title_2.setObjectName("actionAdd_Title_2")
self.actionPalette_1 = QtWidgets.QAction(MainWindow)
self.actionPalette_1.setObjectName("actionPalette_1")
self.actionPalette_2 = QtWidgets.QAction(MainWindow)
self.actionPalette_2.setObjectName("actionPalette_2")
self.actionPalette_3 = QtWidgets.QAction(MainWindow)
self.actionPalette_3.setObjectName("actionPalette_3")
self.actionPalette_4 = QtWidgets.QAction(MainWindow)
self.actionPalette_4.setObjectName("actionPalette_4")
self.actionPalette_5 = QtWidgets.QAction(MainWindow)
self.actionPalette_5.setObjectName("actionPalette_5")
self.actionChannel_7 = QtWidgets.QAction(MainWindow)
self.actionChannel_7.setObjectName("actionChannel_7")
self.actionChannel_8 = QtWidgets.QAction(MainWindow)
self.actionChannel_8.setObjectName("actionChannel_8")
self.actionChannel_9 = QtWidgets.QAction(MainWindow)
self.actionChannel_9.setObjectName("actionChannel_9")
self.actionChannel_10 = QtWidgets.QAction(MainWindow)
self.actionChannel_10.setObjectName("actionChannel_10")
self.actionChannel_11 = QtWidgets.QAction(MainWindow)
self.actionChannel_11.setObjectName("actionChannel_11")
self.menuOpen.addAction(self.actionChannel_1)
self.menuOpen.addAction(self.actionChannel_2)
self.menuOpen.addAction(self.actionChannel_3)
self.menuFile.addAction(self.menuOpen.menuAction())
self.menuFile.addAction(self.actionSave_as_PDF)
self.menuFile.addAction(self.actionExit)
self.menuChannel_1.addAction(self.actionChange_Color)
self.menuChannel_2.addAction(self.actionChange_Color_2)
self.menuChannel_3.addAction(self.actionChange_Color_3)
self.menuActions.addAction(self.menuChannel_1.menuAction())
self.menuActions.addAction(self.menuChannel_2.menuAction())
self.menuActions.addAction(self.menuChannel_3.menuAction())
self.menuColor_Palettes.addAction(self.actionPalette_1)
self.menuColor_Palettes.addAction(self.actionPalette_2)
self.menuColor_Palettes.addAction(self.actionPalette_3)
self.menuColor_Palettes.addAction(self.actionPalette_4)
self.menuColor_Palettes.addAction(self.actionPalette_5)
self.menuSpectrogram_Actions.addAction(self.menuColor_Palettes.menuAction())
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuActions.menuAction())
self.menubar.addAction(self.menuSpectrogram_Actions.menuAction())
self.channel="Ch1"
self.timer1 = QtCore.QTimer()
self.timer2 = QtCore.QTimer()
self.timer3 = QtCore.QTimer()
self.rmin = 0
self.rmax = 256
self.gmin = 0
self.gmax = 256
self.bmin = 0
self.bmax = 256
self.timerInterval = 100
self.penColorIndex1=1
self.penColorIndex2=1
self.color = "#ffaa00"
self.label1 = "CHANNEL 1"
self.palette = "viridis"
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.signalComboBox.setCurrentText(_translate("MainWindow", "Choose a channel"))
self.signalComboBox.setItemText(0, _translate("MainWindow", "Choose a channel"))
self.signalComboBox.setItemText(1, _translate("MainWindow", "Ch1"))
self.signalComboBox.setItemText(2, _translate("MainWindow", "Ch2"))
self.signalComboBox.setItemText(3, _translate("MainWindow", "Ch3"))
self.pauseButtonCh.setText(_translate("MainWindow", "Pause"))
self.pauseButtonCh.setShortcut(_translate("MainWindow", "P"))
self.slowerButtonCh.setText(_translate("MainWindow", "Slower"))
self.slowerButtonCh.setShortcut(_translate("MainWindow", "W"))
self.zoomOutButtonCh.setText(_translate("MainWindow", "Zoom out"))
self.zoomOutButtonCh.setShortcut(_translate("MainWindow", "-"))
self.showButtonCh.setText(_translate("MainWindow", "Show"))
self.showButtonCh.setShortcut(_translate("MainWindow", "G"))
self.zoomInButtonCh.setText(_translate("MainWindow", "Zoom in"))
self.zoomInButtonCh.setShortcut(_translate("MainWindow", "+"))
self.spectroButtonCh.setText(_translate("MainWindow", "Spectro"))
self.spectroButtonCh.setShortcut(_translate("MainWindow", "Q"))
self.hideButtonCh.setText(_translate("MainWindow", "Hide"))
self.hideButtonCh.setShortcut(_translate("MainWindow", "H"))
self.label.setText(_translate("MainWindow", "Controls"))
self.playButtonCh.setText(_translate("MainWindow", "Play"))
self.playButtonCh.setShortcut(_translate("MainWindow", "S"))
self.fasterButtonCh.setText(_translate("MainWindow", "Faster"))
self.fasterButtonCh.setShortcut(_translate("MainWindow", "F"))
self.addLabelButton.setText(_translate("MainWindow", "Add Label"))
self.addLabelButton.setShortcut(_translate("MainWindow", "L"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuOpen.setTitle(_translate("MainWindow", "Open"))
self.menuActions.setTitle(_translate("MainWindow", "Actions"))
self.menuChannel_1.setTitle(_translate("MainWindow", "Channel 1"))
self.menuChannel_2.setTitle(_translate("MainWindow", "Channel 2"))
self.menuChannel_3.setTitle(_translate("MainWindow", "Channel 3"))
self.menuSpectrogram_Actions.setTitle(_translate("MainWindow", "Spectrogram Actions"))
self.menuColor_Palettes.setTitle(_translate("MainWindow", "Color Palettes"))
self.actionChannel_1.setText(_translate("MainWindow", "Channel 1"))
self.actionChannel_2.setText(_translate("MainWindow", "Channel 2"))
self.actionChannel_3.setText(_translate("MainWindow", "Channel 3"))
self.actionSave_as_PDF.setText(_translate("MainWindow", "Save as PDF"))
self.actionSave_as_PDF.setShortcut(_translate("MainWindow", "Ctrl+S"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
self.actionExit.setShortcut(_translate("MainWindow", "esc"))
self.actionChange_Color.setText(_translate("MainWindow", "Change Color"))
self.actionChange_Color_2.setText(_translate("MainWindow", "Change Color"))
self.actionChange_Color_3.setText(_translate("MainWindow", "Change Color"))
self.actionAdd_Title.setText(_translate("MainWindow", "Add Title"))
self.actionAdd_Titlee.setText(_translate("MainWindow", "Add Title"))
self.actionAdd_Title_2.setText(_translate("MainWindow", "Add Title"))
self.actionPalette_1.setText(_translate("MainWindow", 'gray'))
self.actionPalette_2.setText(_translate("MainWindow", 'hsv'))
self.actionPalette_3.setText(_translate("MainWindow", 'summer'))
self.actionPalette_4.setText(_translate("MainWindow", 'viridis'))
self.actionPalette_5.setText(_translate("MainWindow", 'turbo'))
self.actionChannel_7.setText(_translate("MainWindow", "Channel 4"))
self.actionChannel_8.setText(_translate("MainWindow", "Channel 5"))
self.actionChannel_9.setText(_translate("MainWindow", "Channel 6"))
self.actionChannel_10.setText(_translate("MainWindow", "Channel 4"))
self.actionChannel_11.setText(_translate("MainWindow", "Channel 5"))
self.signalComboBox.currentTextChanged.connect(lambda: self.channelComboBox())
self.actionExit.triggered.connect(lambda: self.exitApp())
self.menuOpen.triggered.connect(lambda: self.channelComboBox())
self.zoomInButtonCh.clicked.connect(lambda: self.zoomIn(self.channel))
self.zoomOutButtonCh.clicked.connect(lambda: self.zoomOut(self.channel))
self.playButtonCh.clicked.connect(lambda: self.playCh(self.channel))
self.pauseButtonCh.clicked.connect(lambda: self.pauseCh1(self.channel))
self.spectroButtonCh.clicked.connect(lambda: self.spectro(self.channel))
self.slowerButtonCh.clicked.connect(lambda: self.slow(self.channel,self.timerInterval))
self.fasterButtonCh.clicked.connect(lambda: self.fast(self.channel,self.timerInterval))
self.showButtonCh.clicked.connect(lambda: self.show(self.channel))
self.hideButtonCh.clicked.connect(lambda: self.hide(self.channel))
self.actionChange_Color.triggered.connect(lambda: self.colorPicker())
self.actionChange_Color_2.triggered.connect(lambda: self.colorPicker())
self.actionChange_Color_3.triggered.connect(lambda: self.colorPicker())
self.actionPalette_1.triggered.connect(lambda : self.setpalette(self.actionPalette_1.text))
self.actionPalette_2.triggered.connect(lambda : self.setpalette(self.actionPalette_2.text))
self.actionPalette_3.triggered.connect(lambda : self.setpalette(self.actionPalette_3.text))
self.actionPalette_4.triggered.connect(lambda : self.setpalette(self.actionPalette_4.text))
self.actionPalette_5.triggered.connect(lambda : self.setpalette(self.actionPalette_5.text))
self.sc = SpecCanvas(self, width=4, height=5, dpi=100)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.sc)
def setpalette(self , txt):
self.palette = txt
self.spectro(self.channel)
def channelComboBox(self):
self.channel=self.signalComboBox.currentText()
if self.channel =="Ch1":
self.open_file()
elif self.channel=="Ch2":
self.open_file2()
else :
self.open_file3()
def open_file(self):
"""opens a file from the brower """
file_path=QFileDialog.getOpenFileName()
file_name=file_path[0].split('/')[-1]
self.read_data(file_name)
def open_file2(self):
"""opens a file from the brower """
file_path=QFileDialog.getOpenFileName()
file_name=file_path[0].split('/')[-1]
self.read_data2(file_name)
def open_file3(self):
"""opens a file from the brower """
file_path=QFileDialog.getOpenFileName()
file_name=file_path[0].split('/')[-1]
self.read_data3(file_name)
def read_data(self,file_name):
"""loads the data from chosen file"""
df1=pd.read_csv(file_name)
self.label1=file_name
time1=list(pd.to_numeric(df1['time'],downcast="float"))
amp1=list(pd.to_numeric(df1['amplitude'],downcast="float"))
self.draw(time1,amp1,self.color)
def read_data2(self,file_name):
"""loads the data from chosen file"""
df2=pd.read_csv(file_name)
self.label2=file_name #trial
time2=list(pd.to_numeric(df2['time'],downcast="float"))
amp2=list(pd.to_numeric(df2['amplitude'],downcast="float"))
self.draw2(time2,amp2,self.color)
def read_data3(self,file_name):
"""loads the data from chosen file"""
df3=pd.read_csv(file_name)
self.label3=file_name
time3=list(pd.to_numeric(df3['time'],downcast="float"))
amp3=list(pd.to_numeric(df3['amplitude'],downcast="float"))
self.draw3(time3,amp3,self.color)
def draw(self,time,amp,color):
"""sets up our canvas to plot"""
self.time = time
self.amp=amp
self.index=0
pen = pyqtgraph.mkPen(color) #signal color
self.curve1.setData(self.time[0:self.index+500], self.amp[0:self.index+500], pen=pen)
self.timer1.setInterval(100)
self.timer1.timeout.connect(lambda:self.update_plot_data(self.time,self.amp))
self.timer1.start()
def draw2(self,time2,amp2,color):
"""sets up our canvas to plot"""
self.time2 = time2
self.amp2=amp2
self.index2=0
pen = pyqtgraph.mkPen(color) #signal color
self.curve2.setData(self.time2[0:self.index2+500], self.amp2[0:self.index2+500], pen=pen)
self.timer2.setInterval(100)
self.timer2.timeout.connect(lambda:self.update_plot_data2(self.time2,self.amp2))
self.timer2.start()
def draw3(self,time3,amp3,color):
"""sets up our canvas to plot"""
self.time3 = time3
self.amp3=amp3
self.index3=0
pen = pyqtgraph.mkPen(color) #signal color
self.curve3.setData(self.time3[0:self.index3+500], self.amp3[0:self.index3+500], pen=pen)
self.timer3.setInterval(100)
self.timer3.timeout.connect(lambda:self.update_plot_data3(self.time3,self.amp3))
self.timer3.start()
def update_plot_data(self,time,amp):
"""updates the data plotted on graph to get dynamic signal"""
dynamic_time = time[0:self.index+500]
dynamic_amp = amp[0:self.index+500]
self.index=self.index+500
if self.index+500>len(time):
self.index=0
self.curve1.setData(dynamic_time, dynamic_amp) # Update the data
def update_plot_data2(self,time2,amp2):
"""updates the data plotted on graph to get dynamic signal"""
dynamic_time2 = time2[0:self.index2+500]
dynamic_amp2 = amp2[0:self.index2+500]
self.index2=self.index2+500
if self.index2+500>len(time2):
self.index2=0
self.curve2.setData(dynamic_time2, dynamic_amp2) # Update the data
def update_plot_data3(self,time3,amp3):
"""updates the data plotted on graph to get dynamic signal"""
dynamic_time3 = time3[0:self.index3+500]
dynamic_amp3 = amp3[0:self.index3+500]
self.index3=self.index3+500
if self.index3+500>len(time3):
self.index3=0
self.curve3.setData(dynamic_time3, dynamic_amp3) # Update the data
def colorPicker(self):
self.on_click()
    @pyqtSlot()
    def on_click(self):
        """Qt slot entry point: forward a click to the color-dialog opener."""
        self.openColorDialog()
def openColorDialog(self):
color = QColorDialog.getColor()
self.color=color
if color.isValid():
print(color.name())
def exitApp(self):
sys.exit()
def addTitle(self, ch):
if ch=="Ch1":
self.label.setText(self.label1)
self.update()
else:
self.label_2.setText(self.label2)
self.update()
def zoomOut(self,ch):
self.graphCh1.plot.getViewBox().scaleBy((2,2))
def zoomIn(self,ch):
self.graphCh1.plot.getViewBox().scaleBy((0.5,0.5))
def playCh(self, channel):
if channel =="Ch1":
self.timer1.start()
elif channel=="Ch2":
self.timer2.start()
else:
self.timer3.start()
def pauseCh1(self, channel):
"""pauses the dynamic signal in ch1"""
if channel =="Ch1":
self.timer1.stop()
elif channel=="Ch2":
self.timer2.stop()
else:
self.timer3.stop()
def slow(self,channel,timerInterval):
self.timerInterval = self.timerInterval * 2
if channel== "Ch1":
self.timer1.setInterval(self.timerInterval)
elif channel =="Ch2":
self.timer2.setInterval(self.timerInterval)
else:
self.timer3.setInterval(self.timerInterval)
def fast(self,channel,timerInterval):
self.timerInterval = self.timerInterval / 2
if channel == "Ch1":
self.timer1.setInterval(self.timerInterval)
elif channel=="Ch2":
self.timer2.setInterval(self.timerInterval)
else:
self.timer3.setInterval(self.timerInterval)
def show(self,ch):
if ch== "Ch1":
self.penColorIndex1=1
elif ch== "Ch2":
self.penColorIndex2=1
else:
self.penColorIndex3=1
def hide(self,ch):
if ch== "Ch1":
self.penColorIndex1=0
elif ch== "Ch2":
self.penColorIndex2=0
else:
self.penColorIndex3=0
def maxSlider(self):
slider_value=self.spectroMaxSlider.value()
self.rmax=slider_value
self.gmax=slider_value
self.bmax=slider_value
self.spectroPixel(self)
def minSlider(self):
slider_value=self.spectroMinSlider.value()
self.rmin=slider_value
self.gmin=slider_value
self.bmin=slider_value
self.spectroPixel(self)
def spectroPixel(self):
self.specimage = self.figToImg(self.fig ,dpi=90)
self.img = np.rot90(self.img , k=1 , axes= (1,0))
self.image = pyqtgraph.ImageItem(self.img)
self.image.setLevels([[ self.rmin , self.rmax] , [ self.gmin , self.gmax], [ self.bmin , self.bmax]])
self.spectrogram.clear()
self.plotGraph.addItem(self.image)
def figToImg(self ,fig ,dpi = 90):
buf = io.BytesIO()
fig.savefig(buf, format="jpeg", dpi=dpi)
buf.seek(0)
arrimg = np.frombuffer(buf.getvalue(), dtype=np.uint8)
print(buf.getvalue())
buf.close()
self.img = cv2.imdecode(arrimg, 1)
self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
return self.img
def spectro(self , channel):
if channel =="Ch1":
fig=plt.figure()
self.spec_gram = plt.specgram(self.amp, Fs=200 , cmap = self.palette)
self.plotGraph = pyqtgraph.PlotItem()
pyqtgraph.PlotItem.enableAutoScale(self.plotGraph)
pyqtgraph.PlotItem.hideAxis(self.plotGraph,'left')
pyqtgraph.PlotItem.hideAxis(self.plotGraph,'bottom')
self.spectrogram.setCentralItem(self.plotGraph)
self.img=self.figToImg(fig)
self.img = np.rot90(self.img , k=1 , axes= (1,0))
self.image = pyqtgraph.ImageItem(self.img)
self.image.setLevels([[ self.rmin , self.rmax] , [ self.gmin , self.gmax], [ self.bmin , self.bmax]])
self.plotGraph.addItem(self.image)
elif channel =="Ch2":
fig=plt.figure()
self.spec_gram = plt.specgram(self.amp2, Fs=256 , cmap = self.palette)
self.plotGraph = pyqtgraph.PlotItem()
pyqtgraph.PlotItem.enableAutoScale(self.plotGraph)
pyqtgraph.PlotItem.hideAxis(self.plotGraph,'left')
pyqtgraph.PlotItem.hideAxis(self.plotGraph,'bottom')
self.spectrogram.setCentralItem(self.plotGraph)
self.img=self.figToImg(fig)
self.img = np.rot90(self.img , k=1 , axes= (1,0))
self.image = pyqtgraph.ImageItem(self.img)
self.image.setLevels([[ self.rmin , self.rmax] , [ self.gmin , self.gmax], [ self.bmin , self.bmax]])
self.plotGraph.addItem(self.image)
elif channel == "Ch3":
fig=plt.figure()
self.spec_gram = plt.specgram(self.amp3, Fs=256 , cmap = self.palette)
self.plotGraph = pyqtgraph.PlotItem()
pyqtgraph.PlotItem.enableAutoScale(self.plotGraph)
pyqtgraph.PlotItem.hideAxis(self.plotGraph,'left')
pyqtgraph.PlotItem.hideAxis(self.plotGraph,'bottom')
self.spectrogram.setCentralItem(self.plotGraph)
self.img=self.figToImg(fig)
self.img = np.rot90(self.img , k=1 , axes= (1,0))
self.image = pyqtgraph.ImageItem(self.img)
#self.image.setLevels([[ self.rmin , self.rmax] , [ self.gmin , self.gmax], [ self.bmin , self.bmax]])
self.plotGraph.addItem(self.image)
else:
pass
# def spectro(self,channel):
# if channel=="Ch1":
# if not self.amp:
# pass
# else:
# self.sc.axes.specgram(self.amp , Fs =5 )
# #self.plotGraph.setBackgroundColor((0, 256, 256))
# self.sc.draw()
# # self.spectrogram.clear()
# # self.spectrogram.removeItem(self.layout)
# elif channel =="Ch2":
# if not self.amp2:
# pass
# else:
# self.sc.axes.specgram(self.amp2 , Fs =5 )
# # self.layout.addWidget(self.sc)
# # self.spectrogram.clear()
# self.sc.draw()
# else:
# if not self.amp3:
# pass
# else:
# self.sc.canvas.axes.specgram(self.amp3 , Fs =5 )
# # self.layout.addWidget(sc)
# # self.spectrogram.clear()
# self.sc.draw()
# # self.spectrogram.setLayout(self.layout)
if __name__ == "__main__":
    import sys

    def main():
        """Create the Qt application, build the UI, and run the event loop."""
        app = QtWidgets.QApplication(sys.argv)
        window = QtWidgets.QMainWindow()
        ui = Ui_MainWindow()
        ui.setupUi(window)
        window.show()
        sys.exit(app.exec_())

    main()
| 44.221252 | 119 | 0.664582 |
7956b7e1c70ceeeb2e94618452f1ca400d2722a1 | 8,857 | py | Python | docs/conf.py | cosanlab/multivariate_inference | f591748e6ebd792f4192c1d04014e872af50d08d | [
"MIT"
] | null | null | null | docs/conf.py | cosanlab/multivariate_inference | f591748e6ebd792f4192c1d04014e872af50d08d | [
"MIT"
] | null | null | null | docs/conf.py | cosanlab/multivariate_inference | f591748e6ebd792f4192c1d04014e872af50d08d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# multivariate_inference documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import multivariate_inference
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon']
# Sphinx Napolean settings
napoleon_use_ivar = True
napoleon_include_private_with_doc = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'multivariate_inference'
copyright = u"2018, Eshin Jolly"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = multivariate_inference.__version__
# The full version, including alpha/beta/rc tags.
release = multivariate_inference.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# Dont sort alphabetically
autodoc_member_order = 'bysource'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'multivariate_inferencedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'multivariate_inference.tex',
u'multivariate_inference Documentation',
u'Eshin Jolly', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'multivariate_inference',
u'multivariate_inference Documentation',
[u'Eshin Jolly'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'multivariate_inference',
u'multivariate_inference Documentation',
u'Eshin Jolly',
'multivariate_inference',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.860627 | 76 | 0.722366 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.