hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7954c8af792875efbcc8e9429c327a6d2a9d4746 | 692 | py | Python | final_project/server.py | bkcelebi/flaskTranslatorEng-Fr | 442c86e04c9bb91f84d13abfefea18754b9e49d9 | [
"Apache-2.0"
] | null | null | null | final_project/server.py | bkcelebi/flaskTranslatorEng-Fr | 442c86e04c9bb91f84d13abfefea18754b9e49d9 | [
"Apache-2.0"
] | null | null | null | final_project/server.py | bkcelebi/flaskTranslatorEng-Fr | 442c86e04c9bb91f84d13abfefea18754b9e49d9 | [
"Apache-2.0"
] | null | null | null | from machinetranslation import translator
from flask import Flask, render_template, request
import json
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
    """Translate the 'textToTranslate' query parameter from English to French."""
    # request.args.get returns None when the parameter is missing --
    # presumably translator.english_to_french tolerates that; TODO confirm.
    textToTranslate = request.args.get('textToTranslate')
    return translator.english_to_french(textToTranslate)
@app.route("/frenchToEnglish")
def frenchToEnglish():
    """Translate the 'textToTranslate' query parameter from French to English."""
    # May be None if the query parameter is absent; passed through as-is.
    textToTranslate = request.args.get('textToTranslate')
    return translator.french_to_english(textToTranslate)
@app.route("/")
def renderIndexPage():
    """Serve the translator front page from templates/index.html."""
    return render_template('index.html')
if __name__ == "__main__":
    # Bind to all interfaces on port 8080.
    app.run(host="0.0.0.0", port=8080)
| 25.62963 | 57 | 0.748555 |
7954c9b1ec3ab694f0dd79d71de4ff7164db79b8 | 6,635 | py | Python | vtk_example.py | SebastianRiedel/python_scratchbook | cdd5a9864873fce2d510ae0977f68d36b3caa56b | [
"MIT"
] | null | null | null | vtk_example.py | SebastianRiedel/python_scratchbook | cdd5a9864873fce2d510ae0977f68d36b3caa56b | [
"MIT"
] | null | null | null | vtk_example.py | SebastianRiedel/python_scratchbook | cdd5a9864873fce2d510ae0977f68d36b3caa56b | [
"MIT"
] | null | null | null | # Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from mayavi import mlab
# To access any VTK object, we use 'tvtk', which is a Python wrapping of
# VTK replacing C++ setters and getters by Python properties and
# converting numpy arrays to VTK arrays when setting data.
from tvtk.api import tvtk
import numpy as np
from transformations import transformations as tf
def colored_lines():
    """Build a tvtk.PolyData containing two colored line cells.

    Joins (origin, p0) with a red line and (origin, p1) with a green line;
    the per-cell RGB colors are stored in cell_data.scalars.

    :return: tvtk.PolyData with two line cells and a 'Colors' cell array
    """
    # Create three points. Join (Origin and P0) with a red line and
    # (Origin and P1) with a green line
    origin = [0.0, 0.0, 0.0]
    p0 = [1.0, 0.0, 0.0]
    p1 = [0.0, 1.0, 0.0]
    # Create a vtkPoints object and store the points in it
    pts = tvtk.Points()
    pts.insert_next_point(origin)
    pts.insert_next_point(p0)
    pts.insert_next_point(p1)
    # Setup two colors - one for each line
    red = [255, 0, 0]
    green = [0, 255, 0]
    # Setup the colors array (one RGB tuple per line cell)
    colors = tvtk.UnsignedCharArray()
    colors.number_of_components = 3
    colors.name = "Colors"
    # Add the colors we created to the colors array
    colors.insert_next_tuple_value(red)
    colors.insert_next_tuple_value(green)
    # Create the first line (between Origin and P0)
    line0 = tvtk.Line()
    line0.point_ids.set_id(0,0) # the second 0 is the index of the Origin in the vtkPoints
    line0.point_ids.set_id(1,1) # the second 1 is the index of P0 in the vtkPoints
    # Create the second line (between Origin and P1)
    line1 = tvtk.Line()
    line1.point_ids.set_id(0,0) # the second 0 is the index of the Origin in the vtkPoints
    line1.point_ids.set_id(1,2) # 2 is the index of P1 in the vtkPoints
    # Create a cell array to store the lines in and add the lines to it
    lines = tvtk.CellArray()
    lines.insert_next_cell(line0)
    lines.insert_next_cell(line1)
    # Create a polydata to store everything in
    linesPolyData = tvtk.PolyData()
    # Add the points to the dataset
    linesPolyData.points =pts
    # Add the lines to the dataset
    linesPolyData.lines = lines
    # Color the lines - associate the first component (red) of the
    # colors array with the first component of the cell array (line 0)
    # and the second component (green) of the colors array with the
    # second component of the cell array (line 1)
    linesPolyData.cell_data.scalars =colors
    return linesPolyData
"""
make a little polydata indicating a pose: a sphere at the pose origin, a cylinder
in the z-axis direction, and a flag at the end of the cylinder in the x-direction
"""
def pose_indicator(scale=0.1):
    """Build a tvtk.PolyData pose marker.

    The marker is a green sphere at the origin, a blue tube along the +z
    axis, and a red flag (plane) at the tube tip pointing in +x direction,
    with per-cell RGB colors stored in cell_data.scalars.

    :param scale: uniform scale factor applied to the whole marker
    :return: scaled tvtk.PolyData (output of a TransformPolyDataFilter)
    """

    def _paint(poly, rgb):
        # Attach a constant per-cell RGB color array named 'Colors'.
        # np.uint8 matches UnsignedCharArray (np.int was removed in NumPy 1.24).
        colors = np.empty((poly.number_of_cells, 3), dtype=np.uint8)
        colors[:, 0], colors[:, 1], colors[:, 2] = rgb
        arr = tvtk.UnsignedCharArray()
        arr.from_array(colors)
        arr.name = 'Colors'
        poly.cell_data.scalars = arr

    # Sphere at the pose origin (green).
    sphere = tvtk.SphereSource()
    sphere.radius = 0.1
    sphere.theta_resolution = sphere.theta_resolution * 2
    sphere.phi_resolution = sphere.phi_resolution * 2
    sphere.update()
    sphere_poly = sphere.output
    _paint(sphere_poly, (0, 255, 0))

    # Tube (cylinder) along the z axis (blue).
    line = tvtk.LineSource()
    line.point1 = [0.0, 0.0, 0.0]
    line.point2 = [0.0, 0.0, 1.0]
    tube_filter = tvtk.TubeFilter(input=line.output)
    tube_filter.capping = 1
    tube_filter.radius = 0.05
    tube_filter.number_of_sides = 8
    tube_filter.update()
    tube_poly = tube_filter.output
    _paint(tube_poly, (0, 0, 255))

    # Plane (flag) in x-direction at the top of the marker (red).
    size = 0.25
    plane = tvtk.PlaneSource()
    plane.origin = [0.0, 0.0, 1.0]
    plane.point1 = [0.0, 0.0, 1.0 - size]
    plane.point2 = [size, 0.0, 1.0]
    plane.update()
    plane_poly = plane.output
    _paint(plane_poly, (255, 0, 0))

    # Combine the three parts into one polydata object.
    combined = tvtk.AppendPolyData()
    combined.add_input(sphere_poly)
    combined.add_input(tube_poly)
    combined.add_input(plane_poly)
    combined_poly = combined.output

    # Uniformly scale the combined output. Use a distinct name for the
    # transform so the 'scale' parameter is no longer shadowed (the
    # original rebound 'scale' to a tvtk.Transform mid-function).
    scale_mat = np.eye(4)
    scale_mat[0, 0] = scale
    scale_mat[1, 1] = scale
    scale_mat[2, 2] = scale
    transform = tvtk.Transform()
    transform.set_matrix(scale_mat.flatten())
    scaler = tvtk.TransformPolyDataFilter(input=combined_poly)
    scaler.transform = transform
    scaled_poly = scaler.output
    return scaled_poly
"""
loads an obj mesh for plotting with plot_polydata
returned is a tvtk.PolyData
"""
def load_obj(filename):
    """Read a Wavefront .obj file and return its mesh as tvtk.PolyData.

    :param filename: path to the .obj file
    :return: tvtk.PolyData output of the OBJ reader
    """
    obj = tvtk.OBJReader()
    obj.file_name = filename
    mesh = obj.output
    return mesh
"""
displays polydata in a given pose; optionally adds a coord frame at the origin with a given
scale (scale == 0.0 --> no coord frame displayed)
"""
def plot_polydata(polydata, T_obj2world=np.eye(4), axes_scale=0.0, opacity=0.2, color=(0,0,1), figure=None):
    """Render the given polydata posed by a 4x4 homogeneous transform.

    :param polydata: tvtk.PolyData to display
    :param T_obj2world: 4x4 object-to-world transform applied to the actor
    :param axes_scale: if > 0.0, also draw a coordinate frame of this scale
    :param opacity: actor opacity
    :param color: actor RGB color tuple
    :param figure: mlab figure to draw into; a new one is created if falsy
    :return: the mlab figure that was drawn into
    """
    mapper = tvtk.PolyDataMapper(input=polydata)
    p = tvtk.Property(opacity=opacity, color=color)
    actor = tvtk.Actor(mapper=mapper, property=p)
    # set pose via the actor's user matrix
    mat = tvtk.Matrix4x4()
    mat.from_array(T_obj2world)
    actor.user_matrix = mat
    if not figure:
        figure = mlab.figure()
    figure.scene.add_actor(actor)
    if axes_scale > 0.0:
        plot_coordframe(axes_scale, figure)
    return figure
"""
plots a coord frame at the scene's origin with a definable scale
"""
def plot_coordframe(scale=1.0, figure=None):
    """Draw an unlabeled XYZ axes actor at the scene origin.

    :param scale: total length of each axis
    :param figure: mlab figure to draw into; a new one is created if falsy
    :return: the mlab figure that was drawn into
    """
    axes = tvtk.AxesActor()
    axes.axis_labels = 0
    axes.total_length = [scale, scale, scale]
    if not figure:
        figure = mlab.figure()
    figure.scene.add_actor(axes)
    return figure
def main():
    """Demo: show a mug mesh plus pose indicators rotated about the y axis."""
    model = load_obj('./mug.obj')
    p = pose_indicator(0.03)
    # Build quaternions (w, x, y, z) for random rotations about the y axis:
    # w = cos(angle/2), xyz = axis * sin(angle/2).
    angles = np.random.rand(10) * 2 * np.pi
    axis = np.array([0,1,0])
    quaternions = np.empty((len(angles), 4), dtype=np.double)
    quaternions[:,0] = np.cos(angles / 2.0)
    quaternions[:,1:4] = axis[None, :]
    quaternions[:,1:4] = quaternions[:,1:4] * np.sin(angles / 2.0)[:,None]
    f = mlab.figure()
    for q in quaternions:
        # Convert each quaternion to Euler angles and build a pose matrix.
        angles = tf.euler_from_quaternion(q)
        mat = tf.compose_matrix(angles=angles, translate=np.array([0,0,0]))
        plot_polydata(p, mat, opacity=1.0, figure=f)
    plot_polydata(model, figure=f)
    plot_coordframe(0.1, f)
    mlab.show(stop=True)
if __name__ == '__main__':
    main()
| 27.878151 | 108 | 0.702185 |
7954ccce87979aa5e57f3a60abf6bc169d407823 | 1,531 | py | Python | mwcleric/fandom_client.py | AttemptToCallNil/mwcleric | 83a4a21b39de6cfde78131ea2237632947a0f705 | [
"MIT"
] | null | null | null | mwcleric/fandom_client.py | AttemptToCallNil/mwcleric | 83a4a21b39de6cfde78131ea2237632947a0f705 | [
"MIT"
] | null | null | null | mwcleric/fandom_client.py | AttemptToCallNil/mwcleric | 83a4a21b39de6cfde78131ea2237632947a0f705 | [
"MIT"
] | null | null | null | from mwclient import InvalidResponse
from .auth_credentials import AuthCredentials
from .cargo_client import CargoClient
from .site import Site
from .wiki_client import WikiClient
class FandomClient(WikiClient):
    """
    Functions for connecting to and editing specifically Fandom wikis.
    """
    # Cargo query helper bound to the same underlying mwclient Site.
    cargo_client: CargoClient = None
    client: Site = None
    wiki: str = None
    def __init__(self, wiki: str, client: Site = None,
                 credentials: AuthCredentials = None, lang: str = None,
                 **kwargs):
        """
        Create a site object.
        :param wiki: Name of a wiki
        :param client: WikiClient object. If this is provided, SessionManager will not be used.
        :param credentials: Optional. Provide if you want a logged-in session.
        :param lang: Optional language path segment of the wiki (e.g. "de")
        """
        url = '{}.fandom.com'.format(wiki)
        # Path is '/' for the default language, '/<lang>/' otherwise.
        self.lang = '/' + ('' if lang is None else lang + '/')
        super().__init__(url=url, path=self.lang, credentials=credentials, client=client, **kwargs)
        self.cargo_client = CargoClient(self.client)
    def relog(self):
        """Re-establish the session and rebuild the cargo client around it."""
        super().relog()
        self.cargo_client = CargoClient(self.client)
    def login(self):
        """Log in with the stored credentials, if any.

        On InvalidResponse, rewrites 'gamepedia' to 'fandom' in the URL and
        re-establishes the session.
        """
        if self.credentials is None:
            return
        try:
            self.client.login(username=self.credentials.username, password=self.credentials.password)
        except InvalidResponse:
            # NOTE(review): self.url is built as '<wiki>.fandom.com' above, so
            # this replace only matters if a parent class set a gamepedia URL.
            self.url = self.url.replace('gamepedia', 'fandom')
            self.relog()
| 33.282609 | 101 | 0.636839 |
7954cd0e1b4d31901fa1aab1e61cdb82b9310a0e | 218 | py | Python | setup.py | singh-karanpal/Capstone | 807ca3f70276a0dd17244a123a759a914d358424 | [
"MIT"
] | null | null | null | setup.py | singh-karanpal/Capstone | 807ca3f70276a0dd17244a123a759a914d358424 | [
"MIT"
] | null | null | null | setup.py | singh-karanpal/Capstone | 807ca3f70276a0dd17244a123a759a914d358424 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Package metadata for the Capstone 'src' package.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='BCStats Capstone project 2020',
    author='Karanpal Singh',
    license='MIT',
)
| 19.818182 | 48 | 0.674312 |
7954cd677edf5c959284af1950fecf63f02372cf | 1,789 | py | Python | iperon/contrib/pwned.py | kostya-ten/iperon | ad00b7bcbab3675a73448073c52b8b383e8354d5 | [
"Apache-2.0"
] | 1 | 2022-01-11T17:38:30.000Z | 2022-01-11T17:38:30.000Z | iperon/contrib/pwned.py | kostya-ten/iperon | abbfc12b15cdbd496e50720f957d5bce5ab8f382 | [
"Apache-2.0"
] | 5 | 2022-01-12T07:59:25.000Z | 2022-02-09T20:53:01.000Z | iperon/contrib/pwned.py | kostya-ten/iperon | abbfc12b15cdbd496e50720f957d5bce5ab8f382 | [
"Apache-2.0"
] | 1 | 2022-01-11T18:54:28.000Z | 2022-01-11T18:54:28.000Z | import hashlib
import logging
import httpx
from pydantic import BaseModel
from ..exceptions import PwnedConnectionErrorException
from ..http_base_client import HttpBaseClient
from ..redis import RedisClient
PWNED_URL = 'https://api.pwnedpasswords.com/range'
logger = logging.getLogger('iperon')
class Pwned(BaseModel):
    @classmethod
    async def check(cls, password: str) -> int:
        """Checks the success of password in the pwned service

        Uses the k-anonymity range API: only the first five hex characters of
        the SHA-1 hash are sent; the response lists matching hash suffixes
        with their breach counts. Positive results and cache hits are served
        from Redis.

        :param password: Password
        :type password: str
        :return: Returns how many times it appears in the data set.
        :rtype: int
        """
        # SHA-1 is mandated by the pwned API; not used for security here.
        hash_obj = hashlib.sha1(password.encode()) # skipcq: PTC-W1003
        hash_password = hash_obj.hexdigest().upper()
        # Serve from the Redis cache when a previous lookup stored a count.
        async with RedisClient() as redis_client:
            if result := await redis_client.get(f'pwned-{hash_password}'):
                return int(result)
        async with HttpBaseClient(base_url=PWNED_URL) as client:
            # Only the 5-character hash prefix ever leaves the machine.
            response: httpx.Response = await client.get(url=f'/{hash_password[:5]}')
            result = 0
            if response.status_code == 404: # pragma: no cover
                result = 0
            elif response.status_code != 200: # pragma: no cover
                raise PwnedConnectionErrorException()
            else:
                # Each response line has the form '<HASH-SUFFIX>:<COUNT>'.
                for line in response.text.splitlines():
                    line_hash, _, times = line.partition(':')
                    if hash_password[5:] == line_hash:
                        # Cache the hit for 90 days (seconds).
                        async with RedisClient() as redis_client:
                            await redis_client.set(f'pwned-{hash_password}', times, 60*60*24*90)
                        result = int(times)
                        break
            return result # pragma: no cover
# Module-level convenience alias so callers can use `check(...)` directly.
check = Pwned.check
| 30.844828 | 96 | 0.600894 |
7954cdb1e1d128fb0c44e5df8cf1de62dd938223 | 2,254 | py | Python | 7. Trees/abstract_data_type_tree.py | vivek28111992/data_structure_and_algorithm_in_python_practice | 16cb3ba5d02049352b40482de647acaad4b3b44a | [
"MIT"
] | null | null | null | 7. Trees/abstract_data_type_tree.py | vivek28111992/data_structure_and_algorithm_in_python_practice | 16cb3ba5d02049352b40482de647acaad4b3b44a | [
"MIT"
] | null | null | null | 7. Trees/abstract_data_type_tree.py | vivek28111992/data_structure_and_algorithm_in_python_practice | 16cb3ba5d02049352b40482de647acaad4b3b44a | [
"MIT"
] | null | null | null | # The Tree Abstract Data Type
# Implementation of Tree abstract base class
class Tree:
    """Abstract base class representing a tree structure.

    Concrete subclasses must implement the abstract methods below; the
    concrete `is_root` / `is_leaf` / `is_empty` helpers are derived from them.
    """

    #-------------------------------- nested Position class -----------------------------------
    class Position:
        """An abstraction representing the location of a single element."""

        def element(self):
            """Return the element stored at this Position."""
            raise NotImplementedError('must be implemented by subclass')

        def __eq__(self, other):
            """Return True if other Position represents the same location."""
            raise NotImplementedError('must be implemented by subclass')

        def __ne__(self, other):
            """Return True if other does not represent the same location."""
            return not (self == other)

    # ------------ abstract methods that concrete subclass must support ----------------------
    def root(self):
        """Return Position representing the tree's root (or None if empty)."""
        raise NotImplementedError('must be implemented by subclass')

    def parent(self, p):
        """Return Position representing p's parent (or None if p is root)."""
        raise NotImplementedError('must be implemented by subclass')

    def num_childern(self, p):
        """Return the number of children that Position p has.

        The misspelled name is kept for backward compatibility with existing
        subclasses; new code should call :meth:`num_children`.
        """
        raise NotImplementedError('must be implemented by subclass')

    def num_children(self, p):
        """Return the number of children that Position p has.

        Correctly-spelled alias delegating to the legacy :meth:`num_childern`,
        which concrete subclasses are expected to override.
        """
        return self.num_childern(p)

    def children(self, p):
        """Generate an iteration of Positions representing p's children."""
        raise NotImplementedError('must be implemented by subclass')

    def __len__(self):
        """ Return the total number of elements in the tree."""
        raise NotImplementedError('must be implemented by subclass')

    # ------------------- concrete methods implemented in this class -------------------------
    def is_root(self, p):
        """Return True if Position p represents the root of the tree."""
        return self.root() == p

    def is_leaf(self, p):
        """Return True if Position p does not have any children."""
        return self.num_childern(p) == 0

    def is_empty(self):
        """Return True if the tree is empty."""
        return len(self) == 0
| 40.25 | 95 | 0.602041 |
7954cddf64ac6d0a7ccef9cffbebd8a50d005086 | 8,699 | py | Python | model-optimizer/mo/utils/pipeline_config.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | null | null | null | model-optimizer/mo/utils/pipeline_config.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | 19 | 2021-03-26T08:11:00.000Z | 2022-02-21T13:06:26.000Z | model-optimizer/mo/utils/pipeline_config.py | calvinfeng/openvino | 11f591c16852637506b1b40d083b450e56d0c8ac | [
"Apache-2.0"
] | null | null | null | """
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import re
from mo.utils.error import Error
from mo.utils.simple_proto_parser import SimpleProtoParser
# The list of rules how to map the value from the pipeline.config file to the dictionary with attributes.
# The rule is either a string or a tuple with two elements. In the first case the rule string is used as a key to
# search in the parsed pipeline.config file attributes dictionary and a key to save found value. In the second case the
# first element of the tuple is the key to save found value; the second element of the tuple is a string defining the
# path to the value of the attribute in the pipeline.config file. The path consists of the regular expression strings
# defining the dictionary key to look for separated with a '/' character.
mapping_rules = [
    # NOTE: a 3-element tuple additionally carries a default value that is
    # used when the path is not found in the parsed pipeline.config.
    'num_classes',
    # preprocessing block attributes
    ('resizer_image_height', 'image_resizer/fixed_shape_resizer/height'),
    ('resizer_image_width', 'image_resizer/fixed_shape_resizer/width'),
    ('resizer_min_dimension', 'image_resizer/keep_aspect_ratio_resizer/min_dimension'),
    ('resizer_max_dimension', 'image_resizer/keep_aspect_ratio_resizer/max_dimension'),
    ('pad_to_max_dimension', 'image_resizer/keep_aspect_ratio_resizer/pad_to_max_dimension', False),
    # anchor generator attributes
    ('anchor_generator_height', 'first_stage_anchor_generator/grid_anchor_generator/height$', 256),
    ('anchor_generator_width', 'first_stage_anchor_generator/grid_anchor_generator/width$', 256),
    ('anchor_generator_height_stride', 'first_stage_anchor_generator/grid_anchor_generator/height_stride', 16),
    ('anchor_generator_width_stride', 'first_stage_anchor_generator/grid_anchor_generator/width_stride', 16),
    ('anchor_generator_scales', 'first_stage_anchor_generator/grid_anchor_generator/scales'),
    ('anchor_generator_aspect_ratios', 'first_stage_anchor_generator/grid_anchor_generator/aspect_ratios'),
    ('multiscale_anchor_generator_min_level', 'anchor_generator/multiscale_anchor_generator/min_level'),
    ('multiscale_anchor_generator_max_level', 'anchor_generator/multiscale_anchor_generator/max_level'),
    ('multiscale_anchor_generator_anchor_scale', 'anchor_generator/multiscale_anchor_generator/anchor_scale'),
    ('multiscale_anchor_generator_aspect_ratios', 'anchor_generator/multiscale_anchor_generator/aspect_ratios'),
    ('multiscale_anchor_generator_scales_per_octave', 'anchor_generator/multiscale_anchor_generator/scales_per_octave'),
    # SSD anchor generator attributes
    ('ssd_anchor_generator_min_scale', 'anchor_generator/ssd_anchor_generator/min_scale', 0.2),
    ('ssd_anchor_generator_max_scale', 'anchor_generator/ssd_anchor_generator/max_scale', 0.95),
    ('ssd_anchor_generator_num_layers', 'anchor_generator/ssd_anchor_generator/num_layers'),
    ('ssd_anchor_generator_aspect_ratios', 'anchor_generator/ssd_anchor_generator/aspect_ratios'),
    ('ssd_anchor_generator_scales', 'anchor_generator/ssd_anchor_generator/scales'),
    ('ssd_anchor_generator_interpolated_scale_aspect_ratio',
     'anchor_generator/ssd_anchor_generator/interpolated_scale_aspect_ratio', 1.0),
    ('ssd_anchor_generator_reduce_lowest', 'anchor_generator/ssd_anchor_generator/reduce_boxes_in_lowest_layer'),
    ('ssd_anchor_generator_base_anchor_height', 'anchor_generator/ssd_anchor_generator/base_anchor_height', 1.0),
    ('ssd_anchor_generator_base_anchor_width', 'anchor_generator/ssd_anchor_generator/base_anchor_width', 1.0),
    # Proposal and ROI Pooling layers attributes
    ('first_stage_nms_score_threshold', '.*_nms_score_threshold'),
    ('first_stage_nms_iou_threshold', '.*_nms_iou_threshold'),
    ('first_stage_max_proposals', '.*_max_proposals'),
    ('num_spatial_bins_height', '.*/rfcn_box_predictor/num_spatial_bins_height'),
    ('num_spatial_bins_width', '.*/rfcn_box_predictor/num_spatial_bins_width'),
    ('crop_height', '.*/rfcn_box_predictor/crop_height'),
    ('crop_width', '.*/rfcn_box_predictor/crop_width'),
    'initial_crop_size',
    ('use_matmul_crop_and_resize', 'use_matmul_crop_and_resize', False),
    ('add_background_class', 'add_background_class', True),
    # Detection Output layer attributes
    ('postprocessing_score_converter', '.*/score_converter'),
    ('postprocessing_score_threshold', '.*/batch_non_max_suppression/score_threshold'),
    ('postprocessing_iou_threshold', '.*/batch_non_max_suppression/iou_threshold'),
    ('postprocessing_max_detections_per_class', '.*/batch_non_max_suppression/max_detections_per_class'),
    ('postprocessing_max_total_detections', '.*/batch_non_max_suppression/max_total_detections'),
    ('share_box_across_classes', 'second_stage_box_predictor/.*/share_box_across_classes$', False),
    # Variances for predicted bounding box deltas (tx, ty, tw, th)
    ('frcnn_variance_x', 'box_coder/faster_rcnn_box_coder/x_scale', 10.0),
    ('frcnn_variance_y', 'box_coder/faster_rcnn_box_coder/y_scale', 10.0),
    ('frcnn_variance_width', 'box_coder/faster_rcnn_box_coder/width_scale', 5.0),
    ('frcnn_variance_height', 'box_coder/faster_rcnn_box_coder/height_scale', 5.0)
]
class PipelineConfig:
    """
    The class that parses pipeline.config files used to generate TF models generated using Object Detection API.
    The class stores data read from the file in a plain dictionary for easier access using the get_param function.
    """
    # Class-level defaults; both are re-assigned per instance in __init__.
    _raw_data_dict = dict()
    _model_params = dict()
    def __init__(self, file_name: str):
        """Parse the given pipeline.config file and extract model parameters.

        :param file_name: path to the pipeline.config file
        :raises Error: if the file cannot be parsed
        """
        self._raw_data_dict = SimpleProtoParser().parse_file(file_name)
        if not self._raw_data_dict:
            raise Error('Failed to parse pipeline.config file {}'.format(file_name))
        self._initialize_model_params()
    @staticmethod
    def _get_value_by_path(params: dict, path: list):
        """Recursively look up a value in a nested attribute dictionary.

        Each element of 'path' is a regular expression matched against the
        keys of the current nesting level; the first matching branch that
        yields a value wins.

        :param params: (possibly nested) dictionary of parsed attributes
        :param path: list of regexp strings, one per nesting level
        :return: the found value, or None if no path matches
        """
        if not path or len(path) == 0:
            return None
        if not isinstance(params, dict):
            return None
        compiled_regexp = re.compile(path[0])
        for key in params.keys():
            if re.match(compiled_regexp, key):
                if len(path) == 1:
                    return params[key]
                else:
                    value = __class__._get_value_by_path(params[key], path[1:])
                    if value is not None:
                        return value
        return None
    def _update_param_using_rule(self, params: dict, rule: [str, tuple]):
        """Apply one mapping rule, storing the resulting value in _model_params.

        :param params: parsed 'model' sub-dictionary of the pipeline.config
        :param rule: a plain key string, or a (dest_key, path) /
            (dest_key, path, default_value) tuple (see 'mapping_rules')
        :raises Error: if the rule is malformed
        """
        if isinstance(rule, str):
            # Plain string rule: copy the top-level attribute verbatim.
            if rule in params:
                self._model_params[rule] = params[rule]
                log.debug('Found value "{}" for path "{}"'.format(params[rule], rule))
        elif isinstance(rule, tuple):
            if len(rule) != 2 and len(rule) != 3:
                raise Error('Invalid rule length. Rule must be a tuple with two elements: key and path, or three '
                            'elements: key, path, default_value.')
            value = __class__._get_value_by_path(params, rule[1].split('/'))
            if value is not None:
                log.debug('Found value "{}" for path "{}"'.format(value, rule[1]))
                self._model_params[rule[0]] = value
            elif len(rule) == 3:
                # Fall back to the rule's default when the path is absent.
                self._model_params[rule[0]] = rule[2]
                log.debug('There is no value path "{}". Set default value "{}"'.format(value, rule[2]))
        else:
            raise Error('Invalid rule type. Rule can be either string or tuple')
    def _initialize_model_params(self):
        """
        Store global params in the dedicated dictionary self._model_params for easier use.
        :return: None
        """
        if 'model' not in self._raw_data_dict:
            raise Error('The "model" key is not found in the configuration file. Looks like the parsed file is not '
                        'Object Detection API model configuration file.')
        # The single value under 'model' is the architecture-specific block.
        params = list(self._raw_data_dict['model'].values())[0]
        for rule in mapping_rules:
            self._update_param_using_rule(params, rule)
    def get_param(self, param: str):
        """Return a previously extracted parameter value, or None if absent."""
        if param not in self._model_params:
            return None
        return self._model_params[param]
| 56.487013 | 120 | 0.724681 |
7954ce7033922810b5a085321f6cb82731c815ae | 1,070 | py | Python | actions/nodes_node_qemu_vmid_firewall_ipset_name_delete_ipset.py | nzlosh/stackstorm-proxmox | fc4388f0b62dd1d4a360e9542eb55dea387149f4 | [
"Apache-2.0"
] | null | null | null | actions/nodes_node_qemu_vmid_firewall_ipset_name_delete_ipset.py | nzlosh/stackstorm-proxmox | fc4388f0b62dd1d4a360e9542eb55dea387149f4 | [
"Apache-2.0"
] | null | null | null | actions/nodes_node_qemu_vmid_firewall_ipset_name_delete_ipset.py | nzlosh/stackstorm-proxmox | fc4388f0b62dd1d4a360e9542eb55dea387149f4 | [
"Apache-2.0"
] | null | null | null | import json
from packlib.base import ProxmoxAction
class NodesNodeQemuVmidFirewallIpsetNameDeleteIpsetAction(ProxmoxAction):
    """
    Delete IPSet
    """
    def run(self, name, node, vmid, profile_name=None):
        """Delete firewall IPSet `name` of VM `vmid` on cluster node `node`.

        :param name: IPSet name
        :param node: cluster node name
        :param vmid: unique ID of the VM
        :param profile_name: optional connection profile passed to the base action
        :return: result of the Proxmox API DELETE call
        """
        super().run(profile_name)
        # Only include non None arguments to pass through to proxmox api.
        proxmox_kwargs = {}
        for api_arg in [
            ["name", name, "string"],
            ["node", node, "string"],
            ["vmid", vmid, "integer"],
        ]:
            if api_arg[1] is None:
                continue
            if "[n]" in api_arg[0]:
                # Template support for array-style parameters ('foo[n]');
                # none of the arguments above use it, so this branch is inert.
                unit_list = json.loads(api_arg[1])
                for i, v in enumerate(unit_list):
                    proxmox_kwargs[api_arg[0].replace("[n]", str(i))] = v
            else:
                if api_arg[2] == "boolean":
                    # Proxmox expects booleans as 0/1 integers.
                    api_arg[1] = int(api_arg[1])
                proxmox_kwargs[api_arg[0]] = api_arg[1]
        return self.proxmox.delete(
            f"nodes/{node}/qemu/{vmid}/firewall/ipset/{name}", **proxmox_kwargs
        )
| 31.470588 | 79 | 0.530841 |
7954ceb5941b74631fcac32ba1e35a9ee562fe36 | 829 | py | Python | DQM/SiStripMonitorTrack/python/SiStripMonitorTrack_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DQM/SiStripMonitorTrack/python/SiStripMonitorTrack_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DQM/SiStripMonitorTrack/python/SiStripMonitorTrack_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from DQM.SiStripMonitorTrack.SiStripMonitorTrack_cfi import *
# TrackInfo ####
from RecoTracker.TrackProducer.TrackRefitters_cff import *
#-----------------------
# Reconstruction Modules
#-----------------------
from RecoLocalTracker.SiStripZeroSuppression.SiStripZeroSuppression_cfi import *
from EventFilter.SiStripRawToDigi.SiStripDigis_cfi import *
# Sequence for simulated data: unpack digis, zero-suppress, refit tracks, then monitor.
DQMSiStripMonitorTrack_Sim = cms.Sequence(siStripDigis*siStripZeroSuppression*TrackRefitter*SiStripMonitorTrack)
# Sequence for real data: monitoring module only.
DQMSiStripMonitorTrack_Real = cms.Sequence(SiStripMonitorTrack)
# Monitor refitted tracks (empty label) using clusters from 'siStripClusters'.
SiStripMonitorTrack.TrackProducer = 'TrackRefitter'
SiStripMonitorTrack.TrackLabel = ''
SiStripMonitorTrack.Cluster_src = 'siStripClusters'
SiStripMonitorTrack.Mod_On = False
SiStripMonitorTrack.OffHisto_On = False
# Keep trajectories in the event -- presumably consumed downstream by the monitor.
TrackRefitter.TrajectoryInEvent = True
| 41.45 | 112 | 0.816647 |
7954cff3f786cc73d526a46e114e1e046750fdcf | 11,927 | py | Python | test/distributed/elastic/rendezvous/utils_test.py | guoyejun/pytorch | 57cba8e60116bfff37d10bc2b4596d8c478ffd0a | [
"Intel"
] | null | null | null | test/distributed/elastic/rendezvous/utils_test.py | guoyejun/pytorch | 57cba8e60116bfff37d10bc2b4596d8c478ffd0a | [
"Intel"
] | null | null | null | test/distributed/elastic/rendezvous/utils_test.py | guoyejun/pytorch | 57cba8e60116bfff37d10bc2b4596d8c478ffd0a | [
"Intel"
] | 1 | 2022-02-23T02:34:50.000Z | 2022-02-23T02:34:50.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading
import time
import socket
from datetime import timedelta
from typing import List
from unittest import TestCase
from torch.distributed.elastic.rendezvous.utils import (
_PeriodicTimer,
_delay,
_matches_machine_hostname,
_parse_rendezvous_config,
_try_parse_port,
parse_rendezvous_endpoint,
)
class UtilsTest(TestCase):
    def test_parse_rendezvous_config_returns_dict(self) -> None:
        """Well-formed config strings parse into a dict; surrounding spaces are stripped."""
        expected_config = {
            "a": "dummy1",
            "b": "dummy2",
            "c": "dummy3=dummy4",
            "d": "dummy5/dummy6",
        }
        config = _parse_rendezvous_config(
            " b= dummy2 ,c=dummy3=dummy4, a =dummy1,d=dummy5/dummy6"
        )
        self.assertEqual(config, expected_config)
    def test_parse_rendezvous_returns_empty_dict_if_str_is_empty(self) -> None:
        """Empty or whitespace-only config strings yield an empty dict."""
        config_strs = ["", " "]
        for config_str in config_strs:
            with self.subTest(config_str=config_str):
                config = _parse_rendezvous_config(config_str)
                self.assertEqual(config, {})
    def test_parse_rendezvous_raises_error_if_str_is_invalid(self) -> None:
        """Malformed config strings raise ValueError with the format message."""
        config_strs = [
            "a=dummy1,",
            "a=dummy1,,c=dummy2",
            "a=dummy1, ,c=dummy2",
            "a=dummy1,= ,c=dummy2",
            "a=dummy1, = ,c=dummy2",
            "a=dummy1, =,c=dummy2",
            " , ",
        ]
        for config_str in config_strs:
            with self.subTest(config_str=config_str):
                with self.assertRaisesRegex(
                    ValueError,
                    r"^The rendezvous configuration string must be in format "
                    r"<key1>=<value1>,...,<keyN>=<valueN>.$",
                ):
                    _parse_rendezvous_config(config_str)
    def test_parse_rendezvous_raises_error_if_value_is_empty(self) -> None:
        """Keys without values raise ValueError naming the offending option."""
        config_strs = [
            "b=dummy1,a,c=dummy2",
            "b=dummy1,c=dummy2,a",
            "b=dummy1,a=,c=dummy2",
            " a ",
        ]
        for config_str in config_strs:
            with self.subTest(config_str=config_str):
                with self.assertRaisesRegex(
                    ValueError,
                    r"^The rendezvous configuration option 'a' must have a value specified.$",
                ):
                    _parse_rendezvous_config(config_str)
    def test_try_parse_port_returns_port(self) -> None:
        """A numeric string parses to its integer port value."""
        port = _try_parse_port("123")
        self.assertEqual(port, 123)
    def test_try_parse_port_returns_none_if_str_is_invalid(self) -> None:
        """Non-numeric or whitespace-padded strings parse to None."""
        port_strs = [
            "",
            " ",
            " 1",
            "1 ",
            " 1 ",
            "abc",
        ]
        for port_str in port_strs:
            with self.subTest(port_str=port_str):
                port = _try_parse_port(port_str)
                self.assertIsNone(port)
    def test_parse_rendezvous_endpoint_returns_tuple(self) -> None:
        """host:port endpoints split correctly, including bracketed IPv6 hosts."""
        endpoints = [
            "dummy.com:0",
            "dummy.com:123",
            "dummy.com:65535",
            "dummy-1.com:0",
            "dummy-1.com:123",
            "dummy-1.com:65535",
            "123.123.123.123:0",
            "123.123.123.123:123",
            "123.123.123.123:65535",
            "[2001:db8::1]:0",
            "[2001:db8::1]:123",
            "[2001:db8::1]:65535",
        ]
        for endpoint in endpoints:
            with self.subTest(endpoint=endpoint):
                host, port = parse_rendezvous_endpoint(endpoint, default_port=123)
                expected_host, expected_port = endpoint.rsplit(":", 1)
                # IPv6 hosts are written in brackets; strip them for comparison.
                if expected_host[0] == "[" and expected_host[-1] == "]":
                    expected_host = expected_host[1:-1]
                self.assertEqual(host, expected_host)
                self.assertEqual(port, int(expected_port))
    def test_parse_rendezvous_endpoint_returns_tuple_if_endpoint_has_no_port(
        self,
    ) -> None:
        """Endpoints without an explicit port fall back to default_port."""
        endpoints = ["dummy.com", "dummy-1.com", "123.123.123.123", "[2001:db8::1]"]
        for endpoint in endpoints:
            with self.subTest(endpoint=endpoint):
                host, port = parse_rendezvous_endpoint(endpoint, default_port=123)
                expected_host = endpoint
                # IPv6 hosts are written in brackets; strip them for comparison.
                if expected_host[0] == "[" and expected_host[-1] == "]":
                    expected_host = expected_host[1:-1]
                self.assertEqual(host, expected_host)
                self.assertEqual(port, 123)
def test_parse_rendezvous_endpoint_returns_tuple_if_endpoint_is_empty(self) -> None:
endpoints = ["", " "]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
host, port = parse_rendezvous_endpoint("", default_port=123)
self.assertEqual(host, "localhost")
self.assertEqual(port, 123)
def test_parse_rendezvous_endpoint_raises_error_if_hostname_is_invalid(
self,
) -> None:
endpoints = ["~", "dummy.com :123", "~:123", ":123"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The hostname of the rendezvous endpoint '{endpoint}' must be a "
r"dot-separated list of labels, an IPv4 address, or an IPv6 address.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_parse_rendezvous_endpoint_raises_error_if_port_is_invalid(self) -> None:
endpoints = ["dummy.com:", "dummy.com:abc", "dummy.com:-123", "dummy.com:-"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The port number of the rendezvous endpoint '{endpoint}' must be an integer "
r"between 0 and 65536.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_parse_rendezvous_endpoint_raises_error_if_port_is_too_big(self) -> None:
endpoints = ["dummy.com:65536", "dummy.com:70000"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The port number of the rendezvous endpoint '{endpoint}' must be an integer "
r"between 0 and 65536.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_matches_machine_hostname_returns_true_if_hostname_is_loopback(
self,
) -> None:
hosts = [
"localhost",
"127.0.0.1",
"::1",
"0000:0000:0000:0000:0000:0000:0000:0001",
]
for host in hosts:
with self.subTest(host=host):
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_hostname(
self,
) -> None:
host = socket.gethostname()
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_fqdn(
self,
) -> None:
host = socket.getfqdn()
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_address(
self,
) -> None:
addr_list = socket.getaddrinfo(
socket.gethostname(), None, proto=socket.IPPROTO_TCP
)
for addr in (addr_info[4][0] for addr_info in addr_list):
with self.subTest(addr=addr):
self.assertTrue(_matches_machine_hostname(addr))
def test_matches_machine_hostname_returns_false_if_hostname_does_not_match(
self,
) -> None:
hosts = ["dummy", "0.0.0.0", "::2"]
for host in hosts:
with self.subTest(host=host):
self.assertFalse(_matches_machine_hostname(host))
def test_delay_suspends_thread(self) -> None:
for seconds in 0.2, (0.2, 0.4):
with self.subTest(seconds=seconds):
time1 = time.monotonic()
_delay(seconds) # type: ignore[arg-type]
time2 = time.monotonic()
self.assertGreaterEqual(time2 - time1, 0.2)
class PeriodicTimerTest(TestCase):
    """Tests for the internal ``_PeriodicTimer`` helper."""
    def test_start_can_be_called_only_once(self):
        # A second start() on a live timer must fail loudly.
        timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
        timer.start()
        with self.assertRaisesRegex(RuntimeError, r"^The timer has already started.$"):
            timer.start()
        timer.cancel()
    def test_cancel_can_be_called_multiple_times(self):
        # cancel() must be idempotent.
        timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
        timer.start()
        timer.cancel()
        timer.cancel()
    def test_cancel_stops_background_thread(self):
        timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
        timer.start()
        # The worker thread is named "PeriodicTimer" while the timer runs...
        self.assertTrue(any(t.name == "PeriodicTimer" for t in threading.enumerate()))
        timer.cancel()
        # ...and must be gone once the timer is cancelled.
        self.assertTrue(all(t.name != "PeriodicTimer" for t in threading.enumerate()))
    def test_delete_stops_background_thread(self):
        timer = _PeriodicTimer(timedelta(seconds=1), lambda: None)
        timer.start()
        self.assertTrue(any(t.name == "PeriodicTimer" for t in threading.enumerate()))
        # Dropping the last reference must also tear down the worker thread.
        del timer
        self.assertTrue(all(t.name != "PeriodicTimer" for t in threading.enumerate()))
    def test_timer_calls_background_thread_at_regular_intervals(self):
        timer_begin_time: float
        # Call our function every 200ms.
        call_interval = 0.2
        # Keep the log of intervals between each consecutive call.
        actual_call_intervals: List[float] = []
        # Keep the number of times the function was called.
        call_count = 0
        # In order to prevent a flaky test instead of asserting that the
        # function was called an exact number of times we use a lower bound
        # that is guaranteed to be true for a correct implementation.
        min_required_call_count = 4
        timer_stop_event = threading.Event()
        def log_call(self):
            # NOTE: `self` here is the TestCase instance, passed explicitly as
            # an extra argument to _PeriodicTimer below; it is unused in the body.
            nonlocal timer_begin_time, call_count
            actual_call_intervals.append(time.monotonic() - timer_begin_time)
            call_count += 1
            if call_count == min_required_call_count:
                timer_stop_event.set()
            timer_begin_time = time.monotonic()
        timer = _PeriodicTimer(timedelta(seconds=call_interval), log_call, self)
        timer_begin_time = time.monotonic()
        timer.start()
        # Although this is theoretically non-deterministic, if our timer, which
        # has a 200ms call interval, does not get called 4 times in 60 seconds,
        # there is very likely something else going on.
        timer_stop_event.wait(60)
        timer.cancel()
        self.longMessage = False
        self.assertGreaterEqual(
            call_count,
            min_required_call_count,
            f"The function has been called {call_count} time(s) but expected to be called at least "
            f"{min_required_call_count} time(s).",
        )
        for actual_call_interval in actual_call_intervals:
            self.assertGreaterEqual(
                actual_call_interval,
                call_interval,
                f"The interval between two function calls was {actual_call_interval} second(s) but "
                f"expected to be at least {call_interval} second(s).",
            )
| 33.315642 | 100 | 0.595288 |
7954d0d2712af20ea1c08fd593a0314c5874b7cd | 2,331 | py | Python | sahara/tests/unit/base.py | openstack/sahara | c4f4d29847d5bcca83d49ef7e9a3378458462a79 | [
"Apache-2.0"
] | 161 | 2015-01-05T11:46:42.000Z | 2022-01-05T07:41:39.000Z | sahara_plugin_storm/tests/unit/base.py | openstack/sahara-plugin-storm | 960d5f6e49e3f27e2f40939df02d8e8ed0888bb5 | [
"Apache-2.0"
] | 1 | 2021-01-28T06:06:41.000Z | 2021-01-28T06:06:43.000Z | sahara_plugin_storm/tests/unit/base.py | openstack/sahara-plugin-storm | 960d5f6e49e3f27e2f40939df02d8e8ed0888bb5 | [
"Apache-2.0"
] | 118 | 2015-01-29T06:34:35.000Z | 2021-12-06T07:30:09.000Z | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslotest import base
from sahara import context
from sahara.db import api as db_api
from sahara import main
from sahara.utils import rpc
class SaharaTestCase(base.BaseTestCase):
    """Base test case that wires up a fake request context and RPC layer."""
    def setUp(self):
        super(SaharaTestCase, self).setUp()
        self.setup_context()
        # All-in-one RPC avoids needing a running message broker in tests.
        rpc.setup('all-in-one')
    def setup_context(self, username="test_user", tenant_id="tenant_1",
                      auth_token="test_auth_token", tenant_name='test_tenant',
                      service_catalog=None, **kwargs):
        """Install a sahara request context; the previous one (if any) is
        restored on test cleanup."""
        self.addCleanup(context.set_ctx,
                        context.ctx() if context.has_ctx() else None)
        context.set_ctx(context.Context(
            username=username, tenant_id=tenant_id,
            auth_token=auth_token, service_catalog=service_catalog or {},
            tenant_name=tenant_name, **kwargs))
    def override_config(self, name, override, group=None):
        """Temporarily override a config option; reverted on test cleanup."""
        main.CONF.set_override(name, override, group)
        self.addCleanup(main.CONF.clear_override, name, group)
class SaharaWithDbTestCase(SaharaTestCase):
    """SaharaTestCase variant backed by a throwaway in-memory SQLite DB."""
    def setUp(self):
        super(SaharaWithDbTestCase, self).setUp()
        # Point the DB layer at in-memory SQLite so each test is isolated.
        self.override_config('connection', "sqlite://", group='database')
        db_api.setup_db()
        self.addCleanup(db_api.drop_db)
class _ConsecutiveThreadGroup(context.ThreadGroup):
    """ThreadGroup stand-in that executes spawned callables synchronously."""
    def __init__(self, _thread_pool_size=1000):
        # Deliberately skip the parent initializer: no real pool is created.
        pass
    def spawn(self, thread_description, func, *args, **kwargs):
        # Run the task immediately on the calling thread instead of scheduling.
        func(*args, **kwargs)
    def __enter__(self):
        return self
    def __exit__(self, *ex):
        pass
def mock_thread_group(func):
    """Decorator forcing sahara's ThreadGroup to run spawned tasks in-line."""
    patcher = mock.patch('sahara.context.ThreadGroup',
                         new=_ConsecutiveThreadGroup)
    return patcher(func)
| 31.08 | 78 | 0.687259 |
7954d23dc02c93159315e4220ec2db0289fddb44 | 8,866 | py | Python | fluid/ocr_recognition/train.py | phlrain/models | 59adc0d6f38cd2351e16608d6c9d4e72dd5e7fea | [
"Apache-2.0"
] | 1 | 2018-09-12T09:36:44.000Z | 2018-09-12T09:36:44.000Z | fluid/ocr_recognition/train.py | phlrain/models | 59adc0d6f38cd2351e16608d6c9d4e72dd5e7fea | [
"Apache-2.0"
] | null | null | null | fluid/ocr_recognition/train.py | phlrain/models | 59adc0d6f38cd2351e16608d6c9d4e72dd5e7fea | [
"Apache-2.0"
] | 2 | 2018-06-14T13:59:36.000Z | 2018-11-14T12:34:47.000Z | """Trainer for OCR CTC or attention model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data
import paddle.fluid.profiler as profiler
from crnn_ctc_model import ctc_train_net
from attention_model import attention_train_net
import data_reader
import argparse
import functools
import sys
import time
import os
import numpy as np
# Command-line interface: each add_arg call registers one training option on
# the module-level parser consumed by main().
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('total_step', int, 720000, "The number of iterations. Zero or less means whole training set. More than 0 means the training set might be looped until # of iterations is reached.")
add_arg('log_period', int, 1000, "Log period.")
add_arg('save_model_period', int, 15000, "Save model period. '-1' means never saving the model.")
add_arg('eval_period', int, 15000, "Evaluate period. '-1' means never evaluating the model.")
add_arg('save_model_dir', str, "./models", "The directory the model to be saved to.")
add_arg('model', str, "crnn_ctc", "Which type of network to be used. 'crnn_ctc' or 'attention'")
add_arg('init_model', str, None, "The init model file of directory.")
add_arg('use_gpu', bool, True, "Whether use GPU to train.")
add_arg('min_average_window',int, 10000, "Min average window.")
add_arg('max_average_window',int, 12500, "Max average window. It is proposed to be set as the number of minibatch in a pass.")
add_arg('average_window', float, 0.15, "Average window.")
add_arg('parallel', bool, False, "Whether use parallel training.")
add_arg('profile', bool, False, "Whether to use profiling.")
add_arg('skip_batch_num', int, 0, "The number of first minibatches to skip as warm-up for better performance test.")
add_arg('skip_test', bool, False, "Whether to skip test phase.")
# yapf: enable
def train(args):
    """OCR training driver.

    Builds the CTC or attention network selected by ``args.model``, then runs
    the train/eval/checkpoint loop until ``args.total_step`` iterations (or a
    single pass when ``total_step <= 0``) and prints latency/fps benchmarks.
    """
    # Select the network builder and feed-dict helper for the chosen model.
    if args.model == "crnn_ctc":
        train_net = ctc_train_net
        get_feeder_data = get_ctc_feeder_data
    else:
        train_net = attention_train_net
        get_feeder_data = get_attention_feeder_data
    # None means: use the data_reader defaults for classes and file lists.
    num_classes = None
    train_images = None
    train_list = None
    test_images = None
    test_list = None
    num_classes = data_reader.num_classes(
    ) if num_classes is None else num_classes
    data_shape = data_reader.data_shape()
    # define network
    sum_cost, error_evaluator, inference_program, model_average = train_net(
        args, data_shape, num_classes)
    # data reader
    train_reader = data_reader.train(
        args.batch_size,
        train_images_dir=train_images,
        train_list_file=train_list,
        cycle=args.total_step > 0,
        model=args.model)
    test_reader = data_reader.test(
        test_images_dir=test_images, test_list_file=test_list, model=args.model)
    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    # CE (continuous evaluation) mode fixes the seed for reproducible runs.
    if 'ce_mode' in os.environ:
        fluid.default_startup_program().random_seed = 90
    exe.run(fluid.default_startup_program())
    # load init model
    if args.init_model is not None:
        model_dir = args.init_model
        model_file_name = None
        # A file path means a single-file checkpoint; split dir and filename.
        if not os.path.isdir(args.init_model):
            model_dir = os.path.dirname(args.init_model)
            model_file_name = os.path.basename(args.init_model)
        fluid.io.load_params(exe, dirname=model_dir, filename=model_file_name)
        print("Init model from: %s." % args.init_model)
    train_exe = exe
    error_evaluator.reset(exe)
    if args.parallel:
        train_exe = fluid.ParallelExecutor(
            use_cuda=True if args.use_gpu else False, loss_name=sum_cost.name)
    fetch_vars = [sum_cost] + error_evaluator.metrics
    def train_one_batch(data):
        # Run one minibatch; parallel executors return per-device arrays that
        # must be summed, the plain executor returns scalars.
        var_names = [var.name for var in fetch_vars]
        if args.parallel:
            results = train_exe.run(var_names,
                                    feed=get_feeder_data(data, place))
            results = [np.array(result).sum() for result in results]
        else:
            results = train_exe.run(feed=get_feeder_data(data, place),
                                    fetch_list=fetch_vars)
            results = [result[0] for result in results]
        return results
    def test(iter_num):
        # Evaluate sequence error over the whole test set.
        error_evaluator.reset(exe)
        for data in test_reader():
            exe.run(inference_program, feed=get_feeder_data(data, place))
        _, test_seq_error = error_evaluator.eval(exe)
        print("\nTime: %s; Iter[%d]; Test seq error: %s.\n" % (
            time.time(), iter_num, str(test_seq_error[0])))
        #Note: The following logs are special for CE monitoring.
        #Other situations do not need to care about these logs.
        print("kpis test_acc %f" % (1 - test_seq_error[0]))
    def save_model(args, exe, iter_num):
        # Persist all parameters as a single numbered checkpoint file.
        filename = "model_%05d" % iter_num
        fluid.io.save_params(
            exe, dirname=args.save_model_dir, filename=filename)
        print("Saved model to: %s/%s." % (args.save_model_dir, filename))
    iter_num = 0
    stop = False
    start_time = time.time()
    while not stop:
        total_loss = 0.0
        total_seq_error = 0.0
        batch_times = []
        # train a pass
        for data in train_reader():
            if args.total_step > 0 and iter_num == args.total_step + args.skip_batch_num:
                stop = True
                break
            if iter_num < args.skip_batch_num:
                print("Warm-up iteration")
            if iter_num == args.skip_batch_num:
                profiler.reset_profiler()
            start = time.time()
            results = train_one_batch(data)
            batch_time = time.time() - start
            # NOTE(review): `fps` is unused here; per-batch fps is recomputed
            # from `batch_times` in the benchmark section below.
            fps = args.batch_size / batch_time
            batch_times.append(batch_time)
            total_loss += results[0]
            total_seq_error += results[2]
            iter_num += 1
            # training log
            if iter_num % args.log_period == 0:
                print("\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f" % (
                    time.time(), iter_num,
                    total_loss / (args.log_period * args.batch_size),
                    total_seq_error / (args.log_period * args.batch_size)))
                print("kpis train_cost %f" % (total_loss / (args.log_period *
                                              args.batch_size)))
                print("kpis train_acc %f" % (
                    1 - total_seq_error / (args.log_period * args.batch_size)))
                total_loss = 0.0
                total_seq_error = 0.0
            # evaluate
            if not args.skip_test and iter_num % args.eval_period == 0:
                if model_average:
                    with model_average.apply(exe):
                        test(iter_num)
                else:
                    test(iter_num)
            # save model
            if iter_num % args.save_model_period == 0:
                if model_average:
                    with model_average.apply(exe):
                        save_model(args, exe, iter_num)
                else:
                    save_model(args, exe, iter_num)
    end_time = time.time()
    print("kpis train_duration %f" % (end_time - start_time))
    # Postprocess benchmark data
    latencies = batch_times[args.skip_batch_num:]
    latency_avg = np.average(latencies)
    latency_pc99 = np.percentile(latencies, 99)
    fpses = np.divide(args.batch_size, latencies)
    fps_avg = np.average(fpses)
    fps_pc99 = np.percentile(fpses, 1)
    # Benchmark output
    print('\nTotal examples (incl. warm-up): %d' %
          (iter_num * args.batch_size))
    print('average latency: %.5f s, 99pc latency: %.5f s' % (latency_avg,
                                                             latency_pc99))
    print('average fps: %.5f, fps for 99pc latency: %.5f' % (fps_avg,
                                                             fps_pc99))
def main():
    """Parse CLI options and launch training, optionally under a profiler."""
    args = parser.parse_args()
    print_arguments(args)
    if not args.profile:
        train(args)
    elif args.use_gpu:
        with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
            train(args)
    else:
        with profiler.profiler("CPU", sorted_key='total') as cpuprof:
            train(args)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 40.484018 | 199 | 0.604557 |
7954d2425049f7814699c4832275e2531ce41cf8 | 6,107 | py | Python | bot/ext/tabletop/tictactoe.py | Rickaym/Nexus-Bot | 86b17ca5f7cce10eae981157971990875eda0a06 | [
"MIT"
] | null | null | null | bot/ext/tabletop/tictactoe.py | Rickaym/Nexus-Bot | 86b17ca5f7cce10eae981157971990875eda0a06 | [
"MIT"
] | null | null | null | bot/ext/tabletop/tictactoe.py | Rickaym/Nexus-Bot | 86b17ca5f7cce10eae981157971990875eda0a06 | [
"MIT"
] | null | null | null | from discord import ui
from discord.embeds import Embed
from discord.enums import ButtonStyle
from discord.ext import commands
from discord.interactions import Interaction
from asyncio.exceptions import TimeoutError
from discord.ui.button import Button
from bot.utils.shadchan import MatchInstance, MatchModes, Pool, MatchOptions
from bot.utils.hearsay import Hearsay
from string import ascii_uppercase
class TicTacToeUi(ui.View):
    """Interactive 3x3 tic-tac-toe board rendered as discord buttons.

    Grid layout (custom_id = column letter + row number):
        A1 B1 C1
        A2 B2 C2
        A3 B3 C3
    """
    def __init__(self, player, match: MatchInstance, symbol, loop):
        super().__init__(timeout=180)
        self.player = player
        self.match = match
        self.symbol = symbol
        self.loop = loop
        self.started = False
        self.won = False
        # board[column][row] -> Button; columns A..C map to indexes 0..2.
        self.board = [[], [], []]
        for c_id in ("A1", "B1", "C1", "A2", "B2", "C2", "A3", "B3", "C3"):
            # Zero-width space label keeps the button visually blank.
            i = Button(label="\u200b", style=ButtonStyle.gray,
                       custom_id=c_id, row=int(c_id[1]))
            self.board[ascii_uppercase.index(c_id[0])].append(i)
            self.add_item(i)
    async def interaction_check(self, i: Interaction) -> bool:
        """Handle our player's button press; reject clicks from anyone else
        or before the game has started."""
        if not self.started or i.user.id != self.player.id:
            return False
        custom_id = i.data["custom_id"]
        # Lock the whole board while the opponent takes their turn and mark
        # the pressed cell with this player's symbol.
        for item in self.children:
            item.disabled = True
            if item.custom_id == custom_id:
                item.emoji = self.symbol
                item.style = ButtonStyle.blurple
        won, line = self.get_gamestate(custom_id, self.symbol)
        if won:
            self.won = True
            # Highlight the winning line in green.
            for z in line:
                z.style = ButtonStyle.green
            self.stop()
        self.loop.create_task(i.message.edit(view=self))
        # Notify the opposing side via the match event bus.
        self.match.emit("press_button", custom_id, self.player, self.symbol, self.won, line)
        return True
    async def enemy_press(self, custom_id, message, symbol, enemy_won, line):
        """Mirror the opponent's move onto this player's board."""
        if enemy_won is True:
            self.stop()
        for item in self.children:
            if item.custom_id == custom_id:
                item.disabled = True
                item.emoji = symbol
                item.style = ButtonStyle.blurple
            if enemy_won is True:
                # Paint the opponent's winning line red and freeze the board.
                if item.custom_id in [i.custom_id for i in line]:
                    item.style = ButtonStyle.danger
                item.disabled = True
            else:
                # Re-enable the cells that are still unclaimed (gray).
                if item.style == ButtonStyle.gray:
                    item.disabled = False
        self.loop.create_task(message.edit(view=self))
    def get_gamestate(self, custom_id, symbol):
        """Return ``(won, winning_buttons)`` after ``symbol`` was placed at
        ``custom_id``; checks the cell's column, row and both diagonals.

        Board addressing recap:
            A1 B1 C1
            A2 B2 C2
            A3 B3 C3
        """
        line = []
        col, row = ascii_uppercase.index(custom_id[0]), int(custom_id[1])-1
        # Column through the played cell.
        for i in range(3):
            if str(self.board[col][i].emoji) != symbol:
                break
            else:
                line.append(self.board[col][i])
                if i == 2:
                    return True, line
        line = []
        # Row through the played cell.
        for i in range(3):
            if str(self.board[i][row].emoji) != symbol:
                break
            else:
                line.append(self.board[i][row])
                if i == 2:
                    return True, line
        line = []
        # Main diagonal (A1-B2-C3) if the cell lies on it.
        if col == row:
            for i in range(3):
                if str(self.board[i][i].emoji) != symbol:
                    break
                else:
                    line.append(self.board[i][i])
                    if i == 2:
                        return True, line
        line = []
        # Anti-diagonal (A3-B2-C1) if the cell lies on it.
        if col+row == 2:
            for i in range(3):
                if str(self.board[i][2-i].emoji) != symbol:
                    break
                else:
                    line.append(self.board[i][2-i])
                    if i == 2:
                        return True, line
        return False, None
class TicTacToe(commands.Cog):
    """Cog exposing the tic-tac-toe matchmaking and game commands."""
    # Matchmaking pool id for this game.
    ttt_SID = "tic_tac_toe___"
    # Asset id used to look up a player's custom symbol emoji.
    symbol_AID = "tttemoji"
    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
    async def start_tictactoe(self, commander, ctx, match: MatchInstance):
        """Run one player's side of a match: build the board, relay moves
        between both sides, and settle the result."""
        game = TicTacToeUi(commander, match, (await Hearsay.resolve_asset(commander, self.symbol_AID)) or "⭕", self.bot.loop)
        msg = await ctx.send(embed=Embed(description=f"*You are {game.symbol}! First person to click starts.*", color=0x2F3136), view=game)
        @match.on_emit("press_button")
        async def press_button(custom_id, player, symbol, won, line):
            # Only mirror moves made by the opposing player.
            if player.id != commander.id:
                # If both players picked the same emoji, render the enemy as ❌.
                if symbol == game.symbol:
                    symbol = '❌'
                await game.enemy_press(custom_id, msg, symbol, won, line)
        game.started = True
        # At most 9 turns fit on a 3x3 board.
        for i in range(9):
            # press_button is dispatched from inside the ui
            try:
                await match.wait_for("press_button", 20)
            except TimeoutError:
                # NOTE(review): if the very first turn times out, `res` is
                # still unbound when `if not res:` runs below — verify.
                await ctx.send("Timed out! A turn has been skipped.")
                break
            res = await match.conclude(game.won, lambda a, b: a or b)
            if res is True:
                if game.won is True:
                    await ctx.send(embed=Embed(description=f"*You've won!*", color=0x2F3136))
                else:
                    await ctx.send(embed=Embed(description=f"*You've lost!*", color=0x2F3136))
                break
        if not res:
            await ctx.send(embed=Embed(description=f"*It's a tie.*", color=0x2F3136))
        await match.enable_chat(self.bot, 45)
        return match.end()
    @commands.command(name="view")
    async def vvv(self, ctx):
        # NOTE(review): debug leftover — TicTacToeUi now requires
        # (player, match, symbol, loop); this call passes only ctx.author and
        # will raise TypeError if invoked. Consider removing this command.
        v = TicTacToeUi(ctx.author)
        await ctx.send("test", view=v)
        v.started = True
        await v.wait()
    @commands.command(name="tictactoe", aliases=("ttt",))
    async def tictactoe(self, ctx):
        """Queue the author for a game-vs-game tic-tac-toe match."""
        await Pool.get(self.ttt_SID).lineup(ctx.author, ctx, self.bot.loop,
                                            lambda *args: self.bot.loop.create_task(self.start_tictactoe(*args)),
                                            MatchOptions(MatchModes.gvg))
def setup(bot):
    """discord.py extension entry point: register the TicTacToe cog."""
    bot.add_cog(TicTacToe(bot))
    print("Loaded TicTactoe.cog")
7954d37059baf4c9e974e983518556b976230d4f | 401 | py | Python | populate.py | NealWhitlock/auto_populate | 9e3d960dc715c9b57abcc4de54473f217398ea07 | [
"MIT"
] | null | null | null | populate.py | NealWhitlock/auto_populate | 9e3d960dc715c9b57abcc4de54473f217398ea07 | [
"MIT"
] | null | null | null | populate.py | NealWhitlock/auto_populate | 9e3d960dc715c9b57abcc4de54473f217398ea07 | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from getter import ingredient_getter
app = FastAPI()
class Item(BaseModel):
word: str
@app.post("/populate/{word}")
async def feature(item: Item):
item = jsonable_encoder(item)
word_string = item['word']
results_json = ingredient_getter(word_string)
return results_json
| 20.05 | 49 | 0.755611 |
7954d38e693efe735aee351a4ddb68b5f8bb0135 | 1,429 | py | Python | locations/spiders/winn_dixie.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | locations/spiders/winn_dixie.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | locations/spiders/winn_dixie.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from locations.items import GeojsonPointItem
class FiestaMartSpider(scrapy.Spider):
    """Scrapes Winn Dixie store locations from winndixie.com.

    NOTE(review): the class name says "FiestaMart" but the spider name,
    domain, and brand are all Winn Dixie — looks like a copy-paste leftover;
    consider renaming the class.
    """
    name = "winndixie"
    item_attributes = {"brand": "Winn Dixie"}
    allowed_domains = ["winndixie.com"]
    # Unused while start_requests() below is defined; kept for reference.
    start_urls = [
        "https://www.winndixie.com/locator",
    ]
    def start_requests(self):
        # Single locator API query centered on Jacksonville, FL with a
        # 1000-mile radius — presumably wide enough to return every store;
        # TODO confirm coverage.
        template = "https://www.winndixie.com/V2/storelocator/getStores?search=jacksonville,%20fl&strDefaultMiles=1000&filter="
        headers = {
            "Accept": "application/json",
        }
        yield scrapy.http.FormRequest(
            url=template, method="GET", headers=headers, callback=self.parse
        )
    def parse(self, response):
        """Convert the JSON store list into GeojsonPointItem records."""
        jsonresponse = response.json()
        for store in jsonresponse:
            properties = {
                "name": store["StoreName"],
                "ref": store["StoreCode"],
                "addr_full": store["Address"]["AddressLine2"],
                "city": store["Address"]["City"],
                "state": store["Address"]["State"],
                "postcode": store["Address"]["Zipcode"],
                "country": store["Address"]["Country"],
                "phone": store["Phone"],
                "lat": float(store["Address"]["Latitude"]),
                "lon": float(store["Address"]["Longitude"]),
                "website": response.url,
            }
            yield GeojsonPointItem(**properties)
7954d3ed19c77ea5d54f5daa08cc58940c630754 | 10,944 | py | Python | pprp/crypto_2.py | Avalon913/FGODailyBonus | 6dbb46485b536361b99a098a500a62d3f73931c0 | [
"MIT"
] | 18 | 2021-09-01T09:39:45.000Z | 2022-03-29T13:37:39.000Z | pprp/crypto_2.py | Avalon913/FGODailyBonus | 6dbb46485b536361b99a098a500a62d3f73931c0 | [
"MIT"
] | 4 | 2021-09-09T03:51:25.000Z | 2022-02-24T10:48:02.000Z | pprp/crypto_2.py | Avalon913/FGODailyBonus | 6dbb46485b536361b99a098a500a62d3f73931c0 | [
"MIT"
] | 48 | 2021-09-01T05:32:02.000Z | 2022-03-29T04:53:36.000Z | """
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
# Per-row byte-rotation amounts for ShiftRows, indexed as
# shifts[block_size_class][row] -> [encrypt_shift, decrypt_shift]
# (encrypt/decrypt use index 0/1 respectively in the class below).
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
          [[0, 0], [1, 5], [2, 4], [3, 3]],
          [[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
# Bit matrix for the affine transform applied when building the S-box below.
A = [[1, 1, 1, 1, 1, 0, 0, 0],
     [0, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 1, 1, 1, 1, 1],
     [1, 0, 0, 0, 1, 1, 1, 1],
     [1, 1, 0, 0, 0, 1, 1, 1],
     [1, 1, 1, 0, 0, 0, 1, 1],
     [1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
    # Multiply the previous entry by the generator 3 (i.e. x*2 ^ x),
    # reducing modulo the polynomial 0x11B on overflow.
    j = (alog[-1] << 1) ^ alog[-1]
    if j & 0x100 != 0:
        j ^= 0x11B
    alog.append(j)
# log is the inverse mapping of alog: log[alog[i]] == i.
log = [0] * 256
for i in range(1, 255):
    log[alog[i]] = i
def mul(a, b):
    """Multiply two GF(2^8) field elements via the log/antilog tables."""
    if 0 in (a, b):
        return 0
    exponent = (log[a & 0xFF] + log[b & 0xFF]) % 255
    return alog[exponent]
# substitution box based on F^{-1}(x)
# box[i] holds the 8 bits (MSB first) of the multiplicative inverse of i.
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
    j = alog[255 - log[i]]
    for t in range(8):
        box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
    for t in range(8):
        cox[i][t] = B[t]
        for j in range(8):
            cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
# Pack cox's bit rows back into bytes; Si is the inverse lookup of S.
S = [0] * 256
Si = [0] * 256
for i in range(256):
    S[i] = cox[i][0] << 7
    for t in range(1, 8):
        S[i] ^= cox[i][t] << (7-t)
    Si[S[i] & 0xFF] = i
# T-boxes
# G is the MixColumn coefficient matrix; iG (computed below) is its inverse.
G = [[2, 1, 1, 3],
     [3, 2, 1, 1],
     [1, 3, 2, 1],
     [1, 1, 3, 2]]
# Augmented matrix [G | I] for Gauss-Jordan elimination over GF(2^8).
AA = [[0] * 8 for i in range(4)]
for i in range(4):
    for j in range(4):
        AA[i][j] = G[i][j]
    AA[i][i+4] = 1
for i in range(4):
    pivot = AA[i][i]
    if pivot == 0:
        # Zero pivot: swap in a row below that has a non-zero entry.
        t = i + 1
        while AA[t][i] == 0 and t < 4:
            t += 1
        assert t != 4, 'G matrix must be invertible'
        for j in range(8):
            AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
        pivot = AA[i][i]
    # Normalize the pivot row (division done via log/antilog tables).
    for j in range(8):
        if AA[i][j] != 0:
            AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
    # Eliminate the pivot column from every other row.
    for t in range(4):
        if i != t:
            for j in range(i+1, 8):
                AA[t][j] ^= mul(AA[i][j], AA[t][i])
            AA[t][i] = 0
# The right half of the reduced augmented matrix is G's inverse.
iG = [[0] * 4 for i in range(4)]
for i in range(4):
    for j in range(4):
        iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
    """Multiply ``a`` by each of the four bytes of ``bs`` in GF(2^8) and
    pack the products into one 32-bit word (first element in the top byte)."""
    if a == 0:
        return 0
    packed = 0
    for b in bs:
        product = mul(a, b) if b != 0 else 0
        packed = (packed << 8) | product
    return packed
# Combined round-transform tables: T1-T4 for encryption (built from S and the
# MixColumn matrix G), T5-T8 for decryption (from Si and iG), and U1-U4 for
# applying the inverse MixColumn to round keys.
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in range(256):
    s = S[t]
    T1.append(mul4(s, G[0]))
    T2.append(mul4(s, G[1]))
    T3.append(mul4(s, G[2]))
    T4.append(mul4(s, G[3]))
    s = Si[t]
    T5.append(mul4(s, iG[0]))
    T6.append(mul4(s, iG[1]))
    T7.append(mul4(s, iG[2]))
    T8.append(mul4(s, iG[3]))
    U1.append(mul4(t, iG[0]))
    U2.append(mul4(t, iG[1]))
    U3.append(mul4(t, iG[2]))
    U4.append(mul4(t, iG[3]))
# round constants
# rcon[i] = 2^i in GF(2^8); consumed by the key schedule in __init__.
rcon = [1]
r = 1
for t in range(1, 30):
    r = mul(2, r)
    rcon.append(r)
# Drop the construction-time scratch tables and helpers; only S, Si, T1-T8,
# U1-U4, rcon, shifts and num_rounds are referenced by the class below.
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
    """Rijndael (AES) cipher with precomputed round keys.

    Key and block sizes may independently be 16, 24 or 32 bytes; keys and
    blocks are passed as ``str`` of exactly that length.
    """
    def __init__(self, key, block_size = 16):
        """Validate sizes and expand ``key`` into the encryption (Ke) and
        decryption (Kd) round-key schedules.

        Raises ValueError for any key or block size other than 16/24/32.
        """
        if block_size != 16 and block_size != 24 and block_size != 32:
            raise ValueError('Invalid block size: ' + str(block_size))
        if len(key) != 16 and len(key) != 24 and len(key) != 32:
            raise ValueError('Invalid key size: ' + str(len(key)))
        self.block_size = block_size
        ROUNDS = num_rounds[len(key)][block_size]
        # BC/KC: block and key length in 32-bit words.
        BC = block_size // 4
        # encryption round keys
        Ke = [[0] * BC for i in range(ROUNDS + 1)]
        # decryption round keys
        Kd = [[0] * BC for i in range(ROUNDS + 1)]
        ROUND_KEY_COUNT = (ROUNDS + 1) * BC
        KC = len(key) // 4
        # copy user material bytes into temporary ints
        tk = []
        for i in range(0, KC):
            tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
                (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
        # copy values into round key arrays
        t = 0
        j = 0
        while j < KC and t < ROUND_KEY_COUNT:
            Ke[t // BC][t % BC] = tk[j]
            Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
            j += 1
            t += 1
        tt = 0
        rconpointer = 0
        while t < ROUND_KEY_COUNT:
            # extrapolate using phi (the round key evolution function)
            tt = tk[KC - 1]
            tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
                     (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
                     (S[ tt & 0xFF] & 0xFF) << 8 ^ \
                     (S[(tt >> 24) & 0xFF] & 0xFF) ^ \
                     (rcon[rconpointer] & 0xFF) << 24
            rconpointer += 1
            # 256-bit keys (KC == 8) take an extra S-box pass mid-schedule.
            if KC != 8:
                for i in range(1, KC):
                    tk[i] ^= tk[i-1]
            else:
                for i in range(1, KC // 2):
                    tk[i] ^= tk[i-1]
                tt = tk[KC // 2 - 1]
                tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
                               (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
                               (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
                               (S[(tt >> 24) & 0xFF] & 0xFF) << 24
                for i in range(KC // 2 + 1, KC):
                    tk[i] ^= tk[i-1]
            # copy values into round key arrays
            j = 0
            while j < KC and t < ROUND_KEY_COUNT:
                Ke[t // BC][t % BC] = tk[j]
                Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
                j += 1
                t += 1
        # inverse MixColumn where needed
        for r in range(1, ROUNDS):
            for j in range(BC):
                tt = Kd[r][j]
                Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
                           U2[(tt >> 16) & 0xFF] ^ \
                           U3[(tt >> 8) & 0xFF] ^ \
                           U4[ tt & 0xFF]
        self.Ke = Ke
        self.Kd = Kd
    def encrypt(self, plaintext):
        """Encrypt one block; ``plaintext`` must be exactly ``block_size``
        characters. Returns the ciphertext as a string of the same length."""
        if len(plaintext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
        Ke = self.Ke
        BC = self.block_size // 4
        ROUNDS = len(Ke) - 1
        # SC selects the shift-row table for this block size.
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        # Encryption shift offsets for rows 1-3 (index 0 of each pair).
        s1 = shifts[SC][1][0]
        s2 = shifts[SC][2][0]
        s3 = shifts[SC][3][0]
        a = [0] * BC
        # temporary work array
        t = []
        # plaintext to ints + key
        for i in range(BC):
            t.append((ord(plaintext[i * 4 ]) << 24 |
                      ord(plaintext[i * 4 + 1]) << 16 |
                      ord(plaintext[i * 4 + 2]) << 8 |
                      ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
        # apply round transforms (one full round per T1-T4 lookup pass)
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
                        T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                        T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Ke[ROUNDS][i]
            result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))
    def decrypt(self, ciphertext):
        """Decrypt one block; ``ciphertext`` must be exactly ``block_size``
        characters. Returns the plaintext as a string of the same length."""
        if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
        Kd = self.Kd
        BC = self.block_size // 4
        ROUNDS = len(Kd) - 1
        # SC selects the shift-row table for this block size.
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        # Decryption shift offsets for rows 1-3 (index 1 of each pair).
        s1 = shifts[SC][1][1]
        s2 = shifts[SC][2][1]
        s3 = shifts[SC][3][1]
        a = [0] * BC
        # temporary work array
        t = [0] * BC
        # ciphertext to ints + key
        for i in range(BC):
            t[i] = (ord(ciphertext[i * 4 ]) << 24 |
                    ord(ciphertext[i * 4 + 1]) << 16 |
                    ord(ciphertext[i * 4 + 2]) << 8 |
                    ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
        # apply round transforms (one inverse round per T5-T8 lookup pass)
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
                        T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                        T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Kd[ROUNDS][i]
            result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))
def encrypt(key, block):
    """One-shot helper: encrypt a single block with a fresh key schedule."""
    cipher = rijndael(key, len(block))
    return cipher.encrypt(block)
def decrypt(key, block):
    """One-shot helper: decrypt a single block with a fresh key schedule."""
    cipher = rijndael(key, len(block))
    return cipher.decrypt(block)
def test():
    """Round-trip a block through every (key size, block size) combination."""
    for key_len in (16, 24, 32):
        for block_len in (16, 24, 32):
            block = 'b' * block_len
            cipher = rijndael('a' * key_len, block_len)
            assert cipher.decrypt(cipher.encrypt(block)) == block
| 29.029178 | 117 | 0.426992 |
7954d41a1866d33c8a8539dbdf2b54758cc13843 | 7,535 | py | Python | Collect/JRC/DataAccess.py | ali1100/wa | 700e5014533c45f38a245c3abdeacc537cb307bc | [
"Apache-2.0"
] | 16 | 2017-04-27T21:22:37.000Z | 2020-10-21T12:57:03.000Z | Collect/JRC/DataAccess.py | ali1100/wa | 700e5014533c45f38a245c3abdeacc537cb307bc | [
"Apache-2.0"
] | 1 | 2017-06-17T08:07:53.000Z | 2017-08-22T12:28:37.000Z | Collect/JRC/DataAccess.py | wateraccounting/wa | 29ed8e7eac732135678a5d171cd5e53a54c95313 | [
"Apache-2.0"
] | 19 | 2016-10-24T13:24:34.000Z | 2020-02-03T17:42:22.000Z | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/JRC
"""
# import general python modules
import os
import numpy as np
import shutil
# Water Accounting modules
import wa.General.raster_conversions as RC
import wa.General.data_conversions as DC
def DownloadData(Dir,latlim, lonlim, Waitbar):
"""
This function downloads JRC data
Keyword arguments:
Dir -- 'C:/file/to/path/'
latlim -- [ymin, ymax] (values must be between -90 and 90)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
Waitbar -- 1 (Default) will print a waitbar
"""
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -90 or latlim[1] > 90:
print 'Latitude above 90N or below 90S is not possible. Value set to maximum'
latlim[0] = np.max(latlim[0], -90)
latlim[1] = np.min(latlim[1], 90)
if lonlim[0] < -180 or lonlim[1] > 180:
print 'Longitude must be between 180E and 180W. Now value is set to maximum'
lonlim[0] = np.max(lonlim[0], -180)
lonlim[1] = np.min(lonlim[1], 180)
# Make directory for the JRC water occurrence data
Dir = Dir.replace("/", os.sep)
output_folder = os.path.join(Dir, 'JRC', 'Occurrence')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
fileName_out = os.path.join(output_folder, 'JRC_Occurrence_percent.tif')
if not os.path.exists(fileName_out):
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = 1
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# This function defines the name of dataset that needs to be collected
Names_to_download = Tiles_to_download(lonlim,latlim)
# Pass variables to parallel function and run
args = [output_folder, Names_to_download, lonlim, latlim]
RetrieveData(args)
if Waitbar == 1:
amount = 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
else:
print 'JRC water occurrence map already exists'
return()
def RetrieveData(args):
"""
This function retrieves JRC data for a given date from the
http://storage.googleapis.com/global-surface-water/downloads/ server.
Keyword arguments:
args -- A list of parameters defined in the DownloadData function.
"""
# Argument
[output_folder, Names_to_download, lonlim, latlim] = args
# Collect the data from the JRC webpage and returns the data and lat and long in meters of those tiles
try:
Collect_data(Names_to_download, output_folder)
except:
print "Was not able to download the file"
# Clip the data to the users extend
if len(Names_to_download) == 1:
trash_folder = os.path.join(output_folder, "Trash")
data_in = os.path.join(trash_folder, Names_to_download[0])
data_end, geo_end = RC.clip_data(data_in, latlim, lonlim)
else:
data_end = np.zeros([int((latlim[1] - latlim[0])/0.00025), int((lonlim[1] - lonlim[0])/0.00025)])
for Name_to_merge in Names_to_download:
trash_folder = os.path.join(output_folder, "Trash")
data_in = os.path.join(trash_folder, Name_to_merge)
geo_out, proj, size_X, size_Y = RC.Open_array_info(data_in)
lat_min_merge = np.maximum(latlim[0], geo_out[3] + size_Y * geo_out[5])
lat_max_merge = np.minimum(latlim[1], geo_out[3])
lon_min_merge = np.maximum(lonlim[0], geo_out[0])
lon_max_merge = np.minimum(lonlim[1], geo_out[0] + size_X * geo_out[1])
lonmerge = [lon_min_merge, lon_max_merge]
latmerge = [lat_min_merge, lat_max_merge]
data_one, geo_one = RC.clip_data(data_in, latmerge, lonmerge)
Ystart = int((geo_one[3] - latlim[1])/geo_one[5])
Yend = int(Ystart + np.shape(data_one)[0])
Xstart = int((geo_one[0] - lonlim[0])/geo_one[1])
Xend = int(Xstart + np.shape(data_one)[1])
data_end[Ystart:Yend, Xstart:Xend] = data_one
geo_end = tuple([lonlim[0], geo_one[1], 0, latlim[1], 0, geo_one[5]])
# Save results as Gtiff
fileName_out = os.path.join(output_folder, 'JRC_Occurrence_percent.tif')
DC.Save_as_tiff(name=fileName_out, data=data_end, geo=geo_end, projection='WGS84')
shutil.rmtree(trash_folder)
return True
def Tiles_to_download(lonlim, latlim):
'''
Defines the JRC tiles that must be downloaded in order to cover the latitude and longitude limits
Keywords arguments:
lonlim -- [ymin, ymax] (longitude limits of the chunk or whole image)
latlim -- [ymin, ymax] (latitude limits of the chunk or whole image)
'''
latmin = int(np.floor(latlim[0]/10.)*10)
latmax = int(np.ceil(latlim[1]/10.)*10)
lonmin = int(np.floor(lonlim[0]/10.)*10)
lonmax = int(np.ceil(lonlim[1]/10.)*10)
lat_steps = range(latmin + 10 , latmax + 10 , 10)
lon_steps = range(lonmin, lonmax, 10)
Names_to_download = []
for lon_step in lon_steps:
if lon_step < 0:
string_long = "%sW" %abs(lon_step)
else:
string_long = "%sE" %lon_step
for lat_step in lat_steps:
if lat_step < 0:
string_lat = "%sS" %abs(lat_step)
else:
string_lat = "%sN" %lat_step
Name_to_download = "occurrence_%s_%s.tif" %(string_long, string_lat)
Names_to_download = np.append(Name_to_download, Names_to_download)
return(Names_to_download)
def Collect_data(Names_to_download, output_folder):
'''
This function downloads all the needed JRC tiles from http://e4ftl01.cr.usgs.gov/MOLT/MOD13Q1.006/ as a hdf file.
Keywords arguments:
TilesHorizontal -- [TileMin,TileMax] max and min horizontal tile number
TilesVertical -- [TileMin,TileMax] max and min vertical tile number
Date -- 'yyyy-mm-dd'
output_folder -- 'C:/file/to/path/'
'''
import urllib
for Name_to_download in Names_to_download:
output_Trash = os.path.join(output_folder, "Trash")
if not os.path.exists(output_Trash):
os.mkdir(output_Trash)
filename = os.path.join(output_Trash, Name_to_download)
if not os.path.exists(filename):
times = 0
size = 0
while times < 10 and size < 10000:
url = "http://storage.googleapis.com/global-surface-water/downloads/occurrence/" + Name_to_download
code = urllib.urlopen(url).getcode()
if (code != 404):
urllib.urlretrieve(url, filename)
times += 1
statinfo = os.stat(filename)
size = int(statinfo.st_size)
else:
print url + " not found"
times = 10
return()
| 37.864322 | 117 | 0.603583 |
7954d48729c7910a3dd39b88b9dc6690453eb42b | 816 | py | Python | tests/test_examples/test_result/test_result_pattern_matching.py | internetimagery/returns | 8f4f23bae6861fb3969a8d0c8979f96c5589fc61 | [
"BSD-2-Clause"
] | null | null | null | tests/test_examples/test_result/test_result_pattern_matching.py | internetimagery/returns | 8f4f23bae6861fb3969a8d0c8979f96c5589fc61 | [
"BSD-2-Clause"
] | 92 | 2022-01-03T01:14:21.000Z | 2022-03-30T00:32:09.000Z | tests/test_examples/test_result/test_result_pattern_matching.py | thepabloaguilar/returns | 5fe76feb9cfd4d9535306a2da0b1dded25fd29f6 | [
"BSD-2-Clause"
] | null | null | null | from returns.result import Failure, Success, safe
@safe
def div(first_number: int, second_number: int) -> int:
return first_number // second_number
match div(1, 0):
# Matches if the result stored inside `Success` is `10`
case Success(10):
print('Result is "10"')
# Same as above but using match-by-name
case Success(inner_value=20):
print('Result is "20"')
# Matches any `Success` instance and binds its value to the `value` variable
case Success(value):
print('Result is "{0}"'.format(value))
# Matches if the result stored inside `Failure` is `ZeroDivisionError`
case Failure(ZeroDivisionError):
print('"ZeroDivisionError" was raised')
# Matches any `Failure` instance
case Failure(_):
print('The division was a failure')
| 28.137931 | 80 | 0.665441 |
7954d596c754744a3b7737456a9ca3340506c492 | 170 | py | Python | katas/kyu_7/some_circles.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | katas/kyu_7/some_circles.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | katas/kyu_7/some_circles.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | from math import pi
OUTPUT = 'We have this much circle: {:.0f}'.format
def sum_circles(*args):
return OUTPUT(sum(pi * (diameter / 2.0) ** 2 for diameter in args))
| 21.25 | 71 | 0.664706 |
7954d596d17d7202cbede7b05191922d40a8bf8d | 178 | py | Python | modularodm/signals.py | felliott/modular-odm | 8a34891892b8af69b21fdc46701c91763a5c1cf9 | [
"Apache-2.0"
] | 4 | 2018-09-21T18:57:03.000Z | 2020-10-19T05:38:14.000Z | modularodm/signals.py | cos-archives/modular-odm | 8a34891892b8af69b21fdc46701c91763a5c1cf9 | [
"Apache-2.0"
] | null | null | null | modularodm/signals.py | cos-archives/modular-odm | 8a34891892b8af69b21fdc46701c91763a5c1cf9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import blinker
signals = blinker.Namespace()
load = signals.signal('load')
before_save = signals.signal('before_save')
save = signals.signal('save')
| 14.833333 | 43 | 0.691011 |
7954d6f484b4e501cff9d380db43252caa0e329c | 5,167 | py | Python | tests/compiler/dataflow/test_model_relations.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 6 | 2021-03-09T10:24:02.000Z | 2022-01-16T03:52:11.000Z | tests/compiler/dataflow/test_model_relations.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 1,319 | 2020-12-18T08:52:29.000Z | 2022-03-31T18:17:32.000Z | tests/compiler/dataflow/test_model_relations.py | inmanta/inmanta-core | ae2153d57f124d00ad1b58e6d4bc6818364be4a8 | [
"Apache-2.0"
] | 4 | 2021-03-03T15:36:50.000Z | 2022-03-11T11:41:51.000Z | """
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import pytest
from compiler.dataflow.conftest import DataflowTestHelper
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("inherit_relation", [True, False])
@pytest.mark.parametrize("assign_first", [True, False])
def test_dataflow_model_relation(
dataflow_test_helper: DataflowTestHelper, bidirectional: bool, inherit_relation: bool, assign_first: bool
) -> None:
relation_stmt: str = "%s.b [1] -- B%s" % ("AParent" if inherit_relation else "A", ".a [1]" if bidirectional else "")
dataflow_test_helper.compile(
"""
entity AParent:
end
implement AParent using std::none
entity A extends AParent:
end
implement A using std::none
entity B:
end
implement B using std::none
%s
a = A()
b = B()
a.b = b
%s
"""
% ("" if assign_first else relation_stmt, relation_stmt if assign_first else "")
)
bidirectional_rule: str = "<instance> b . a -> <instance> a"
dataflow_test_helper.verify_graphstring(
"""
a -> <instance> a
b -> <instance> b
<instance> a . b -> b
%s
"""
% (bidirectional_rule if bidirectional else ""),
)
if not bidirectional:
with pytest.raises(AssertionError):
dataflow_test_helper.verify_graphstring(bidirectional_rule)
dataflow_test_helper.verify_leaves({"a": {"a"}, "b": {"b"}, "a.b": {"b"}, "b.a": {"b.a"}})
def test_dataflow_model_assignment_to_relation(dataflow_test_helper: DataflowTestHelper) -> None:
dataflow_test_helper.compile(
"""
entity X:
end
entity U:
end
entity V:
number n
end
X.u [1] -- U
U.v [1] -- V
implement X using std::none
implement U using std::none
implement V using std::none
n = 42
x = X()
x.u = U()
x.u.v = V()
x.u.v.n = n
""",
)
dataflow_test_helper.verify_graphstring(
"""
x -> <instance> x
<instance> x . u -> <instance> u
<instance> u . v -> <instance> v
<instance> v . n -> n
n -> 42
""",
)
dataflow_test_helper.verify_leaves({"x.u.v.n": {"n"}})
def test_dataflow_model_assignment_from_relation(dataflow_test_helper: DataflowTestHelper) -> None:
dataflow_test_helper.compile(
"""
entity U:
end
entity V:
number n
end
U.v [1] -- V
implement U using std::none
implement V using std::none
n = 42
u = U(v = v)
v = V(n = n)
uvn = u.v.n
""",
)
dataflow_test_helper.verify_graphstring(
"""
n -> 42
u -> <instance> u
v -> <instance> v
<instance> u . v -> v
<instance> v . n -> n
uvn -> u . v . n
""",
)
dataflow_test_helper.verify_leaves({"n": {"n"}, "u": {"u"}, "v": {"v"}, "u.v": {"v"}, "v.n": {"n"}, "uvn": {"n"}})
def test_dataflow_model_index(dataflow_test_helper: DataflowTestHelper) -> None:
dataflow_test_helper.compile(
"""
entity A:
number n
number k
number l
end
index A(n)
implement A using std::none
x = A(n = 42, k = 0)
y = A(n = 42, l = 1)
""",
)
dataflow_test_helper.verify_graphstring(
"""
x -> <instance> x
y -> <instance> y
<instance> x . n -> [ 42 42 ]
<instance> y . n -> [ 42 42 ]
<instance> x . k -> 0
<instance> y . k -> 0
<instance> x . l -> 1
<instance> y . l -> 1
""",
)
def test_dataflow_model_default_attribute(dataflow_test_helper: DataflowTestHelper) -> None:
dataflow_test_helper.compile(
"""
entity A:
number n = 42
end
implement A using std::none
x = A()
y = A(n = 0)
""",
)
dataflow_test_helper.verify_graphstring(
"""
x -> <instance> x
y -> <instance> y
<instance> x . n -> 42
<instance> y . n -> 0
""",
)
@pytest.mark.parametrize("refer_out", [True, False])
def test_dataflow_model_implementation(dataflow_test_helper: DataflowTestHelper, refer_out: bool) -> None:
dataflow_test_helper.compile(
"""
entity A:
number n
end
implementation i for A:
self.n = %s
end
implement A using i
nn = 42
x = A()
"""
% ("nn" if refer_out else 42),
)
dataflow_test_helper.verify_graphstring(
"""
nn -> 42
x -> <instance> x
<instance> x . n -> %s
"""
% ("nn" if refer_out else 42),
)
def test_dataflow_model_unsupported_bidirectional_doesnt_crash(dataflow_test_helper: DataflowTestHelper) -> None:
dataflow_test_helper.compile(
"""
entity A:
end
entity B:
end
implement A using std::none
implement B using std::none
A.b [0:] -- B.a [0:]
a = A()
# Lists are not supported yet. Mustn't crash on trying to model the other side of the bidirectional relation
a.b = [B(), B()]
""",
)
| 20.105058 | 120 | 0.628605 |
7954d73f446c343c4c0fbf1e55bf7095f3d52925 | 5,588 | py | Python | ALGO_BreadthFirstSearch.py | divergent63/DataStructureAlgorithms | 1e378a31beb6ac1b333b560f0bfe2a3daeef7be2 | [
"Apache-2.0"
] | null | null | null | ALGO_BreadthFirstSearch.py | divergent63/DataStructureAlgorithms | 1e378a31beb6ac1b333b560f0bfe2a3daeef7be2 | [
"Apache-2.0"
] | null | null | null | ALGO_BreadthFirstSearch.py | divergent63/DataStructureAlgorithms | 1e378a31beb6ac1b333b560f0bfe2a3daeef7be2 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import math
import DST_Graph
import DST_Queue
# 词梯问题
def BuildWordGraph(file):
with open(file) as f:
word_data = f.readlines()
buckets = {}
for line in word_data:
for i in range(len(line)-1):
bucket = line[:i] + '_' + line[i+1:]
if bucket not in buckets.keys():
buckets[bucket] = []
buckets[bucket].append(line[:-1])
else:
buckets[bucket].append(line[:-1])
word_graph = DST_Graph.Graph()
for word_key in buckets.keys():
for word1 in buckets[word_key]:
if word1 not in word_graph:
word_graph.AddNode(word1)
for word2 in buckets[word_key]:
if word2 not in word_graph:
word_graph.AddNode(word2)
if word1 != word2:
word_graph.AddEdge(word1, word2, weight=None)
return word_graph
def BreadthFirstSearch(word_graph, word_item):
# TODO: Wrong Answer
q = DST_Queue.queue_test()
# for idx, word_item in enumerate(word_graph):
q.push(word_item)
word_item.State = 0
word_item.Dis = 0
word_item.Pre = None
# SetState(word_item, 0)
# SetDistance(word_item, 0)
# SetPre(word_item, None)
while q.__sizeof__() > 0:
word_item.State = 1
next_node_all = q.pop().GetAdjIDs()
for qnext_item in list(next_node_all): # 获取所有邻节点
word_graph.GetNode(qnext_item).State = 0
# SetState(qnext_item, 0)
for qnext_item in list(next_node_all): # 获取所有邻节点
if word_graph.GetNode(qnext_item).State == 0: # 邻节点未探索
q.push(word_graph.GetNode(qnext_item))
word_graph.GetNode(qnext_item).State = 1
word_graph.GetNode(qnext_item).Dis += 1
word_graph.GetNode(qnext_item).Pre = word_item
# SetState(qnext_item, 1)
# SetDistance(qnext_item, idx+1) # 距离加1
# SetPre(qnext_item, word_item) # 设置qnext_item的前驱节点为word_item
word_item = word_graph.GetNode(qnext_item)
word_item.State = 2
return word_graph
def TransversePath(node):
path_lst = [node.GetVID()]
while node.Pre is not None:
path_lst.append(node.Pre.GetVID())
node = node.Pre
return path_lst
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# 返回二维列表[[1,2],[4,5]]
def _DropNone(self, lst):
StayIdx = []
for i in range(len(lst)):
if lst[i] is not None:
StayIdx.append(i)
return [lst[StayIdx[i]] for i in range(len(StayIdx))]
def Print(self, pRoot):
# write code here
if pRoot is None:
return []
CurrentNode = pRoot
CurrentNodeIniDepth = [pRoot]
PrintLst = [CurrentNode.val]
results = []
NextDepthLst = []
# PrintLst.append([CurrentNode.key, CurrentNode.val])
while True:
for i in range(len(CurrentNodeIniDepth)):
if CurrentNodeIniDepth.count(None) == len(CurrentNodeIniDepth):
PrintLstWihtLines = []
for i in range(int(math.log2(len(PrintLst)))):
PrintLstInLine = self._DropNone(PrintLst[int(2**(i)-1):int(2**(i+1))-1])
PrintLstWihtLines.append(PrintLstInLine)
print(PrintLstWihtLines)
return PrintLstWihtLines
if CurrentNodeIniDepth[i]:
PrintLst.append(CurrentNodeIniDepth[i].left.val) if CurrentNodeIniDepth[
i].left is not None else PrintLst.append(
None)
PrintLst.append(CurrentNodeIniDepth[i].right.val) if CurrentNodeIniDepth[
i].right is not None else PrintLst.append(
None)
else:
PrintLst.append(None)
PrintLst.append(None)
# results.append(PrintLst)
for i in range(len(CurrentNodeIniDepth)):
if CurrentNodeIniDepth[i]:
NextDepthLst.append(CurrentNodeIniDepth[i].left) if CurrentNodeIniDepth[
i].left is not None else NextDepthLst.append(
None)
NextDepthLst.append(CurrentNodeIniDepth[i].right) if CurrentNodeIniDepth[
i].right is not None else NextDepthLst.append(
None)
else:
NextDepthLst.append(None)
NextDepthLst.append(None)
CurrentNodeIniDepth = NextDepthLst
NextDepthLst = []
if __name__ == '__main__':
# word_graph = BuildWordGraph('./datasets/fourletterwords.txt')
# BreadthFirstSearch(word_graph, word_graph.GetNode('ABOS'))
# print(TransversePath(word_graph.GetNode('ACID')))
# {8,6,10,5,7,9,11}
pRoot = TreeNode(8)
pRoot.left = TreeNode(6)
pRoot.right = TreeNode(10)
pRoot.left.left = TreeNode(5)
pRoot.left.right = TreeNode(7)
pRoot.right.left = TreeNode(9)
pRoot.right.right = TreeNode(11)
s = Solution()
s.Print(pRoot)
print()
| 34.073171 | 123 | 0.535612 |
7954d8d2ddc97a571c7bfe05ddb0799eca64b525 | 20,642 | py | Python | tfx/orchestration/experimental/core/async_pipeline_task_gen_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 1 | 2021-08-22T21:10:48.000Z | 2021-08-22T21:10:48.000Z | tfx/orchestration/experimental/core/async_pipeline_task_gen_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 5 | 2020-07-14T10:14:34.000Z | 2020-09-03T01:35:09.000Z | tfx/orchestration/experimental/core/async_pipeline_task_gen_test.py | avelez93/tfx | 75fbb6a7d50e99138609be3ca4c3a204a13a2195 | [
"Apache-2.0"
] | 1 | 2020-12-13T22:07:53.000Z | 2020-12-13T22:07:53.000Z | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.async_pipeline_task_gen."""
import os
from absl.testing import parameterized
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen as asptg
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.experimental.core.testing import test_async_pipeline
from tfx.utils import status as status_lib
from google.protobuf import any_pb2
from ml_metadata.proto import metadata_store_pb2
class AsyncPipelineTaskGeneratorTest(test_utils.TfxTest,
parameterized.TestCase):
def setUp(self):
super().setUp()
pipeline_root = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self.id())
self._pipeline_root = pipeline_root
# Makes sure multiple connections within a test always connect to the same
# MLMD instance.
metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
self._metadata_path = metadata_path
connection_config = metadata.sqlite_metadata_connection_config(
metadata_path)
connection_config.sqlite.SetInParent()
self._mlmd_connection = metadata.Metadata(
connection_config=connection_config)
# Sets up the pipeline.
pipeline = test_async_pipeline.create_pipeline()
self._pipeline = pipeline
self._pipeline_info = pipeline.pipeline_info
self._pipeline_runtime_spec = pipeline.runtime_spec
self._pipeline_runtime_spec.pipeline_root.field_value.string_value = (
pipeline_root)
# Extracts components.
self._example_gen = pipeline.nodes[0].pipeline_node
self._transform = pipeline.nodes[1].pipeline_node
self._trainer = pipeline.nodes[2].pipeline_node
self._task_queue = tq.TaskQueue()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
def _is_pure_service_node(unused_pipeline_state, node_id):
return node_id == self._example_gen.node_info.id
def _is_mixed_service_node(unused_pipeline_state, node_id):
return node_id == self._transform.node_info.id
self._mock_service_job_manager.is_pure_service_node.side_effect = (
_is_pure_service_node)
self._mock_service_job_manager.is_mixed_service_node.side_effect = (
_is_mixed_service_node)
def _default_ensure_node_services(unused_pipeline_state, node_id):
self.assertIn(
node_id,
(self._example_gen.node_info.id, self._transform.node_info.id))
return service_jobs.ServiceStatus.RUNNING
self._mock_service_job_manager.ensure_node_services.side_effect = (
_default_ensure_node_services)
def _finish_node_execution(self, use_task_queue, exec_node_task):
"""Simulates successful execution of a node."""
test_utils.fake_execute_node(self._mlmd_connection, exec_node_task)
if use_task_queue:
dequeued_task = self._task_queue.dequeue()
self._task_queue.task_done(dequeued_task)
self.assertEqual(exec_node_task.task_id, dequeued_task.task_id)
def _generate_and_test(self,
use_task_queue,
num_initial_executions,
num_tasks_generated,
num_new_executions,
num_active_executions,
expected_exec_nodes=None,
ignore_update_node_state_tasks=False):
"""Generates tasks and tests the effects."""
return test_utils.run_generator_and_test(
self,
self._mlmd_connection,
asptg.AsyncPipelineTaskGenerator,
self._pipeline,
self._task_queue,
use_task_queue,
self._mock_service_job_manager,
num_initial_executions=num_initial_executions,
num_tasks_generated=num_tasks_generated,
num_new_executions=num_new_executions,
num_active_executions=num_active_executions,
expected_exec_nodes=expected_exec_nodes,
ignore_update_node_state_tasks=ignore_update_node_state_tasks)
@parameterized.parameters(0, 1)
def test_no_tasks_generated_when_no_inputs(self, min_count):
"""Tests no tasks are generated when there are no inputs, regardless of min_count."""
for node in self._pipeline.nodes:
for v in node.pipeline_node.inputs.inputs.values():
v.min_count = min_count
with self._mlmd_connection as m:
pipeline_state = test_utils.get_or_create_pipeline_state(
m, self._pipeline)
task_gen = asptg.AsyncPipelineTaskGenerator(
m, lambda _: False, service_jobs.DummyServiceJobManager())
tasks = task_gen.generate(pipeline_state)
self.assertEmpty(tasks, 'Expected no task generation when no inputs.')
self.assertEmpty(
test_utils.get_non_orchestrator_executions(m),
'There must not be any registered executions since no tasks were '
'generated.')
@parameterized.parameters(False, True)
def test_task_generation(self, use_task_queue):
"""Tests async pipeline task generation.
Args:
use_task_queue: If task queue is enabled, new tasks are only generated if
a task with the same task_id does not already exist in the queue.
`use_task_queue=False` is useful to test the case of task generation
when task queue is empty (for eg: due to orchestrator restart).
"""
# Simulate that ExampleGen has already completed successfully.
test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
1)
# Generate once.
[update_example_gen_task, update_transform_task,
exec_transform_task] = self._generate_and_test(
use_task_queue,
num_initial_executions=1,
num_tasks_generated=3,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._transform])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.RUNNING, update_transform_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_transform_task))
self._mock_service_job_manager.ensure_node_services.assert_has_calls([
mock.call(mock.ANY, self._example_gen.node_info.id),
mock.call(mock.ANY, self._transform.node_info.id)
])
# No new effects if generate called again.
tasks = self._generate_and_test(
use_task_queue,
num_initial_executions=2,
num_tasks_generated=1 if use_task_queue else 3,
num_new_executions=0,
num_active_executions=1,
expected_exec_nodes=[] if use_task_queue else [self._transform])
if not use_task_queue:
exec_transform_task = tasks[2]
# Mark transform execution complete.
self._finish_node_execution(use_task_queue, exec_transform_task)
# Trainer execution task should be generated next.
[
update_example_gen_task, update_transform_task, update_trainer_task,
exec_trainer_task
] = self._generate_and_test(
use_task_queue,
num_initial_executions=2,
num_tasks_generated=4,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._trainer])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))
# Mark the trainer execution complete.
self._finish_node_execution(use_task_queue, exec_trainer_task)
# Only UpdateNodeStateTask are generated as there are no new inputs.
tasks = self._generate_and_test(
use_task_queue,
num_initial_executions=3,
num_tasks_generated=3,
num_new_executions=0,
num_active_executions=0)
for task in tasks:
self.assertTrue(task_lib.is_update_node_state_task(task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
# Fake another ExampleGen run.
test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
1)
# Both transform and trainer tasks should be generated as they both find
# new inputs.
[
update_example_gen_task, update_transform_task, exec_transform_task,
update_trainer_task, exec_trainer_task
] = self._generate_and_test(
use_task_queue,
num_initial_executions=4,
num_tasks_generated=5,
num_new_executions=2,
num_active_executions=2,
expected_exec_nodes=[self._transform, self._trainer])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.RUNNING, update_transform_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_transform_task))
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))
# Re-generation will produce the same tasks when task queue disabled.
tasks = self._generate_and_test(
use_task_queue,
num_initial_executions=6,
num_tasks_generated=1 if use_task_queue else 5,
num_new_executions=0,
num_active_executions=2,
expected_exec_nodes=[]
if use_task_queue else [self._transform, self._trainer])
if not use_task_queue:
self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(tasks[1]))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_exec_node_task(tasks[2]))
self.assertTrue(task_lib.is_update_node_state_task(tasks[3]))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_exec_node_task(tasks[4]))
exec_transform_task = tasks[2]
exec_trainer_task = tasks[4]
else:
self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
# Mark transform execution complete.
self._finish_node_execution(use_task_queue, exec_transform_task)
# Mark the trainer execution complete.
self._finish_node_execution(use_task_queue, exec_trainer_task)
# Trainer should be triggered again due to transform producing new output.
[
update_example_gen_task, update_transform_task, update_trainer_task,
exec_trainer_task
] = self._generate_and_test(
use_task_queue,
num_initial_executions=6,
num_tasks_generated=4,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._trainer])
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.RUNNING, update_trainer_task.state)
self.assertTrue(task_lib.is_exec_node_task(exec_trainer_task))
# Finally, no new tasks once trainer completes.
self._finish_node_execution(use_task_queue, exec_trainer_task)
[update_example_gen_task, update_transform_task,
update_trainer_task] = self._generate_and_test(
use_task_queue,
num_initial_executions=7,
num_tasks_generated=3,
num_new_executions=0,
num_active_executions=0)
self.assertTrue(task_lib.is_update_node_state_task(update_example_gen_task))
self.assertEqual(pstate.NodeState.RUNNING, update_example_gen_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_transform_task))
self.assertEqual(pstate.NodeState.STARTED, update_transform_task.state)
self.assertTrue(task_lib.is_update_node_state_task(update_trainer_task))
self.assertEqual(pstate.NodeState.STARTED, update_trainer_task.state)
if use_task_queue:
self.assertTrue(self._task_queue.is_empty())
@parameterized.parameters(False, True)
def test_task_generation_when_node_stopped(self, stop_transform):
  """Tests stopped nodes are ignored when generating tasks."""
  # Simulate that ExampleGen has already completed successfully.
  test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
                                  1)
  # Generate once.
  num_initial_executions = 1
  if stop_transform:
    # With transform stopped only a single node-state update task is
    # expected and no new execution should be registered.
    num_tasks_generated = 1
    num_new_executions = 0
    num_active_executions = 0
    with self._mlmd_connection as m:
      pipeline_state = test_utils.get_or_create_pipeline_state(
          m, self._pipeline)
      with pipeline_state:
        with pipeline_state.node_state_update_context(
            task_lib.NodeUid.from_pipeline_node(
                self._pipeline, self._transform)) as node_state:
          # Mark transform as STOPPING with a CANCELLED status so the task
          # generator must skip it.
          node_state.update(pstate.NodeState.STOPPING,
                            status_lib.Status(code=status_lib.Code.CANCELLED))
  else:
    num_tasks_generated = 3
    num_new_executions = 1
    num_active_executions = 1
  tasks = self._generate_and_test(
      True,
      num_initial_executions=num_initial_executions,
      num_tasks_generated=num_tasks_generated,
      num_new_executions=num_new_executions,
      num_active_executions=num_active_executions)
  self.assertLen(tasks, num_tasks_generated)
  if stop_transform:
    self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
    self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)
  else:
    self.assertTrue(task_lib.is_update_node_state_task(tasks[0]))
    self.assertEqual(pstate.NodeState.RUNNING, tasks[0].state)
    self.assertTrue(task_lib.is_update_node_state_task(tasks[1]))
    self.assertEqual(pstate.NodeState.RUNNING, tasks[1].state)
    self.assertTrue(task_lib.is_exec_node_task(tasks[2]))
def test_service_job_failed(self):
  """Tests task generation when example-gen service job fails."""

  def _ensure_node_services(unused_pipeline_state, node_id):
    # Fake service-job-manager hook: report the example-gen service job
    # for the node under test as FAILED.
    self.assertEqual('my_example_gen', node_id)
    return service_jobs.ServiceStatus.FAILED

  self._mock_service_job_manager.ensure_node_services.side_effect = (
      _ensure_node_services)
  # A failed service job should yield exactly one node-state update task
  # (ABORTED) and register no executions.
  [update_task] = self._generate_and_test(
      True,
      num_initial_executions=0,
      num_tasks_generated=1,
      num_new_executions=0,
      num_active_executions=0)
  self.assertTrue(task_lib.is_update_node_state_task(update_task))
  self.assertEqual(status_lib.Code.ABORTED, update_task.status.code)
def test_triggering_upon_exec_properties_change(self):
  """Tests a node is re-triggered only when its exec properties change."""
  test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
                                  1)
  [exec_transform_task] = self._generate_and_test(
      False,
      num_initial_executions=1,
      num_tasks_generated=1,
      num_new_executions=1,
      num_active_executions=1,
      expected_exec_nodes=[self._transform],
      ignore_update_node_state_tasks=True)
  # Fail the registered execution.
  with self._mlmd_connection as m:
    with mlmd_state.mlmd_execution_atomic_op(
        m, exec_transform_task.execution_id) as execution:
      execution.last_known_state = metadata_store_pb2.Execution.FAILED
  # Try to generate with same execution properties. This should not trigger
  # as there are no changes since last run.
  self._generate_and_test(
      False,
      num_initial_executions=2,
      num_tasks_generated=0,
      num_new_executions=0,
      num_active_executions=0,
      ignore_update_node_state_tasks=True)
  # Change execution properties of last run.
  with self._mlmd_connection as m:
    with mlmd_state.mlmd_execution_atomic_op(
        m, exec_transform_task.execution_id) as execution:
      execution.custom_properties['a_param'].int_value = 20
  # Generating with different execution properties should trigger.
  self._generate_and_test(
      False,
      num_initial_executions=2,
      num_tasks_generated=1,
      num_new_executions=1,
      num_active_executions=1,
      expected_exec_nodes=[self._transform],
      ignore_update_node_state_tasks=True)
def test_triggering_upon_executor_spec_change(self):
  """Tests a node is re-triggered only when its executor spec changes."""
  test_utils.fake_example_gen_run(self._mlmd_connection, self._example_gen, 1,
                                  1)
  # First run under executor spec "1": transform should be triggered.
  with mock.patch.object(task_gen_utils,
                         'get_executor_spec') as mock_get_executor_spec:
    mock_get_executor_spec.side_effect = _fake_executor_spec(1)
    [exec_transform_task] = self._generate_and_test(
        False,
        num_initial_executions=1,
        num_tasks_generated=1,
        num_new_executions=1,
        num_active_executions=1,
        expected_exec_nodes=[self._transform],
        ignore_update_node_state_tasks=True)
  # Fail the registered execution.
  with self._mlmd_connection as m:
    with mlmd_state.mlmd_execution_atomic_op(
        m, exec_transform_task.execution_id) as execution:
      execution.last_known_state = metadata_store_pb2.Execution.FAILED
  # Try to generate with same executor spec. This should not trigger as
  # there are no changes since last run.
  with mock.patch.object(task_gen_utils,
                         'get_executor_spec') as mock_get_executor_spec:
    mock_get_executor_spec.side_effect = _fake_executor_spec(1)
    self._generate_and_test(
        False,
        num_initial_executions=2,
        num_tasks_generated=0,
        num_new_executions=0,
        num_active_executions=0,
        ignore_update_node_state_tasks=True)
  # Generating with a different executor spec should trigger.
  with mock.patch.object(task_gen_utils,
                         'get_executor_spec') as mock_get_executor_spec:
    mock_get_executor_spec.side_effect = _fake_executor_spec(2)
    self._generate_and_test(
        False,
        num_initial_executions=2,
        num_tasks_generated=1,
        num_new_executions=1,
        num_active_executions=1,
        expected_exec_nodes=[self._transform],
        ignore_update_node_state_tasks=True)
def _fake_executor_spec(val):
  """Returns a stub `get_executor_spec` whose spec wraps `val` as an int Value.

  The returned callable ignores all arguments, so it can stand in for
  `task_gen_utils.get_executor_spec` in mock side_effects.
  """

  def _stub(*_args, **_kwargs):
    packed = any_pb2.Any()
    packed.Pack(metadata_store_pb2.Value(int_value=val))
    return packed

  return _stub
# Module entry point: run all TensorFlow test cases defined above.
if __name__ == '__main__':
  tf.test.main()
| 42.212679 | 89 | 0.734425 |
7954d8eb867e6b05e58aeb637ab70133f64edc4a | 3,004 | py | Python | tensorflow_gan/examples/self_attention_estimator/discriminator_test.py | sanidhyamangal/gan | 540ab76c04b5ad80cefa068e0f349b80ea4decb1 | [
"Apache-2.0"
] | 1 | 2022-01-05T11:48:21.000Z | 2022-01-05T11:48:21.000Z | tensorflow_gan/examples/self_attention_estimator/discriminator_test.py | HabibMrad/gan-1 | 6a2bf12f968d0a913e8040121edc8bb6e0680a08 | [
"Apache-2.0"
] | 1 | 2021-02-24T00:51:29.000Z | 2021-02-24T00:51:29.000Z | tensorflow_gan/examples/self_attention_estimator/discriminator_test.py | HabibMrad/gan-1 | 6a2bf12f968d0a913e8040121edc8bb6e0680a08 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the discriminator and its helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.self_attention_estimator import discriminator
class DiscriminatorTest(tf.test.TestCase):
  """Shape and value-range tests for the discriminator helpers."""

  def test_generator_shapes_and_ranges(self):
    """Tests the discriminator.

    Make sure the image shapes and output value ranges are as expected.
    """
    if tf.executing_eagerly():
      # `compute_spectral_norm` doesn't work when executing eagerly.
      return
    batch_size = 10
    num_classes = 1000
    # Sample one random class label per batch element from uniform logits.
    gen_class_logits = tf.zeros((batch_size, num_classes))
    gen_class_ints = tf.random.categorical(
        logits=gen_class_logits, num_samples=1)
    gen_sparse_class = tf.squeeze(gen_class_ints)
    images = tf.random.normal([10, 32, 32, 3])
    d_out, var_list = discriminator.discriminator(images, gen_sparse_class, 16,
                                                  1000)
    sess = tf.train.MonitoredTrainingSession()
    images_np = sess.run(d_out)
    # One scalar output per image, expected to lie in [-1, 1].
    self.assertEqual((batch_size, 1), images_np.shape)
    self.assertAllInRange(images_np, -1.0, 1.0)
    self.assertIsInstance(var_list, list)

  def test_dsample_shapes(self):
    """Tests that downsampling has the desired effect on shape."""
    image = tf.random.normal([10, 32, 32, 3])
    big_image = discriminator.dsample(image)
    # Spatial dims are halved; batch and channel dims are unchanged.
    self.assertEqual([10, 16, 16, 3], big_image.shape.as_list())

  def test_block_shapes(self):
    """Tests that passing volumes through blocks affects shapes correctly."""
    if tf.executing_eagerly():
      # `compute_spectral_norm` doesn't work when executing eagerly.
      return
    image = tf.random.normal([10, 32, 32, 3])
    image_after_block = discriminator.block(image, 13, 'test_block')
    self.assertEqual([10, 16, 16, 13], image_after_block.shape.as_list())

  def test_optimized_block_shapes(self):
    """Tests that passing volumes through blocks affects shapes correctly."""
    if tf.executing_eagerly():
      # `compute_spectral_norm` doesn't work when executing eagerly.
      return
    image = tf.random.normal([10, 32, 32, 3])
    image_after_block = discriminator.optimized_block(image, 13, 'test_block')
    self.assertEqual([10, 16, 16, 13], image_after_block.shape.as_list())
# Module entry point: run all TensorFlow test cases defined above.
if __name__ == '__main__':
  tf.test.main()
| 38.512821 | 79 | 0.721039 |
7954d9d64c2672f636daa3aef67464d8a108a6fd | 2,369 | py | Python | Optimus_5Prime/dataloader.py | Luma-1994/lama | 60d802e2e4cce789f03eea11b038212ba5f7fd1b | [
"MIT"
] | 137 | 2018-03-13T17:44:46.000Z | 2022-02-18T06:07:45.000Z | Optimus_5Prime/dataloader.py | Luma-1994/lama | 60d802e2e4cce789f03eea11b038212ba5f7fd1b | [
"MIT"
] | 111 | 2018-03-14T08:16:35.000Z | 2022-03-04T18:26:41.000Z | Optimus_5Prime/dataloader.py | Luma-1994/lama | 60d802e2e4cce789f03eea11b038212ba5f7fd1b | [
"MIT"
] | 57 | 2018-03-14T08:39:24.000Z | 2022-02-01T15:56:04.000Z | import gffutils
import pybedtools
from kipoi.data import Dataset
from kipoiseq.transforms import OneHot
from kipoiseq.extractors import FastaStringExtractor
from kipoi.metadata import GenomicRanges
def get_upstream(feature, n_bases):
    """Get the interval of ``n_bases`` immediately upstream of ``feature``.

    Upstream is taken with respect to the feature's strand::

        [ interval ][>>>feature>>>>]
        <- n_bases->

    Args:
      feature: gffutils.Feature
      n_bases: width of the upstream interval in bases

    Returns:
      pybedtools interval carrying the feature's chrom, id, score and strand.

    NOTE(review): ``start`` may go negative for features closer than
    ``n_bases`` to the chromosome start -- confirm downstream code clips it.
    """
    if feature.strand == '-':
        # Minus strand: upstream lies to the right of the feature's end.
        start, end = feature.end, feature.end + n_bases
    else:
        # Plus (or unspecified) strand: upstream lies left of the start.
        start, end = feature.start - n_bases, feature.start
    interval_fields = [feature.chrom, start, end,
                       feature.id, feature.score, feature.strand]
    return pybedtools.create_interval_from_list(interval_fields)
class FixedSeq5UtrDl(Dataset):
    """Dataset of fixed-length sequences upstream of annotated start codons.

    Each item is the one-hot-encoded ``n_upstream``-base sequence directly
    upstream of a start codon from the GTF, plus genomic-range metadata.
    """

    # Number of bases extracted upstream of each start codon.
    n_upstream = 50

    def __init__(self, gtf_file, fasta_file,
                 disable_infer_transcripts=True,
                 disable_infer_genes=True):
        self.gtf_file = gtf_file
        self.fasta_file = fasta_file
        # Built lazily on first __getitem__ call (e.g. after forking workers).
        self.fasta_extractor = None
        self.db = gffutils.create_db(
            gtf_file, ":memory:",
            disable_infer_transcripts=disable_infer_transcripts,
            disable_infer_genes=disable_infer_genes)
        self.start_codons = list(self.db.features_of_type("start_codon"))
        self.input_transform = OneHot()

    def __len__(self):
        return len(self.start_codons)

    def _extractor(self):
        # Lazily construct the FASTA extractor exactly once.
        if self.fasta_extractor is None:
            self.fasta_extractor = FastaStringExtractor(
                self.fasta_file, use_strand=True, force_upper=True)
        return self.fasta_extractor

    def __getitem__(self, idx):
        codon = self.start_codons[idx]
        interval = get_upstream(codon, self.n_upstream)
        encoded = self.input_transform(self._extractor().extract(interval))
        attrs = codon.attributes
        return {
            "inputs": encoded,
            "metadata": {
                "ranges": GenomicRanges.from_interval(interval),
                "gene_id": attrs.get('gene_id', [""])[0],
                "transcript_id": attrs.get('transcript_id', [""])[0],
                "gene_biotype": attrs.get('gene_biotype', [""])[0]
            }
        }
7954d9eed3f9d91d444cc16f6f724caf872c0435 | 1,518 | py | Python | pysrc/lecture.py | kamnxt/campuscircle | 8b5c2aa823709472fc501174626a7089fdcc1743 | [
"MIT"
] | null | null | null | pysrc/lecture.py | kamnxt/campuscircle | 8b5c2aa823709472fc501174626a7089fdcc1743 | [
"MIT"
] | null | null | null | pysrc/lecture.py | kamnxt/campuscircle | 8b5c2aa823709472fc501174626a7089fdcc1743 | [
"MIT"
] | null | null | null | import event
from utils import get_last_part_of_string_filtered, get_time_from_period
class Lecture(event.Event):
    """A single lecture event, stored internally as a plain dict."""

    def __init__(self, dictform):
        self._dictform = dictform

    def to_jsonable(self):
        """Return the JSON-serializable dict representation of this lecture."""
        return self._dictform

    @classmethod
    def from_html(cls, date, html):
        """Parse a lecture from an element whose text is "<period>: <info>[@<location>]".

        Args:
            date: the day the lecture belongs to (currently unused; kept for
                interface compatibility with other parsers).
            html: element exposing a ``.text`` attribute to parse.

        Returns:
            A Lecture wrapping a dict with keys type/from_time/to_time/
            location/info/flag.  ``flag`` is True when the period token
            needed cleanup before it could be resolved to a time range.
        """
        # NOTE(review): split(":") without maxsplit drops text after a second
        # colon in the info part -- confirm the source never contains one.
        split = html.text.split(":")
        period = split[0].strip()
        flag = False
        info = split[1].strip()
        try:
            time_from_period = get_time_from_period(period)
        except ValueError:
            # Period token carries extra leading text: peel off the trailing
            # alphanumeric run as the real period, prepend the rest to info.
            flag = True
            period, moreinfo = get_last_part_of_string_filtered(
                period, lambda c: c.isalnum()
            )
            info = moreinfo + info
            time_from_period = get_time_from_period(period)
        from_time, to_time = time_from_period
        # Parsed purely to validate the "HH:MM" format early; a malformed
        # time raises ValueError here instead of at serialization time.
        from_h, from_m = (int(x) for x in from_time.split(":"))
        to_h, to_m = (int(x) for x in to_time.split(":"))
        # Bug fix: previously info was split on "@" unconditionally inside a
        # dead `except IndexError` guard (str.split never raises IndexError),
        # so an info string *without* "@" ended up with info == "" and the
        # whole text in location.  Only split when a location is present.
        if "@" in info:
            loc_split = info.split("@")
            info = "@".join(loc_split[:-1])
            location = loc_split[-1]
        else:
            location = ""
        return cls(
            {
                "type": "lecture",
                "from_time": from_time,  # day.replace(hour=from_h, minute=from_m).isoformat(),
                "to_time": to_time,  # day.replace(hour=to_h, minute=to_m).isoformat(),
                "location": location,
                "info": info,
                "flag": flag,
            }
        )
| 30.979592 | 95 | 0.538208 |
7954db89b7afa3d5db0bed4c5263e2822789afa6 | 5,139 | py | Python | ansible/lib/ansible/modules/extras/network/citrix/netscaler.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/network/citrix/netscaler.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/network/citrix/netscaler.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage Citrix NetScaler entities
(c) 2013, Nandor Sivok <nandor@gawker.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: netscaler
version_added: "1.1"
short_description: Manages Citrix NetScaler entities
description:
- Manages Citrix NetScaler server and service entities.
options:
nsc_host:
description:
- hostname or ip of your netscaler
required: true
default: null
aliases: []
nsc_protocol:
description:
- protocol used to access netscaler
required: false
default: https
aliases: []
user:
description:
- username
required: true
default: null
aliases: []
password:
description:
- password
required: true
default: null
aliases: []
action:
description:
- the action you want to perform on the entity
required: false
default: disable
choices: ["enable", "disable"]
aliases: []
name:
description:
- name of the entity
required: true
default: hostname
aliases: []
type:
description:
- type of the entity
required: false
default: server
choices: ["server", "service"]
aliases: []
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
author: "Nandor Sivok (@dominis)"
'''
EXAMPLES = '''
# Disable the server
ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass"
# Enable the server
ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=enable"
# Disable the service local:8080
ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=disable"
'''
import base64
import socket
import urllib
class netscaler(object):
    """Thin wrapper around the Citrix NetScaler NITRO v1 REST API.

    Connection attributes (_nsc_host, _nsc_user, _nsc_pass, _nsc_protocol,
    _name, _type) are attached externally by core() before use.
    """

    # Base path of the NITRO API on the appliance.
    _nitro_base_url = '/nitro/v1/'

    def __init__(self, module):
        # AnsibleModule instance; fetch_url reads url/SSL options from it.
        self.module = module

    def http_request(self, api_endpoint, data_json={}):
        """Call `api_endpoint` with form-encoded `data_json`; return parsed JSON.

        NOTE(review): mutable default argument ``data_json={}`` is a known
        Python pitfall; harmless here only because the dict is never mutated
        before being replaced by the urlencoded string.
        """
        request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint

        # Empty payloads are sent as None so fetch_url performs a plain GET.
        data_json = urllib.urlencode(data_json)
        if not len(data_json):
            data_json = None

        # HTTP Basic auth header (Python-2-era base64.encodestring inserts
        # newlines, hence the replace/strip).
        auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
        headers = {
            'Authorization': 'Basic %s' % auth,
            'Content-Type' : 'application/x-www-form-urlencoded',
        }

        response, info = fetch_url(self.module, request_url, data=data_json, headers=headers)

        return json.load(response)

    def prepare_request(self, action):
        """Build and send the NITRO 'config' payload applying `action` to the entity."""
        resp = self.http_request(
            'config',
            {
                "object":
                {
                    "params": {"action": action},
                    self._type: {"name": self._name}
                }
            }
        )

        return resp
def core(module):
    """Run the requested enable/disable action; return (errorcode, response).

    Args:
        module: AnsibleModule holding the validated task parameters.
    """
    n = netscaler(module)
    # Connection/entity parameters are attached as attributes because
    # http_request/prepare_request read them from self.
    n._nsc_host = module.params.get('nsc_host')
    n._nsc_user = module.params.get('user')
    n._nsc_pass = module.params.get('password')
    n._nsc_protocol = module.params.get('nsc_protocol')
    n._name = module.params.get('name')
    n._type = module.params.get('type')
    action = module.params.get('action')

    r = n.prepare_request(action)

    # NITRO responses carry an 'errorcode' field; main() treats non-zero
    # as failure.
    return r['errorcode'], r
def main():
    """Ansible module entry point: parse arguments, call core(), report result."""
    # NOTE(review): DOCUMENTATION above states the default action is
    # 'disable', but the argument spec defaults to 'enable' -- confirm which
    # is intended.
    module = AnsibleModule(
        argument_spec = dict(
            nsc_host = dict(required=True),
            nsc_protocol = dict(default='https'),
            user = dict(required=True),
            password = dict(required=True),
            action = dict(default='enable', choices=['enable','disable']),
            name = dict(default=socket.gethostname()),
            type = dict(default='server', choices=['service', 'server']),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    rc = 0
    try:
        rc, result = core(module)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))

    if rc != 0:
        # Non-zero NITRO errorcode: report failure with the raw response.
        module.fail_json(rc=rc, msg=result)
    else:
        result['changed'] = True
        module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.pycompat24 import get_exception
main()
| 26.765625 | 129 | 0.643316 |
7954dc2dac231e61c25b29da283258592517e0ff | 8,427 | py | Python | back-end/object_detection/trainer_test.py | scorelab/Elphas | be3e3906fa1f69155dc3f61f5c0bf21568e712c9 | [
"Apache-2.0"
] | 59 | 2018-09-23T09:34:24.000Z | 2020-03-10T04:31:27.000Z | back-end/object_detection/trainer_test.py | scorelab/Elphas | be3e3906fa1f69155dc3f61f5c0bf21568e712c9 | [
"Apache-2.0"
] | 5 | 2018-10-02T14:49:12.000Z | 2020-07-14T02:54:30.000Z | back-end/object_detection/trainer_test.py | scorelab/Elphas | be3e3906fa1f69155dc3f61f5c0bf21568e712c9 | [
"Apache-2.0"
] | 58 | 2018-09-23T10:31:47.000Z | 2021-11-08T11:34:40.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection import trainer
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
NUMBER_OF_CLASSES = 2
def get_input_function():
  """A function to get test inputs. Returns an image with one box."""
  # Random 32x32 RGB image with a fixed key.
  image = tf.random_uniform([32, 32, 3], dtype=tf.float32)
  key = tf.constant('image_000000')
  class_label = tf.random_uniform(
      [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32)
  # A single groundtruth box roughly centered (all coords in [0.4, 0.6]).
  box_label = tf.random_uniform(
      [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32)

  return {
      fields.InputDataFields.image: image,
      fields.InputDataFields.key: key,
      fields.InputDataFields.groundtruth_classes: class_label,
      fields.InputDataFields.groundtruth_boxes: box_label
  }
class FakeDetectionModel(model.DetectionModel):
  """A simple (and poor) DetectionModel for use in test."""

  def __init__(self):
    super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
    # Weighted sigmoid classification and smooth-L1 localization losses.
    self._classification_loss = losses.WeightedSigmoidClassificationLoss()
    self._localization_loss = losses.WeightedSmoothL1LocalizationLoss()

  def preprocess(self, inputs):
    """Input preprocessing, resizes images to 28x28.

    Args:
      inputs: a [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images with values between 0 and 255.0.

    Returns:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
    """
    true_image_shapes = [inputs.shape[:-1].as_list()
                         for _ in range(inputs.shape[-1])]
    return tf.image.resize_images(inputs, [28, 28]), true_image_shapes

  def predict(self, preprocessed_inputs, true_image_shapes):
    """Prediction tensors from inputs tensor.

    Args:
      preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.

    Returns:
      prediction_dict: a dictionary holding prediction tensors to be
        passed to the Loss or Postprocess functions.
    """
    # One fully-connected head each for class logits and box encodings,
    # predicting a single box per image.
    flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
    class_prediction = tf.contrib.layers.fully_connected(
        flattened_inputs, self._num_classes)
    box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)

    return {
        'class_predictions_with_background': tf.reshape(
            class_prediction, [-1, 1, self._num_classes]),
        'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
    }

  def postprocess(self, prediction_dict, true_image_shapes, **params):
    """Convert predicted output tensors to final detections. Unused.

    Args:
      prediction_dict: a dictionary holding prediction tensors.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
      **params: Additional keyword arguments for specific implementations of
        DetectionModel.

    Returns:
      detections: a dictionary with empty fields.
    """
    return {
        'detection_boxes': None,
        'detection_scores': None,
        'detection_classes': None,
        'num_detections': None
    }

  def loss(self, prediction_dict, true_image_shapes):
    """Compute scalar loss tensors with respect to provided groundtruth.

    Calling this function requires that groundtruth tensors have been
    provided via the provide_groundtruth function.

    Args:
      prediction_dict: a dictionary holding predicted tensors
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.

    Returns:
      a dictionary mapping strings (loss names) to scalar tensors representing
        loss values.
    """
    batch_reg_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.boxes))
    batch_cls_targets = tf.stack(
        self.groundtruth_lists(fields.BoxListFields.classes))
    # Every groundtruth entry is weighted equally (1.0).
    weights = tf.constant(
        1.0, dtype=tf.float32,
        shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])

    location_losses = self._localization_loss(
        prediction_dict['box_encodings'], batch_reg_targets,
        weights=weights)
    cls_losses = self._classification_loss(
        prediction_dict['class_predictions_with_background'], batch_cls_targets,
        weights=weights)

    loss_dict = {
        'localization_loss': tf.reduce_sum(location_losses),
        'classification_loss': tf.reduce_sum(cls_losses),
    }
    return loss_dict

  def restore_map(self, from_detection_checkpoint=True):
    """Returns a map of variables to load from a foreign checkpoint.

    Args:
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      A dict mapping variable names to variables.
    """
    # Restores every global variable under its own name; the
    # `from_detection_checkpoint` flag is accepted but not used here.
    return {var.op.name: var for var in tf.global_variables()}
class TrainerTest(tf.test.TestCase):
  """End-to-end smoke test of trainer.train with the fake model."""

  def test_configure_trainer_and_train_two_steps(self):
    """Trains the FakeDetectionModel for two steps and expects no errors."""
    # Minimal config: Adam with a constant learning rate, two augmentation
    # ops, and num_steps: 2 so the test terminates quickly.
    train_config_text_proto = """
    optimizer {
      adam_optimizer {
        learning_rate {
          constant_learning_rate {
            learning_rate: 0.01
          }
        }
      }
    }
    data_augmentation_options {
      random_adjust_brightness {
        max_delta: 0.2
      }
    }
    data_augmentation_options {
      random_adjust_contrast {
        min_delta: 0.7
        max_delta: 1.1
      }
    }
    num_steps: 2
    """
    train_config = train_pb2.TrainConfig()
    text_format.Merge(train_config_text_proto, train_config)

    train_dir = self.get_temp_dir()

    # Single-clone, CPU-only, chief-worker configuration.
    trainer.train(create_tensor_dict_fn=get_input_function,
                  create_model_fn=FakeDetectionModel,
                  train_config=train_config,
                  master='',
                  task=0,
                  num_clones=1,
                  worker_replicas=1,
                  clone_on_cpu=True,
                  ps_tasks=0,
                  worker_job_name='worker',
                  is_chief=True,
                  train_dir=train_dir)
# Module entry point: run all TensorFlow test cases defined above.
if __name__ == '__main__':
  tf.test.main()
| 38.131222 | 85 | 0.628219 |
7954dc8cfc36b35bf3e7d164bf39e6d187d97ff4 | 1,834 | py | Python | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/operation.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/operation.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/operation.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""An operation for Azure Container Instance service.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the operation.
:type name: str
:param display: Required. The display information of the operation.
:type display: ~azure.mgmt.containerinstance.models.OperationDisplay
:param properties: The additional properties.
:type properties: object
:param origin: The intended executor of the operation. Possible values
include: 'User', 'System'
:type origin: str or
~azure.mgmt.containerinstance.models.ContainerInstanceOperationsOrigin
"""
_validation = {
'name': {'required': True},
'display': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'properties': {'key': 'properties', 'type': 'object'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Operation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.properties = kwargs.get('properties', None)
self.origin = kwargs.get('origin', None)
| 36.68 | 76 | 0.610142 |
7954dd3647c450f2a05a6d8dfbc85c0882bfa594 | 354 | py | Python | Code.py | Sup3Legacy/TIPE | 7e01cef869183c4d609c45d5fcf0bb371a9579f5 | [
"BSD-3-Clause"
] | null | null | null | Code.py | Sup3Legacy/TIPE | 7e01cef869183c4d609c45d5fcf0bb371a9579f5 | [
"BSD-3-Clause"
] | null | null | null | Code.py | Sup3Legacy/TIPE | 7e01cef869183c4d609c45d5fcf0bb371a9579f5 | [
"BSD-3-Clause"
] | 1 | 2020-06-28T06:07:17.000Z | 2020-06-28T06:07:17.000Z | import os
from PIL.Image import *
def image():
    """Brighten "TR.jpg" by doubling each RGB channel, display it, return False.

    Fixes over the previous version: channel values are clamped to 255
    instead of relying on implicit wrapping/clamping, pixel access uses the
    fast `load()` accessor instead of per-call getpixel/putpixel, the local
    no longer shadows the function name, and the file handle is closed.
    """
    img = open("TR.jpg")  # PIL.Image.open via the star import above
    print(img.format, img.size, img.mode)
    width, height = img.size
    pixels = img.load()  # direct pixel-access object; forces the image data to load
    for i in range(width):
        for j in range(height):
            r, g, b = pixels[i, j]
            pixels[i, j] = (min(2 * r, 255), min(2 * g, 255), min(2 * b, 255))
    img.show()
    img.close()
    return False
| 25.285714 | 58 | 0.511299 |
7954dd78e94e5ae2af66831564c1c8a6c7623ca8 | 1,301 | py | Python | No_0172_Factorial Trailing Zeroes/factorial_trailing_zeroes_iterative.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 32 | 2020-01-05T13:37:16.000Z | 2022-03-26T07:27:09.000Z | No_0172_Factorial Trailing Zeroes/factorial_trailing_zeroes_iterative.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | null | null | null | No_0172_Factorial Trailing Zeroes/factorial_trailing_zeroes_iterative.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 8 | 2020-06-18T16:17:27.000Z | 2022-03-15T23:58:18.000Z | '''
Description:
Given an integer n, return the number of trailing zeroes in n!.
Example 1:
Input: 3
Output: 0
Explanation: 3! = 6, no trailing zero.
Example 2:
Input: 5
Output: 1
Explanation: 5! = 120, one trailing zero.
'''
# Hint:
# Trailing 0s are generated from the product of 5 x 2 pair.
# And the bounding condition is 5 instead of 2
# ( the number of multiplier of 5 is less than the number of multiplier of 2 within constant n! )
#
# Find the 5 x 2 pair can be reduced to find the number of factor 5 in n!
class Solution:
    def trailingZeroes(self, n: int) -> int:
        """Return the number of trailing zeros in n!.

        Trailing zeros come from factor pairs 5 x 2, and factors of 5 are
        the scarcer of the two, so the answer is the number of factors of 5
        in n! (Legendre's formula): floor(n/5) + floor(n/25) + ...

        Bug fix: the previous version halved `n` with float division
        (`n = n / 5`), which loses precision for very large n; integer
        floor division keeps the result exact for arbitrary-size ints.
        """
        count_of_5 = 0
        while n > 4:
            n //= 5
            count_of_5 += n
        return count_of_5
# N : the input number of n
## Time Complexity: O( log n )
#
# The overhead is the while loop, it takes O( log5 n ) to reach base case.
## Space Complexity: O( 1 )
#
# The overhead is the variable for looping index and counter of 5
def test_bench():
    """Smoke-test trailingZeroes on two samples, printing the results.

    Expected output: 0 then 1
    (3! = 6 has no trailing zero; 5! = 120 has one trailing zero).
    """
    for sample in [3, 5]:
        print(Solution().trailingZeroes(sample))
    return
if __name__ == '__main__':
test_bench() | 16.896104 | 97 | 0.595696 |
7954dea37f6e9a6fb91fbb15629b0d1e379f177e | 223,668 | py | Python | venv/lib/python3.8/site-packages/plotly/basedatatypes.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | venv/lib/python3.8/site-packages/plotly/basedatatypes.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 1 | 2021-08-03T12:23:01.000Z | 2021-08-10T08:35:22.000Z | venv/lib/python3.8/site-packages/plotly/basedatatypes.py | SvtFilatov/soccer-predict-prices | b003ebd7c3657688790183ef7d719c42290c11b9 | [
"MIT"
] | 3 | 2021-01-31T16:40:52.000Z | 2021-08-29T18:32:34.000Z | from __future__ import absolute_import
import collections
from collections import OrderedDict
import re
import six
from six import string_types
import warnings
from contextlib import contextmanager
from copy import deepcopy, copy
import itertools
from functools import reduce
from _plotly_utils.utils import (
_natural_sort_strings,
_get_int_type,
split_multichar,
split_string_positions,
display_string_positions,
chomp_empty_strings,
find_closest_string,
)
from _plotly_utils.exceptions import PlotlyKeyError
from .optional_imports import get_module
from . import shapeannotation
from . import subplots
# Create Undefined sentinel value
# - Setting a property to None removes any existing value
# - Setting a property to Undefined leaves existing value unmodified
Undefined = object()
def _len_dict_item(item):
"""
Because a parsed dict path is a tuple containings strings or integers, to
know the length of the resulting string when printing we might need to
convert to a string before calling len on it.
"""
try:
l = len(item)
except TypeError:
try:
l = len("%d" % (item,))
except TypeError:
raise ValueError(
"Cannot find string length of an item that is not string-like nor an integer."
)
return l
def _str_to_dict_path_full(key_path_str):
    """
    Convert a key path string into a tuple of key path elements and also
    return a tuple of indices marking the beginning of each element in the
    string.

    Parameters
    ----------
    key_path_str : str
        Key path string, where nested keys are joined on '.' characters
        and array indexes are specified using brackets
        (e.g. 'foo.bar[1]')
    Returns
    -------
    tuple[str | int]
        The parsed path components (array indexes converted to int).
    tuple[int]
        For each component, its starting character offset in key_path_str
        (used by callers to point at the offending component in errors).
    """
    # skip all the parsing if the string is empty
    if len(key_path_str):
        # split string on ".[]" and filter out empty strings
        key_path2 = split_multichar([key_path_str], list(".[]"))
        # Split out underscore
        # e.g. ['foo', 'bar_baz', '1'] -> ['foo', 'bar', 'baz', '1']
        key_path3 = []
        underscore_props = BaseFigure._valid_underscore_properties

        def _make_hyphen_key(key):
            if "_" in key[1:]:
                # For valid properties that contain underscores (error_x)
                # replace the underscores with hyphens to protect them
                # from being split up
                for under_prop, hyphen_prop in underscore_props.items():
                    key = key.replace(under_prop, hyphen_prop)
            return key

        def _make_underscore_key(key):
            # Undo the hyphen protection applied by _make_hyphen_key
            return key.replace("-", "_")

        key_path2b = list(map(_make_hyphen_key, key_path2))
        # Here we want to split up each non-empty string in the list at
        # underscores and recombine the strings using chomp_empty_strings so
        # that leading, trailing and multiple _ will be preserved
        def _split_and_chomp(s):
            if not len(s):
                return s
            s_split = split_multichar([s], list("_"))
            # handle key paths like "a_path_", "_another_path", or
            # "yet__another_path" by joining extra "_" to the string to the right or
            # the empty string if at the end
            s_chomped = chomp_empty_strings(s_split, "_", reverse=True)
            return s_chomped

        # after running _split_and_chomp on key_path2b, it will be a list
        # containing strings and lists of strings; concatenate the sublists with
        # the list ("lift" the items out of the sublists)
        key_path2c = list(
            reduce(
                lambda x, y: x + y if type(y) == type(list()) else x + [y],
                map(_split_and_chomp, key_path2b),
                [],
            )
        )
        key_path2d = list(map(_make_underscore_key, key_path2c))
        all_elem_idcs = tuple(split_string_positions(list(key_path2d)))
        # remove empty strings, and indices pointing to them
        key_elem_pairs = list(filter(lambda t: len(t[1]), enumerate(key_path2d)))
        key_path3 = [x for _, x in key_elem_pairs]
        elem_idcs = [all_elem_idcs[i] for i, _ in key_elem_pairs]
        # Convert elements to ints if possible.
        # e.g. ['foo', 'bar', '0'] -> ['foo', 'bar', 0]
        for i in range(len(key_path3)):
            try:
                key_path3[i] = int(key_path3[i])
            except ValueError as _:
                pass
    else:
        key_path3 = []
        elem_idcs = []
    return (tuple(key_path3), elem_idcs)
def _remake_path_from_tuple(props):
"""
try to remake a path using the properties in props
"""
if len(props) == 0:
return ""
def _add_square_brackets_to_number(n):
if type(n) == type(int()):
return "[%d]" % (n,)
return n
def _prepend_dot_if_not_number(s):
if not s.startswith("["):
return "." + s
return s
props_all_str = list(map(_add_square_brackets_to_number, props))
props_w_underscore = props_all_str[:1] + list(
map(_prepend_dot_if_not_number, props_all_str[1:])
)
return "".join(props_w_underscore)
def _check_path_in_prop_tree(obj, path, error_cast=None):
    """
    Walk down the property tree rooted at obj following path and report
    any lookup failure as a (returned, not raised) exception with a
    pointer to the offending path component.

    obj: the object in which the first property is looked up
    path: the path that will be split into properties to be looked up
        path can also be a tuple. In this case, it is combined using .
        and [] because it is impossible to reconstruct the string fully
        in order to give a decent error message.
    error_cast: this function walks down the property tree by looking up values
        in objects. So this will throw exceptions that are thrown by
        __getitem__, but in some cases we are checking the path for a
        different reason and would prefer throwing a more relevant
        exception (e.g., __getitem__ throws KeyError but __setitem__
        throws ValueError for subclasses of BasePlotlyType and
        BaseFigure). So the resulting error can be "casted" to the
        passed in type, if not None.
    returns
        an Exception object or None. The caller can raise this
        exception to see where the lookup error occurred.
    """
    if isinstance(path, tuple):
        path = _remake_path_from_tuple(path)
    prop, prop_idcs = _str_to_dict_path_full(path)
    prev_objs = []
    for i, p in enumerate(prop):
        arg = ""
        prev_objs.append(obj)
        try:
            obj = obj[p]
        except (ValueError, KeyError, IndexError, TypeError) as e:
            arg = e.args[0]
            if issubclass(e.__class__, TypeError):
                # If obj doesn't support subscripting, state that and show the
                # (valid) property that gives the object that doesn't support
                # subscripting.
                if i > 0:
                    validator = prev_objs[i - 1]._get_validator(prop[i - 1])
                    arg += """
Invalid value received for the '{plotly_name}' property of {parent_name}
{description}""".format(
                        parent_name=validator.parent_name,
                        plotly_name=validator.plotly_name,
                        description=validator.description(),
                    )
                # In case i is 0, the best we can do is indicate the first
                # property in the string as having caused the error
                disp_i = max(i - 1, 0)
                dict_item_len = _len_dict_item(prop[disp_i])
                # if the path has trailing underscores, the prop string will start with "_"
                trailing_underscores = ""
                if prop[i][0] == "_":
                    trailing_underscores = " and path has trailing underscores"
                # if the path has trailing underscores and the display index is
                # one less than the prop index (see above), then we can also
                # indicate the offending underscores
                if (trailing_underscores != "") and (disp_i != i):
                    dict_item_len += _len_dict_item(prop[i])
                arg += """
Property does not support subscripting%s:
%s
%s""" % (
                    trailing_underscores,
                    path,
                    display_string_positions(
                        prop_idcs, disp_i, length=dict_item_len, char="^"
                    ),
                )
            else:
                # State that the property for which subscripting was attempted
                # is bad and indicate the start of the bad property.
                arg += """
Bad property path:
%s
%s""" % (
                    path,
                    display_string_positions(
                        prop_idcs, i, length=_len_dict_item(prop[i]), char="^"
                    ),
                )
            # Make KeyError more pretty by changing it to a PlotlyKeyError,
            # because the Python interpreter has a special way of printing
            # KeyError
            if isinstance(e, KeyError):
                e = PlotlyKeyError()
            if error_cast is not None:
                e = error_cast()
            e.args = (arg,)
            return e
    return None
def _combine_dicts(dicts):
all_args = dict()
for d in dicts:
for k in d:
all_args[k] = d[k]
return all_args
def _indexing_combinations(dims, alls, product=False):
"""
Gives indexing tuples specified by the coordinates in dims.
If a member of dims is 'all' then it is replaced by the corresponding member
in alls.
If product is True, then the cartesian product of all the indices is
returned, otherwise the zip (that means index lists of mis-matched length
will yield a list of tuples whose length is the length of the shortest
list).
"""
if len(dims) == 0:
# this is because list(itertools.product(*[])) returns [()] which has non-zero
# length!
return []
if len(dims) != len(alls):
raise ValueError(
"Must have corresponding values in alls for each value of dims. Got dims=%s and alls=%s."
% (str(dims), str(alls))
)
r = []
for d, a in zip(dims, alls):
if d == "all":
d = a
elif not isinstance(d, list):
d = [d]
r.append(d)
if product:
return itertools.product(*r)
else:
return zip(*r)
def _is_select_subplot_coordinates_arg(*args):
""" Returns true if any args are lists or the string 'all' """
return any((a == "all") or isinstance(a, list) for a in args)
def _axis_spanning_shapes_docstr(shape_type):
    """
    Return the docstring text for one of the axis-spanning shape helpers
    (add_hline, add_vline, add_hrect, add_vrect), selected by shape_type,
    which must be one of "hline", "vline", "hrect" or "vrect".
    """
    docstr = ""
    if shape_type == "hline":
        docstr = """
Add a horizontal line to a plot or subplot that extends infinitely in the
x-dimension.

Parameters
----------
y: float or int
    A number representing the y coordinate of the horizontal line."""
    elif shape_type == "vline":
        docstr = """
Add a vertical line to a plot or subplot that extends infinitely in the
y-dimension.

Parameters
----------
x: float or int
    A number representing the x coordinate of the vertical line."""
    elif shape_type == "hrect":
        docstr = """
Add a rectangle to a plot or subplot that extends infinitely in the
x-dimension.

Parameters
----------
y0: float or int
    A number representing the y coordinate of one side of the rectangle.
y1: float or int
    A number representing the y coordinate of the other side of the rectangle."""
    elif shape_type == "vrect":
        docstr = """
Add a rectangle to a plot or subplot that extends infinitely in the
y-dimension.

Parameters
----------
x0: float or int
    A number representing the x coordinate of one side of the rectangle.
x1: float or int
    A number representing the x coordinate of the other side of the rectangle."""
    # Parameters common to all four helpers
    docstr += """
exclude_empty_subplots: Boolean
    If True (default) do not place the shape on subplots that have no data
    plotted on them.
row: None, int or 'all'
    Subplot row for shape indexed starting at 1. If 'all', addresses all rows in
    the specified column(s). If both row and col are None, addresses the
    first subplot if subplots exist, or the only plot. By default is "all".
col: None, int or 'all'
    Subplot column for shape indexed starting at 1. If 'all', addresses all rows in
    the specified column(s). If both row and col are None, addresses the
    first subplot if subplots exist, or the only plot. By default is "all".
annotation: dict or plotly.graph_objects.layout.Annotation. If dict(),
    it is interpreted as describing an annotation. The annotation is
    placed relative to the shape based on annotation_position (see
    below) unless its x or y value has been specified for the annotation
    passed here. xref and yref are always the same as for the added
    shape and cannot be overridden."""
    # Position semantics differ between lines and rectangles
    if shape_type in ["hline", "vline"]:
        docstr += """
annotation_position: a string containing optionally ["top", "bottom"]
    and ["left", "right"] specifying where the text should be anchored
    to on the line. Example positions are "bottom left", "right top",
    "right", "bottom". If an annotation is added but annotation_position is
    not specified, this defaults to "top right"."""
    elif shape_type in ["hrect", "vrect"]:
        docstr += """
annotation_position: a string containing optionally ["inside", "outside"], ["top", "bottom"]
    and ["left", "right"] specifying where the text should be anchored
    to on the rectangle. Example positions are "outside top left", "inside
    bottom", "right", "inside left", "inside" ("outside" is not supported). If
    an annotation is added but annotation_position is not specified this
    defaults to "inside top right"."""
    docstr += """
annotation_*: any parameters to go.layout.Annotation can be passed as
    keywords by prefixing them with "annotation_". For example, to specify the
    annotation text "example" you can pass annotation_text="example" as a
    keyword argument.
**kwargs:
    Any named function parameters that can be passed to 'add_shape',
    except for x0, x1, y0, y1 or type."""
    return docstr
def _generator(i):
""" "cast" an iterator to a generator """
for x in i:
yield x
class BaseFigure(object):
"""
Base class for all figure types (both widget and non-widget)
"""
_bracket_re = re.compile(r"^(.*)\[(\d+)\]$")
_valid_underscore_properties = {
"error_x": "error-x",
"error_y": "error-y",
"error_z": "error-z",
"copy_xstyle": "copy-xstyle",
"copy_ystyle": "copy-ystyle",
"copy_zstyle": "copy-zstyle",
"paper_bgcolor": "paper-bgcolor",
"plot_bgcolor": "plot-bgcolor",
}
_set_trace_uid = False
_allow_disable_validation = True
# Constructor
# -----------
    def __init__(
        self, data=None, layout_plotly=None, frames=None, skip_invalid=False, **kwargs
    ):
        """
        Construct a BaseFigure object

        Parameters
        ----------
        data
            One of:
            - A list or tuple of trace objects (or dicts that can be coerced
            into trace objects)

            - If `data` is a dict that contains a 'data',
            'layout', or 'frames' key then these values are used to
            construct the figure.

            - If `data` is a `BaseFigure` instance then the `data`, `layout`,
            and `frames` properties are extracted from the input figure
        layout_plotly
            The plotly layout dict.

            Note: this property is named `layout_plotly` rather than `layout`
            to deconflict it with the `layout` constructor parameter of the
            `widgets.DOMWidget` ipywidgets class, as the `BaseFigureWidget`
            class is a subclass of both BaseFigure and widgets.DOMWidget.

            If the `data` property is a BaseFigure instance, or a dict that
            contains a 'layout' key, then this property is ignored.
        frames
            A list or tuple of `plotly.graph_objs.Frame` objects (or dicts
            that can be coerced into Frame objects)

            If the `data` property is a BaseFigure instance, or a dict that
            contains a 'frames' key, then this property is ignored.

        skip_invalid: bool
            If True, invalid properties in the figure specification will be
            skipped silently. If False (default) invalid properties in the
            figure specification will result in a ValueError

        Raises
        ------
        ValueError
            if a property in the specification of data, layout, or frames
            is invalid AND skip_invalid is False
        """
        from .validators import DataValidator, LayoutValidator, FramesValidator

        super(BaseFigure, self).__init__()

        # Initialize validation
        # _validate may be disabled via the private _validate kwarg
        self._validate = kwargs.pop("_validate", True)

        # Assign layout_plotly to layout
        # ------------------------------
        # See docstring note for explanation
        layout = layout_plotly

        # Subplot properties
        # ------------------
        # These properties are used by the tools.make_subplots logic.
        # We initialize them to None here, before checking if the input data
        # object is a BaseFigure, or a dict with _grid_str and _grid_ref
        # properties, in which case we bring over the _grid* properties of
        # the input
        self._grid_str = None
        self._grid_ref = None

        # Handle case where data is a Figure or Figure-like dict
        # ------------------------------------------------------
        if isinstance(data, BaseFigure):
            # Bring over subplot fields
            self._grid_str = data._grid_str
            self._grid_ref = data._grid_ref

            # Extract data, layout, and frames
            data, layout, frames = data.data, data.layout, data.frames

        elif isinstance(data, dict) and (
            "data" in data or "layout" in data or "frames" in data
        ):

            # Bring over subplot fields
            self._grid_str = data.get("_grid_str", None)
            self._grid_ref = data.get("_grid_ref", None)

            # Extract data, layout, and frames
            data, layout, frames = (
                data.get("data", None),
                data.get("layout", None),
                data.get("frames", None),
            )

        # Handle data (traces)
        # --------------------
        # ### Construct data validator ###
        # This is the validator that handles importing sequences of trace
        # objects
        self._data_validator = DataValidator(set_uid=self._set_trace_uid)

        # ### Import traces ###
        data = self._data_validator.validate_coerce(
            data, skip_invalid=skip_invalid, _validate=self._validate
        )

        # ### Save tuple of trace objects ###
        self._data_objs = data

        # ### Import clone of trace properties ###
        # The _data property is a list of dicts containing the properties
        # explicitly set by the user for each trace.
        self._data = [deepcopy(trace._props) for trace in data]

        # ### Create data defaults ###
        # _data_defaults is a tuple of dicts, one for each trace. When
        # running in a widget context, these defaults are populated with
        # all property values chosen by the Plotly.js library that
        # aren't explicitly specified by the user.
        #
        # Note: No property should exist in both the _data and
        # _data_defaults for the same trace.
        self._data_defaults = [{} for _ in data]

        # ### Reparent trace objects ###
        for trace_ind, trace in enumerate(data):
            # By setting the trace's parent to be this figure, we tell the
            # trace object to use the figure's _data and _data_defaults
            # dicts to get/set it's properties, rather than using the trace
            # object's internal _orphan_props dict.
            trace._parent = self

            # We clear the orphan props since the trace no longer needs then
            trace._orphan_props.clear()

            # Set trace index
            trace._trace_ind = trace_ind

        # Layout
        # ------
        # ### Construct layout validator ###
        # This is the validator that handles importing Layout objects
        self._layout_validator = LayoutValidator()

        # ### Import Layout ###
        self._layout_obj = self._layout_validator.validate_coerce(
            layout, skip_invalid=skip_invalid, _validate=self._validate
        )

        # ### Import clone of layout properties ###
        self._layout = deepcopy(self._layout_obj._props)

        # ### Initialize layout defaults dict ###
        self._layout_defaults = {}

        # ### Reparent layout object ###
        self._layout_obj._orphan_props.clear()
        self._layout_obj._parent = self

        # Config
        # ------
        # Pass along default config to the front end. For now this just
        # ensures that the plotly domain url gets passed to the front end.
        # In the future we can extend this to allow the user to supply
        # arbitrary config options like in plotly.offline.plot/iplot. But
        # this will require a fair amount of testing to determine which
        # options are compatible with FigureWidget.
        from plotly.offline.offline import _get_jconfig

        self._config = _get_jconfig(None)

        # Frames
        # ------
        # ### Construct frames validator ###
        # This is the validator that handles importing sequences of frame
        # objects
        self._frames_validator = FramesValidator()

        # ### Import frames ###
        self._frame_objs = self._frames_validator.validate_coerce(
            frames, skip_invalid=skip_invalid
        )

        # Note: Because frames are not currently supported in the widget
        # context, we don't need to follow the pattern above and create
        # _frames and _frame_defaults properties and then reparent the
        # frames. The figure doesn't need to be notified of
        # changes to the properties in the frames object hierarchy.

        # Context manager
        # ---------------
        # ### batch mode indicator ###
        # Flag that indicates whether we're currently inside a batch_*()
        # context
        self._in_batch_mode = False

        # ### Batch trace edits ###
        # Dict from trace indexes to trace edit dicts. These trace edit dicts
        # are suitable as `data` elements of Plotly.animate, but not
        # the Plotly.update (See `_build_update_params_from_batch`)
        self._batch_trace_edits = OrderedDict()

        # ### Batch layout edits ###
        # Dict from layout properties to new layout values. This dict is
        # directly suitable for use in Plotly.animate and Plotly.update
        self._batch_layout_edits = OrderedDict()

        # Animation property validators
        # -----------------------------
        from . import animation

        self._animation_duration_validator = animation.DurationValidator()
        self._animation_easing_validator = animation.EasingValidator()

        # Template
        # --------
        # ### Check for default template ###
        self._initialize_layout_template()

        # Process kwargs
        # --------------
        # Any remaining kwargs are treated as figure property assignments;
        # unknown names raise (unless skip_invalid) with a path diagnostic
        for k, v in kwargs.items():
            err = _check_path_in_prop_tree(self, k)
            if err is None:
                self[k] = v
            elif not skip_invalid:
                type_err = TypeError("invalid Figure property: {}".format(k))
                type_err.args = (
                    type_err.args[0]
                    + """
%s"""
                    % (err.args[0],),
                )
                raise type_err
# Magic Methods
# -------------
def __reduce__(self):
"""
Custom implementation of reduce is used to support deep copying
and pickling
"""
props = self.to_dict()
props["_grid_str"] = self._grid_str
props["_grid_ref"] = self._grid_ref
return (self.__class__, (props,))
    def __setitem__(self, prop, value):
        """
        Set a figure property by key or nested key path.

        prop may be a plain key ('data', 'layout', 'frames'), a dotted /
        bracketed path string (e.g. 'layout.xaxis.title'), or a tuple path.
        Raises KeyError for unknown top-level keys and ValueError for bad
        nested paths.
        """
        # Normalize prop
        # --------------
        # Convert into a property tuple
        orig_prop = prop
        prop = BaseFigure._str_to_dict_path(prop)

        # Handle empty case
        # -----------------
        if len(prop) == 0:
            raise KeyError(orig_prop)

        # Handle scalar case
        # ------------------
        # e.g. ('foo',)
        elif len(prop) == 1:
            # ### Unwrap scalar tuple ###
            prop = prop[0]

            if prop == "data":
                self.data = value
            elif prop == "layout":
                self.layout = value
            elif prop == "frames":
                self.frames = value
            else:
                raise KeyError(prop)

        # Handle non-scalar case
        # ----------------------
        # e.g. ('foo', 1)
        else:
            # Validate the full path up front so the error points at the
            # offending component (cast to ValueError for assignment)
            err = _check_path_in_prop_tree(self, orig_prop, error_cast=ValueError)
            if err is not None:
                raise err
            res = self
            for p in prop[:-1]:
                res = res[p]

            res._validate = self._validate
            res[prop[-1]] = value
def __setattr__(self, prop, value):
"""
Parameters
----------
prop : str
The name of a direct child of this object
value
New property value
Returns
-------
None
"""
if prop.startswith("_") or hasattr(self, prop):
# Let known properties and private properties through
super(BaseFigure, self).__setattr__(prop, value)
else:
# Raise error on unknown public properties
raise AttributeError(prop)
    def __getitem__(self, prop):
        """
        Look up a figure property by key or nested key path.

        prop may be a plain key ('data', 'layout', 'frames'), a dotted /
        bracketed path string, or a tuple path. Raises PlotlyKeyError (a
        prettier KeyError) when the path is invalid.
        """
        # Normalize prop
        # --------------
        # Convert into a property tuple
        orig_prop = prop
        prop = BaseFigure._str_to_dict_path(prop)

        # Handle scalar case
        # ------------------
        # e.g. ('foo',)
        if len(prop) == 1:
            # Unwrap scalar tuple
            prop = prop[0]

            if prop == "data":
                return self._data_validator.present(self._data_objs)
            elif prop == "layout":
                return self._layout_validator.present(self._layout_obj)
            elif prop == "frames":
                return self._frames_validator.present(self._frame_objs)
            else:
                raise KeyError(orig_prop)

        # Handle non-scalar case
        # ----------------------
        # e.g. ('foo', 1)
        else:
            # Validate the path first so lookup errors carry a pointer to
            # the offending component
            err = _check_path_in_prop_tree(self, orig_prop, error_cast=PlotlyKeyError)
            if err is not None:
                raise err
            res = self
            for p in prop:
                res = res[p]

            return res
def __iter__(self):
return iter(("data", "layout", "frames"))
def __contains__(self, prop):
prop = BaseFigure._str_to_dict_path(prop)
if prop[0] not in ("data", "layout", "frames"):
return False
elif len(prop) == 1:
return True
else:
return prop[1:] in self[prop[0]]
def __eq__(self, other):
if not isinstance(other, BaseFigure):
# Require objects to both be BaseFigure instances
return False
else:
# Compare plotly_json representations
# Use _vals_equal instead of `==` to handle cases where
# underlying dicts contain numpy arrays
return BasePlotlyType._vals_equal(
self.to_plotly_json(), other.to_plotly_json()
)
def __repr__(self):
"""
Customize Figure representation when displayed in the
terminal/notebook
"""
props = self.to_plotly_json()
# Elide template
template_props = props.get("layout", {}).get("template", {})
if template_props:
props["layout"]["template"] = "..."
repr_str = BasePlotlyType._build_repr_for_class(
props=props, class_name=self.__class__.__name__
)
return repr_str
def _repr_html_(self):
"""
Customize html representation
"""
bundle = self._repr_mimebundle_()
if "text/html" in bundle:
return bundle["text/html"]
else:
return self.to_html(full_html=False, include_plotlyjs="cdn")
def _repr_mimebundle_(self, include=None, exclude=None, validate=True, **kwargs):
"""
Return mimebundle corresponding to default renderer.
"""
import plotly.io as pio
renderer_str = pio.renderers.default
renderers = pio._renderers.renderers
renderer_names = renderers._validate_coerce_renderers(renderer_str)
renderers_list = [renderers[name] for name in renderer_names]
from plotly.io._utils import validate_coerce_fig_to_dict
from plotly.io._renderers import MimetypeRenderer
fig_dict = validate_coerce_fig_to_dict(self, validate)
# Mimetype renderers
bundle = {}
for renderer in renderers_list:
if isinstance(renderer, MimetypeRenderer):
bundle.update(renderer.to_mimebundle(fig_dict))
return bundle
def _ipython_display_(self):
"""
Handle rich display of figures in ipython contexts
"""
import plotly.io as pio
if pio.renderers.render_on_display and pio.renderers.default:
pio.show(self)
else:
print(repr(self))
    def update(self, dict1=None, overwrite=False, **kwargs):
        """
        Update the properties of the figure with a dict and/or with
        keyword arguments.

        This recursively updates the structure of the figure
        object with the values in the input dict / keyword arguments.

        Parameters
        ----------
        dict1 : dict
            Dictionary of properties to be updated
        overwrite: bool
            If True, overwrite existing properties. If False, apply updates
            to existing properties recursively, preserving existing
            properties that are not specified in the update operation.
        kwargs :
            Keyword/value pair of properties to be updated

        Examples
        --------
        >>> import plotly.graph_objs as go
        >>> fig = go.Figure(data=[{'y': [1, 2, 3]}])
        >>> fig.update(data=[{'y': [4, 5, 6]}]) # doctest: +ELLIPSIS
        Figure(...)
        >>> fig.to_plotly_json() # doctest: +SKIP
        {'data': [{'type': 'scatter',
           'uid': 'e86a7c7a-346a-11e8-8aa8-a0999b0c017b',
           'y': array([4, 5, 6], dtype=int32)}],
         'layout': {}}

        >>> fig = go.Figure(layout={'xaxis':
        ...                         {'color': 'green',
        ...                          'range': [0, 1]}})
        >>> fig.update({'layout': {'xaxis': {'color': 'pink'}}}) # doctest: +ELLIPSIS
        Figure(...)
        >>> fig.to_plotly_json() # doctest: +SKIP
        {'data': [],
         'layout': {'xaxis':
                    {'color': 'pink',
                     'range': [0, 1]}}}

        Returns
        -------
        BaseFigure
            Updated figure
        """
        # All edits are applied inside a single batch so widget frontends
        # receive one consolidated update
        with self.batch_update():
            for d in [dict1, kwargs]:
                if d:
                    for k, v in d.items():
                        update_target = self[k]
                        if update_target == () or overwrite:
                            if k == "data":
                                # Overwrite all traces as special due to
                                # restrictions on trace assignment
                                self.data = ()
                                self.add_traces(v)
                            else:
                                # Accept v
                                self[k] = v
                        elif (
                            isinstance(update_target, BasePlotlyType)
                            and isinstance(v, (dict, BasePlotlyType))
                        ) or (
                            isinstance(update_target, tuple)
                            and isinstance(update_target[0], BasePlotlyType)
                        ):
                            # Compound target (or tuple of compounds):
                            # merge recursively instead of replacing
                            BaseFigure._perform_update(self[k], v)
                        else:
                            self[k] = v

        return self
def pop(self, key, *args):
"""
Remove the value associated with the specified key and return it
Parameters
----------
key: str
Property name
dflt
The default value to return if key was not found in figure
Returns
-------
value
The removed value that was previously associated with key
Raises
------
KeyError
If key is not in object and no dflt argument specified
"""
# Handle default
if key not in self and args:
return args[0]
elif key in self:
val = self[key]
self[key] = None
return val
else:
raise KeyError(key)
# Data
# ----
    @property
    def data(self):
        """
        The `data` property is a tuple of the figure's trace objects

        Returns
        -------
        tuple[BaseTraceType]
        """
        # Delegates to __getitem__, which presents the validated trace tuple
        return self["data"]
    @data.setter
    def data(self, new_data):
        # Setter docs: new_data must be a permutation of a subset of the
        # existing traces (no new trace objects may be introduced here —
        # use add_traces for that). Removals and moves are performed
        # in-place on self._data so widget frontends stay in sync.

        # Validate new_data
        # -----------------
        err_header = (
            "The data property of a figure may only be assigned \n"
            "a list or tuple that contains a permutation of a "
            "subset of itself.\n"
        )

        # ### Treat None as empty ###
        if new_data is None:
            new_data = ()

        # ### Check valid input type ###
        if not isinstance(new_data, (list, tuple)):
            err_msg = err_header + "    Received value with type {typ}".format(
                typ=type(new_data)
            )
            raise ValueError(err_msg)

        # ### Check valid element types ###
        for trace in new_data:
            if not isinstance(trace, BaseTraceType):
                err_msg = (
                    err_header
                    + "    Received element value of type {typ}".format(typ=type(trace))
                )
                raise ValueError(err_msg)

        # ### Check trace objects ###
        # Require that no new traces are introduced
        # (identity, not equality, is what matters here — hence id())
        orig_uids = [id(trace) for trace in self.data]
        new_uids = [id(trace) for trace in new_data]

        invalid_uids = set(new_uids).difference(set(orig_uids))
        if invalid_uids:
            err_msg = err_header
            raise ValueError(err_msg)

        # ### Check for duplicates in assignment ###
        uid_counter = collections.Counter(new_uids)
        duplicate_uids = [uid for uid, count in uid_counter.items() if count > 1]
        if duplicate_uids:
            err_msg = err_header + "    Received duplicated traces"
            raise ValueError(err_msg)

        # Remove traces
        # -------------
        remove_uids = set(orig_uids).difference(set(new_uids))
        delete_inds = []

        # ### Unparent removed traces ###
        for i, trace in enumerate(self.data):
            if id(trace) in remove_uids:
                delete_inds.append(i)

                # Unparent trace object to be removed
                old_trace = self.data[i]
                old_trace._orphan_props.update(deepcopy(old_trace._props))
                old_trace._parent = None
                old_trace._trace_ind = None

        # ### Compute trace props / defaults after removal ###
        traces_props_post_removal = [t for t in self._data]
        traces_prop_defaults_post_removal = [t for t in self._data_defaults]
        uids_post_removal = [id(trace_data) for trace_data in self.data]

        for i in reversed(delete_inds):
            del traces_props_post_removal[i]
            del traces_prop_defaults_post_removal[i]
            del uids_post_removal[i]

            # Modify in-place so we don't trigger serialization
            del self._data[i]

        if delete_inds:
            # Update widget, if any
            self._send_deleteTraces_msg(delete_inds)

        # Move traces
        # -----------

        # ### Compute new index for each remaining trace ###
        new_inds = []
        for uid in uids_post_removal:
            new_inds.append(new_uids.index(uid))

        # ### Compute current index for each remaining trace ###
        current_inds = list(range(len(traces_props_post_removal)))

        # ### Check whether a move is needed ###
        if not all([i1 == i2 for i1, i2 in zip(new_inds, current_inds)]):

            # #### Save off index lists for moveTraces message ####
            msg_current_inds = current_inds
            msg_new_inds = new_inds

            # #### Reorder trace elements ####
            # We do so in-place so we don't trigger traitlet property
            # serialization for the FigureWidget case

            # ##### Remove by curr_inds in reverse order #####
            moving_traces_data = []
            for ci in reversed(current_inds):
                # Push moving traces data to front of list
                moving_traces_data.insert(0, self._data[ci])
                del self._data[ci]

            # #### Sort new_inds and moving_traces_data by new_inds ####
            new_inds, moving_traces_data = zip(
                *sorted(zip(new_inds, moving_traces_data))
            )

            # #### Insert by new_inds in forward order ####
            for ni, trace_data in zip(new_inds, moving_traces_data):
                self._data.insert(ni, trace_data)

            # #### Update widget, if any ####
            self._send_moveTraces_msg(msg_current_inds, msg_new_inds)

        # ### Update data defaults ###
        # There is no front-end synchronization to worry about so this
        # operation doesn't need to be in-place
        self._data_defaults = [
            _trace
            for i, _trace in sorted(zip(new_inds, traces_prop_defaults_post_removal))
        ]

        # Update trace objects tuple
        self._data_objs = list(new_data)

        # Update trace indexes
        for trace_ind, trace in enumerate(self._data_objs):
            trace._trace_ind = trace_ind
    def select_traces(self, selector=None, row=None, col=None, secondary_y=None):
        """
        Select traces from a particular subplot cell and/or traces
        that satisfy custom selection criteria.

        Parameters
        ----------
        selector: dict, function, int, str or None (default None)
            Dict to use as selection criteria.
            Traces will be selected if they contain properties corresponding
            to all of the dictionary's keys, with values that exactly match
            the supplied values. If None (the default), all traces are
            selected. If a function, it must be a function accepting a single
            argument and returning a boolean. The function will be called on
            each trace and those for which the function returned True
            will be in the selection. If an int N, the Nth trace matching row
            and col will be selected (N can be negative). If a string S, the selector
            is equivalent to dict(type=S).
        row, col: int or None (default None)
            Subplot row and column index of traces to select.
            To select traces by row and column, the Figure must have been
            created using plotly.subplots.make_subplots.  If None
            (the default), all traces are selected.
        secondary_y: boolean or None (default None)
            * If True, only select traces associated with the secondary
              y-axis of the subplot.
            * If False, only select traces associated with the primary
              y-axis of the subplot.
            * If None (the default), do not filter traces based on secondary
              y-axis.

            To select traces by secondary y-axis, the Figure must have been
            created using plotly.subplots.make_subplots. See the docstring
            for the specs argument to make_subplots for more info on
            creating subplots with secondary y-axes.
        Returns
        -------
        generator
            Generator that iterates through all of the traces that satisfy
            all of the specified selection criteria
        """
        if not selector:
            selector = {}

        if row is not None or col is not None or secondary_y is not None:
            grid_ref = self._validate_get_grid_ref()
            filter_by_subplot = True

            if row is None and col is not None:
                # All rows for column
                grid_subplot_ref_tuples = [ref_row[col - 1] for ref_row in grid_ref]
            elif col is None and row is not None:
                # All columns for row
                grid_subplot_ref_tuples = grid_ref[row - 1]
            elif col is not None and row is not None:
                # Single grid cell
                grid_subplot_ref_tuples = [grid_ref[row - 1][col - 1]]
            else:
                # row and col are None, secondary_y not None
                grid_subplot_ref_tuples = [
                    refs for refs_row in grid_ref for refs in refs_row
                ]

            # Collect list of subplot refs, taking secondary_y into account
            # (refs[0] is the primary-y ref, refs[1], when present, the
            # secondary-y ref)
            grid_subplot_refs = []
            for refs in grid_subplot_ref_tuples:
                if not refs:
                    continue
                if secondary_y is not True:
                    grid_subplot_refs.append(refs[0])

                if secondary_y is not False and len(refs) > 1:
                    grid_subplot_refs.append(refs[1])

        else:
            filter_by_subplot = False
            grid_subplot_refs = None

        return self._perform_select_traces(
            filter_by_subplot, grid_subplot_refs, selector
        )
def _perform_select_traces(self, filter_by_subplot, grid_subplot_refs, selector):
    """
    Build a generator over this figure's traces, optionally restricted
    to the given subplot references, then filtered by ``selector``.

    Parameters
    ----------
    filter_by_subplot : bool
        When True, only traces whose subplot reference is contained in
        ``grid_subplot_refs`` are considered.
    grid_subplot_refs : list or None
        Subplot references to match traces against (ignored when
        ``filter_by_subplot`` is False).
    selector : dict, function, int, str or None
        Selection criteria forwarded to ``self._filter_by_selector``.
    """
    from plotly.subplots import _get_subplot_ref_for_trace

    # Predicate keeping only traces that live on one of the selected subplots
    def _on_selected_subplot(trace):
        return _get_subplot_ref_for_trace(trace) in grid_subplot_refs

    filter_funcs = [_on_selected_subplot] if filter_by_subplot else []
    return _generator(self._filter_by_selector(self.data, filter_funcs, selector))
@staticmethod
def _selector_matches(obj, selector):
    """
    Return True if ``obj`` satisfies ``selector``.

    ``selector`` may be:
      * None — matches every object;
      * a string S — shorthand for ``dict(type=S)``;
      * a dict or graph object — every key must be present in ``obj``
        with an equal value (an empty dict matches everything);
      * a callable — called with ``obj``, its boolean result is returned.

    Raises
    ------
    TypeError
        If selector is none of the supported kinds.
    """
    if selector is None:
        return True

    # A string selector is shorthand for matching on the 'type' key
    if isinstance(selector, six.string_types):
        selector = dict(type=selector)

    if isinstance(selector, (dict, BasePlotlyType)):
        # Compare each selector key against the corresponding obj value
        for key in selector:
            if key not in obj:
                return False

            left = obj[key]
            right = selector[key]

            # Compare plain JSON values rather than graph-object wrappers
            if isinstance(left, BasePlotlyType):
                left = left.to_plotly_json()
            if isinstance(right, BasePlotlyType):
                right = right.to_plotly_json()

            if left != right:
                return False

        return True
    elif six.callable(selector):
        # Function selector: delegate the decision to the caller's predicate
        return selector(obj)
    else:
        raise TypeError(
            "selector must be dict or a function "
            "accepting a graph object returning a boolean."
        )
def _filter_by_selector(self, objects, funcs, selector):
"""
objects is a sequence of objects, funcs a list of functions that
return True if the object should be included in the selection and False
otherwise and selector is an argument to the self._selector_matches
function.
If selector is an integer, the resulting sequence obtained after
sucessively filtering by each function in funcs is indexed by this
integer.
Otherwise selector is used as the selector argument to
self._selector_matches which is used to filter down the sequence.
The function returns the sequence (an iterator).
"""
# if selector is not an int, we call it on each trace to test it for selection
if not isinstance(selector, int):
funcs.append(lambda obj: self._selector_matches(obj, selector))
def _filt(last, f):
return filter(f, last)
filtered_objects = reduce(_filt, funcs, objects)
if isinstance(selector, int):
return iter([list(filtered_objects)[selector]])
return filtered_objects
def for_each_trace(self, fn, selector=None, row=None, col=None, secondary_y=None):
    """
    Apply a function to all traces that satisfy the specified selection
    criteria

    Parameters
    ----------
    fn:
        Function that inputs a single trace object.
    selector: dict, function, int, str or None (default None)
        Dict to use as selection criteria. Traces are selected when they
        contain properties corresponding to all of the dictionary's keys,
        with values that exactly match the supplied values. If None (the
        default), all traces are selected. If a function, it must accept a
        single trace and return a boolean; traces for which it returns
        True are selected. If an int N, the Nth trace matching row and col
        is selected (N may be negative). If a string S, the selector is
        equivalent to dict(type=S).
    row, col: int or None (default None)
        Subplot row and column index of traces to select. Requires that
        the figure was created with plotly.subplots.make_subplots. If
        None (the default), all traces are selected.
    secondary_y: boolean or None (default None)
        True selects only traces on the secondary y-axis of the subplot,
        False only those on the primary y-axis, and None (the default)
        does not filter on secondary y-axis. Requires a figure created
        with plotly.subplots.make_subplots.

    Returns
    -------
    self
        Returns the Figure object that the method was called on
    """
    selected = self.select_traces(
        selector=selector, row=row, col=col, secondary_y=secondary_y
    )
    for trace in selected:
        fn(trace)
    return self
def update_traces(
    self,
    patch=None,
    selector=None,
    row=None,
    col=None,
    secondary_y=None,
    overwrite=False,
    **kwargs
):
    """
    Perform a property update operation on all traces that satisfy the
    specified selection criteria

    Parameters
    ----------
    patch: dict or None (default None)
        Dictionary of property updates applied to every selected trace.
    selector: dict, function, int, str or None (default None)
        Dict to use as selection criteria. Traces are selected when they
        contain properties corresponding to all of the dictionary's keys,
        with values that exactly match the supplied values. If None (the
        default), all traces are selected. If a function, it must accept a
        single trace and return a boolean; traces for which it returns
        True are selected. If an int N, the Nth trace matching row and col
        is selected (N may be negative). If a string S, the selector is
        equivalent to dict(type=S).
    row, col: int or None (default None)
        Subplot row and column index of traces to select. Requires that
        the figure was created with plotly.subplots.make_subplots. If
        None (the default), all traces are selected.
    secondary_y: boolean or None (default None)
        True selects only traces on the secondary y-axis of the subplot,
        False only those on the primary y-axis, and None (the default)
        does not filter on secondary y-axis. Requires a figure created
        with plotly.subplots.make_subplots.
    overwrite: bool
        If True, overwrite existing properties. If False, apply updates
        to existing properties recursively, preserving existing
        properties that are not specified in the update operation.
    **kwargs
        Additional property updates to apply to each selected trace. If
        a property is specified in both patch and in **kwargs then the
        one in **kwargs takes precedence.

    Returns
    -------
    self
        Returns the Figure object that the method was called on
    """
    selected = self.select_traces(
        selector=selector, row=row, col=col, secondary_y=secondary_y
    )
    for trace in selected:
        trace.update(patch, overwrite=overwrite, **kwargs)
    return self
def update_layout(self, dict1=None, overwrite=False, **kwargs):
    """
    Update the properties of the figure's layout with a dict and/or with
    keyword arguments.

    This recursively updates the structure of the original layout with
    the values in the input dict / keyword arguments.

    Parameters
    ----------
    dict1 : dict
        Dictionary of properties to be updated
    overwrite: bool
        If True, overwrite existing properties. If False, apply updates
        to existing properties recursively, preserving existing
        properties that are not specified in the update operation.
    kwargs :
        Keyword/value pair of properties to be updated

    Returns
    -------
    BaseFigure
        The Figure object that the update_layout method was called on
    """
    # Delegate the recursive merge to the layout graph object itself
    layout = self.layout
    layout.update(dict1, overwrite=overwrite, **kwargs)
    return self
def _select_layout_subplots_by_prefix(
self, prefix, selector=None, row=None, col=None, secondary_y=None
):
"""
Helper called by code generated select_* methods
"""
if row is not None or col is not None or secondary_y is not None:
# Build mapping from container keys ('xaxis2', 'scene4', etc.)
# to (row, col, secondary_y) triplets
grid_ref = self._validate_get_grid_ref()
container_to_row_col = {}
for r, subplot_row in enumerate(grid_ref):
for c, subplot_refs in enumerate(subplot_row):
if not subplot_refs:
continue
# collect primary keys
for i, subplot_ref in enumerate(subplot_refs):
for layout_key in subplot_ref.layout_keys:
if layout_key.startswith(prefix):
is_secondary_y = i == 1
container_to_row_col[layout_key] = (
r + 1,
c + 1,
is_secondary_y,
)
else:
container_to_row_col = None
layout_keys_filters = [
lambda k: k.startswith(prefix) and self.layout[k] is not None,
lambda k: row is None
or container_to_row_col.get(k, (None, None, None))[0] == row,
lambda k: col is None
or container_to_row_col.get(k, (None, None, None))[1] == col,
lambda k: (
secondary_y is None
or container_to_row_col.get(k, (None, None, None))[2] == secondary_y
),
]
layout_keys = reduce(
lambda last, f: filter(f, last),
layout_keys_filters,
# Natural sort keys so that xaxis20 is after xaxis3
_natural_sort_strings(list(self.layout)),
)
layout_objs = [self.layout[k] for k in layout_keys]
return _generator(self._filter_by_selector(layout_objs, [], selector))
def _select_annotations_like(
    self, prop, selector=None, row=None, col=None, secondary_y=None
):
    """
    Helper to select annotation-like elements from a layout object array.
    Compatible with layout.annotations, layout.shapes, and layout.images

    Builds xref/yref lookup tables from the subplot grid (only when a
    row/col/secondary_y restriction was requested), then lazily filters
    the elements of ``self.layout[prop]`` by those restrictions and by
    ``selector``.
    """
    xref_to_col = {}
    yref_to_row = {}
    yref_to_secondary_y = {}

    if isinstance(row, int) or isinstance(col, int) or secondary_y is not None:
        # Map axis refs ('x2', 'y3', ...) to grid coordinates
        grid_ref = self._validate_get_grid_ref()
        for r, subplot_row in enumerate(grid_ref):
            for c, subplot_refs in enumerate(subplot_row):
                if not subplot_refs:
                    continue
                for i, subplot_ref in enumerate(subplot_refs):
                    if subplot_ref.subplot_type != "xy":
                        continue
                    xaxis, yaxis = subplot_ref.layout_keys
                    xref = xaxis.replace("axis", "")
                    yref = yaxis.replace("axis", "")
                    xref_to_col[xref] = c + 1
                    yref_to_row[yref] = r + 1
                    # index 1 in subplot_refs is the secondary y-axis
                    yref_to_secondary_y[yref] = i == 1

    # filter down (select) which graph objects, by applying the filters
    # successively
    def _keep_col(obj):
        """ Filter objects in rows by column """
        return (col is None) or (xref_to_col.get(obj.xref, None) == col)

    def _keep_row(obj):
        """ Filter objects in columns by row """
        return (row is None) or (yref_to_row.get(obj.yref, None) == row)

    def _keep_secondary(obj):
        """ Filter objects on secondary y axes """
        return (secondary_y is None) or (
            yref_to_secondary_y.get(obj.yref, None) == secondary_y
        )

    return _generator(
        self._filter_by_selector(
            self.layout[prop], [_keep_col, _keep_row, _keep_secondary], selector
        )
    )
def _add_annotation_like(
self,
prop_singular,
prop_plural,
new_obj,
row=None,
col=None,
secondary_y=None,
exclude_empty_subplots=False,
):
# Make sure we have both row and col or neither
if row is not None and col is None:
raise ValueError(
"Received row parameter but not col.\n"
"row and col must be specified together"
)
elif col is not None and row is None:
raise ValueError(
"Received col parameter but not row.\n"
"row and col must be specified together"
)
# Address multiple subplots
if row is not None and _is_select_subplot_coordinates_arg(row, col):
# TODO product argument could be added
rows_cols = self._select_subplot_coordinates(row, col)
for r, c in rows_cols:
self._add_annotation_like(
prop_singular,
prop_plural,
new_obj,
row=r,
col=c,
secondary_y=secondary_y,
exclude_empty_subplots=exclude_empty_subplots,
)
return self
# Get grid_ref if specific row or column requested
if row is not None:
grid_ref = self._validate_get_grid_ref()
if row > len(grid_ref):
raise IndexError(
"row index %d out-of-bounds, row index must be between 1 and %d, inclusive."
% (row, len(grid_ref))
)
if col > len(grid_ref[row - 1]):
raise IndexError(
"column index %d out-of-bounds, "
"column index must be between 1 and %d, inclusive."
% (row, len(grid_ref[row - 1]))
)
refs = grid_ref[row - 1][col - 1]
if not refs:
raise ValueError(
"No subplot found at position ({r}, {c})".format(r=row, c=col)
)
if refs[0].subplot_type != "xy":
raise ValueError(
"""
Cannot add {prop_singular} to subplot at position ({r}, {c}) because subplot
is of type {subplot_type}.""".format(
prop_singular=prop_singular,
r=row,
c=col,
subplot_type=refs[0].subplot_type,
)
)
if len(refs) == 1 and secondary_y:
raise ValueError(
"""
Cannot add {prop_singular} to secondary y-axis of subplot at position ({r}, {c})
because subplot does not have a secondary y-axis"""
)
if secondary_y:
xaxis, yaxis = refs[1].layout_keys
else:
xaxis, yaxis = refs[0].layout_keys
xref, yref = xaxis.replace("axis", ""), yaxis.replace("axis", "")
# if exclude_empty_subplots is True, check to see if subplot is
# empty and return if it is
if exclude_empty_subplots and (
not self._subplot_not_empty(
xref, yref, selector=bool(exclude_empty_subplots)
)
):
return self
# in case the user specified they wanted an axis to refer to the
# domain of that axis and not the data, append ' domain' to the
# computed axis accordingly
def _add_domain(ax_letter, new_axref):
axref = ax_letter + "ref"
if axref in new_obj._props.keys() and "domain" in new_obj[axref]:
new_axref += " domain"
return new_axref
xref, yref = map(lambda t: _add_domain(*t), zip(["x", "y"], [xref, yref]))
new_obj.update(xref=xref, yref=yref)
self.layout[prop_plural] += (new_obj,)
return self
# Restyle
# -------
def plotly_restyle(self, restyle_data, trace_indexes=None, **kwargs):
    """
    Perform a Plotly restyle operation on the figure's traces

    Parameters
    ----------
    restyle_data : dict
        Dict of trace style updates. Keys are property path strings
        (nested properties joined on '.', e.g. 'marker.color'). Scalar
        values are applied to all traces in `trace_indexes`; list values
        are cycled across those traces.

        Caution: to update a list property (e.g. the `x` property of a
        scatter trace), wrap the new list in an outer list:

        >>> import plotly.graph_objects as go
        >>> fig = go.Figure(go.Scatter(x=[2, 4, 6]))
        >>> fig.plotly_restyle({'x': [[1, 2, 3]]}, 0)

    trace_indexes : int or list of int
        Trace index, or list of trace indexes, that the restyle operation
        applies to. Defaults to all trace indexes.

    Returns
    -------
    None
    """
    trace_indexes = self._normalize_trace_indexes(trace_indexes)

    # source_view_id, when provided, is the UID of the frontend Plotly.js
    # view that triggered this restyle (e.g. a legend click); it is
    # forwarded so frontend views can decide whether to re-apply the
    # operation on themselves.
    source_view_id = kwargs.get("source_view_id", None)

    # Apply the restyle to the underlying trace dicts
    restyle_changes = self._perform_plotly_restyle(restyle_data, trace_indexes)
    if not restyle_changes:
        # Nothing actually changed: no callbacks, no frontend message
        return

    msg_kwargs = {}
    if source_view_id is not None:
        msg_kwargs["source_view_id"] = source_view_id

    self._send_restyle_msg(restyle_changes, trace_indexes=trace_indexes, **msg_kwargs)
    self._dispatch_trace_change_callbacks(restyle_changes, trace_indexes)
def _perform_plotly_restyle(self, restyle_data, trace_indexes):
    """
    Perform a restyle operation on the figure's traces data and return
    the changes that were applied

    Parameters
    ----------
    restyle_data : dict[str, any]
        See docstring for plotly_restyle
    trace_indexes : list[int]
        List of trace indexes that restyle operation applies to

    Returns
    -------
    restyle_changes: dict[str, any]
        Subset of restyle_data including only the keys / values that
        resulted in a change to the figure's traces data

    Raises
    ------
    ValueError
        If a trace index is out of range or a property path is not
        valid for the targeted trace class.
    """
    # Initialize restyle changes
    # --------------------------
    # This will be a subset of the restyle_data including only the
    # keys / values that are changed in the figure's trace data
    restyle_changes = {}

    # Process each key
    # ----------------
    for key_path_str, v in restyle_data.items():

        # Track whether any of the new values cause a change in
        # self._data
        any_vals_changed = False
        for i, trace_ind in enumerate(trace_indexes):
            if trace_ind >= len(self._data):
                raise ValueError(
                    "Trace index {trace_ind} out of range".format(
                        trace_ind=trace_ind
                    )
                )

            # Get new value for this particular trace: list values are
            # cycled across the selected traces, scalars apply to all
            trace_v = v[i % len(v)] if isinstance(v, list) else v

            if trace_v is not Undefined:

                # Get trace being updated
                trace_obj = self.data[trace_ind]

                # Validate key_path_str
                if not BaseFigure._is_key_path_compatible(key_path_str, trace_obj):

                    trace_class = trace_obj.__class__.__name__
                    raise ValueError(
                        """
Invalid property path '{key_path_str}' for trace class {trace_class}
""".format(
                            key_path_str=key_path_str, trace_class=trace_class
                        )
                    )

                # Apply set operation for this trace and this value
                val_changed = BaseFigure._set_in(
                    self._data[trace_ind], key_path_str, trace_v
                )

                # Update any_vals_changed status
                any_vals_changed = any_vals_changed or val_changed

        if any_vals_changed:
            restyle_changes[key_path_str] = v

    return restyle_changes
def _restyle_child(self, child, key_path_str, val):
"""
Process restyle operation on a child trace object
Note: This method name/signature must match the one in
BasePlotlyType. BasePlotlyType objects call their parent's
_restyle_child method without knowing whether their parent is a
BasePlotlyType or a BaseFigure.
Parameters
----------
child : BaseTraceType
Child being restyled
key_path_str : str
A key path string (e.g. 'foo.bar[0]')
val
Restyle value
Returns
-------
None
"""
# Compute trace index
# -------------------
trace_index = child._trace_ind
# Not in batch mode
# -----------------
# Dispatch change callbacks and send restyle message
if not self._in_batch_mode:
send_val = [val]
restyle = {key_path_str: send_val}
self._send_restyle_msg(restyle, trace_indexes=trace_index)
self._dispatch_trace_change_callbacks(restyle, [trace_index])
# In batch mode
# -------------
# Add key_path_str/val to saved batch edits
else:
if trace_index not in self._batch_trace_edits:
self._batch_trace_edits[trace_index] = OrderedDict()
self._batch_trace_edits[trace_index][key_path_str] = val
def _normalize_trace_indexes(self, trace_indexes):
"""
Input trace index specification and return list of the specified trace
indexes
Parameters
----------
trace_indexes : None or int or list[int]
Returns
-------
list[int]
"""
if trace_indexes is None:
trace_indexes = list(range(len(self.data)))
if not isinstance(trace_indexes, (list, tuple)):
trace_indexes = [trace_indexes]
return list(trace_indexes)
@staticmethod
def _str_to_dict_path(key_path_str):
    """
    Convert a key path string into a tuple of key path elements.

    Parameters
    ----------
    key_path_str : str
        Key path string, where nested keys are joined on '.' characters
        and array indexes are specified using brackets
        (e.g. 'foo.bar[1]'). A tuple input is returned unchanged.

    Returns
    -------
    tuple[str | int]
    """
    if isinstance(key_path_str, tuple):
        # Already a key path tuple; nothing to do
        return key_path_str

    if isinstance(key_path_str, string_types) and not any(
        sep in key_path_str for sep in (".", "[", "_")
    ):
        # Fast path for the common simple-key case that avoids the
        # regular-expression machinery in _str_to_dict_path_full
        return (key_path_str,)

    # General case: full parse of dotted / bracketed / underscored paths
    return _str_to_dict_path_full(key_path_str)[0]
@staticmethod
def _set_in(d, key_path_str, v):
    """
    Set a value in a nested dict using a key path string
    (e.g. 'foo.bar[0]')

    Intermediate dicts/lists along the path are created as needed, and
    lists are padded with None so indexes are in bounds. Setting v=None
    removes a dict key (or nulls a list element); v=Undefined is a no-op.

    Parameters
    ----------
    d : dict
        Input dict to set property in
    key_path_str : str
        Key path string, where nested keys are joined on '.' characters
        and array indexes are specified using brackets
        (e.g. 'foo.bar[1]')
    v
        New value

    Returns
    -------
    bool
        True if set resulted in modification of dict (i.e. v was not
        already present at the specified location), False otherwise.

    Raises
    ------
    ValueError
        If the final parent container is neither a dict nor a list
        (a numpy array, for example).
    """
    # Validate inputs
    # ---------------
    assert isinstance(d, dict)

    # Compute key path
    # ----------------
    # Convert the key_path_str into a tuple of key paths
    # e.g. 'foo.bar[0]' -> ('foo', 'bar', 0)
    key_path = BaseFigure._str_to_dict_path(key_path_str)

    # Initialize val_parent
    # ---------------------
    # This variable will be assigned to the parent of the next key path
    # element currently being processed
    val_parent = d

    # Initialize parent dict or list of value to be assigned
    # -----------------------------------------------------
    # Walk down to the container that will hold the final key, creating
    # intermediate containers along the way
    for kp, key_path_el in enumerate(key_path[:-1]):

        # Extend val_parent list if needed
        if isinstance(val_parent, list) and isinstance(key_path_el, int):
            # Pad with None so that key_path_el is a valid index
            while len(val_parent) <= key_path_el:
                val_parent.append(None)

        elif isinstance(val_parent, dict) and key_path_el not in val_parent:
            # Pick list vs dict for the new container based on whether
            # the NEXT path element is an integer index
            if isinstance(key_path[kp + 1], int):
                val_parent[key_path_el] = []
            else:
                val_parent[key_path_el] = {}

        val_parent = val_parent[key_path_el]

    # Assign value to to final parent dict or list
    # --------------------------------------------
    # ### Get reference to final key path element ###
    last_key = key_path[-1]

    # ### Track whether assignment alters parent ###
    val_changed = False

    # v is Undefined
    # --------------
    # Don't alter val_parent
    if v is Undefined:
        pass

    # v is None
    # ---------
    # Check whether we can remove key from parent
    elif v is None:
        if isinstance(val_parent, dict):
            if last_key in val_parent:
                # Parent is a dict and has last_key as a current key so
                # we can pop the key, which alters parent
                val_parent.pop(last_key)
                val_changed = True
        elif isinstance(val_parent, list):
            if isinstance(last_key, int) and 0 <= last_key < len(val_parent):
                # Parent is a list and last_key is a valid index so we
                # can set the element value to None
                val_parent[last_key] = None
                val_changed = True
        else:
            # Unsupported parent type (numpy array for example)
            raise ValueError(
                """
Cannot remove element of type {typ} at location {raw_key}""".format(
                    typ=type(val_parent), raw_key=key_path_str
                )
            )

    # v is a valid value
    # ------------------
    # Check whether parent should be updated
    else:
        if isinstance(val_parent, dict):
            if last_key not in val_parent or not BasePlotlyType._vals_equal(
                val_parent[last_key], v
            ):
                # Parent is a dict and does not already contain the
                # value v at key last_key
                val_parent[last_key] = v
                val_changed = True
        elif isinstance(val_parent, list):
            if isinstance(last_key, int):
                # Extend list with Nones if needed so that last_key is
                # in bounds
                while len(val_parent) <= last_key:
                    val_parent.append(None)

                if not BasePlotlyType._vals_equal(val_parent[last_key], v):
                    # Parent is a list and does not already contain the
                    # value v at index last_key
                    val_parent[last_key] = v
                    val_changed = True
        else:
            # Unsupported parent type (numpy array for example)
            raise ValueError(
                """
Cannot set element of type {typ} at location {raw_key}""".format(
                    typ=type(val_parent), raw_key=key_path_str
                )
            )
    return val_changed
# Add traces
# ----------
@staticmethod
def _raise_invalid_rows_cols(name, n, invalid):
rows_err_msg = """
If specified, the {name} parameter must be a list or tuple of integers
of length {n} (The number of traces being added)
Received: {invalid}
""".format(
name=name, n=n, invalid=invalid
)
raise ValueError(rows_err_msg)
@staticmethod
def _validate_rows_cols(name, n, vals):
if vals is None:
pass
elif isinstance(vals, (list, tuple)):
if len(vals) != n:
BaseFigure._raise_invalid_rows_cols(name=name, n=n, invalid=vals)
int_type = _get_int_type()
if [r for r in vals if not isinstance(r, int_type)]:
BaseFigure._raise_invalid_rows_cols(name=name, n=n, invalid=vals)
else:
BaseFigure._raise_invalid_rows_cols(name=name, n=n, invalid=vals)
def add_trace(
    self, trace, row=None, col=None, secondary_y=None, exclude_empty_subplots=False
):
    """
    Add a trace to the figure

    Parameters
    ----------
    trace : BaseTraceType or dict
        Either an instance of a trace class from the plotly.graph_objs
        package (e.g plotly.graph_objs.Scatter, plotly.graph_objs.Bar),
        or a dict where the 'type' property specifies the trace type
        ('scatter' is assumed when absent) and all remaining properties
        are passed to the constructor of that trace type.
    row : 'all', int or None (default)
        Subplot row index (starting from 1) for the trace to be added.
        Only valid if figure was created using
        `plotly.tools.make_subplots`. If 'all', addresses all rows in
        the specified column(s).
    col : 'all', int or None (default)
        Subplot col index (starting from 1) for the trace to be added.
        Only valid if figure was created using
        `plotly.tools.make_subplots`. If 'all', addresses all columns in
        the specified row(s).
    secondary_y: boolean or None (default None)
        If True, associate this trace with the secondary y-axis of the
        subplot at the specified row and col. Requires a figure created
        using `plotly.subplots.make_subplots` with row/col given, an
        'xy' subplot declared with secondary_y True in the specs
        argument to make_subplots, and a 2D cartesian trace
        (scatter, bar, etc.).
    exclude_empty_subplots: boolean
        If True, the trace will not be added to subplots that don't
        already have traces.

    Returns
    -------
    BaseFigure
        The Figure that add_trace was called on

    Examples
    --------

    >>> from plotly import subplots
    >>> import plotly.graph_objs as go

    Add two Scatter traces to vertically stacked subplots

    >>> fig = subplots.make_subplots(rows=2)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=1, col=1) # doctest: +ELLIPSIS
    Figure(...)
    >>> fig.add_trace(go.Scatter(x=[1,2,3], y=[2,1,2]), row=2, col=1) # doctest: +ELLIPSIS
    Figure(...)
    """
    have_row = row is not None
    have_col = col is not None

    # row and col must be given together or not at all
    if have_row and not have_col:
        raise ValueError(
            "Received row parameter but not col.\n"
            "row and col must be specified together"
        )
    elif have_col and not have_row:
        raise ValueError(
            "Received col parameter but not row.\n"
            "row and col must be specified together"
        )

    # Fan out recursively when row/col address several subplot cells
    if have_row and _is_select_subplot_coordinates_arg(row, col):
        # TODO add product argument
        for r, c in self._select_subplot_coordinates(row, col):
            self.add_trace(
                trace,
                row=r,
                col=c,
                secondary_y=secondary_y,
                exclude_empty_subplots=exclude_empty_subplots,
            )
        return self

    # Single (or no) subplot: delegate to the batch method
    return self.add_traces(
        data=[trace],
        rows=None if row is None else [row],
        cols=None if col is None else [col],
        secondary_ys=None if secondary_y is None else [secondary_y],
        exclude_empty_subplots=exclude_empty_subplots,
    )
def add_traces(
    self,
    data,
    rows=None,
    cols=None,
    secondary_ys=None,
    exclude_empty_subplots=False,
):
    """
    Add traces to the figure

    Parameters
    ----------
    data : list[BaseTraceType or dict]
        A list of trace specifications to be added.
        Trace specifications may be either:

          - Instances of trace classes from the plotly.graph_objs
            package (e.g plotly.graph_objs.Scatter, plotly.graph_objs.Bar)
          - Dicts where:

              - The 'type' property specifies the trace type (e.g.
                'scatter', 'bar', 'area', etc.). If the dict has no 'type'
                property then 'scatter' is assumed.
              - All remaining properties are passed to the constructor
                of the specified trace type.

    rows : None, list[int], or int (default None)
        List of subplot row indexes (starting from 1) for the traces to be
        added. Only valid if figure was created using
        `plotly.tools.make_subplots`
        If a single integer is passed, all traces will be added to row number

    cols : None or list[int] (default None)
        List of subplot column indexes (starting from 1) for the traces
        to be added. Only valid if figure was created using
        `plotly.tools.make_subplots`
        If a single integer is passed, all traces will be added to column number

    secondary_ys: None or list[boolean] (default None)
        List of secondary_y booleans for traces to be added. See the
        docstring for `add_trace` for more info.

    exclude_empty_subplots: boolean
        If True, the trace will not be added to subplots that don't already
        have traces.

    Returns
    -------
    BaseFigure
        The Figure that add_traces was called on

    Examples
    --------

    >>> from plotly import subplots
    >>> import plotly.graph_objs as go

    Add two Scatter traces to a figure

    >>> fig = go.Figure()
    >>> fig.add_traces([go.Scatter(x=[1,2,3], y=[2,1,2]),
    ...                 go.Scatter(x=[1,2,3], y=[2,1,2])]) # doctest: +ELLIPSIS
    Figure(...)

    Add two Scatter traces to vertically stacked subplots

    >>> fig = subplots.make_subplots(rows=2)
    >>> fig.add_traces([go.Scatter(x=[1,2,3], y=[2,1,2]),
    ...                 go.Scatter(x=[1,2,3], y=[2,1,2])],
    ...                 rows=[1, 2], cols=[1, 1]) # doctest: +ELLIPSIS
    Figure(...)
    """
    # Validate traces: coerce dicts into graph objects
    data = self._data_validator.validate_coerce(data)

    # Set trace indexes: each new trace is appended after the existing ones
    for ind, new_trace in enumerate(data):
        new_trace._trace_ind = ind + len(self.data)

    # Allow integers as inputs to subplots: broadcast a single row/col
    # index to every trace being added
    int_type = _get_int_type()

    if isinstance(rows, int_type):
        rows = [rows] * len(data)

    if isinstance(cols, int_type):
        cols = [cols] * len(data)

    # Validate rows / cols
    n = len(data)
    BaseFigure._validate_rows_cols("rows", n, rows)
    BaseFigure._validate_rows_cols("cols", n, cols)

    # Make sure we have both rows and cols or neither
    if rows is not None and cols is None:
        raise ValueError(
            "Received rows parameter but not cols.\n"
            "rows and cols must be specified together"
        )
    elif cols is not None and rows is None:
        raise ValueError(
            "Received cols parameter but not rows.\n"
            "rows and cols must be specified together"
        )

    # Process secondary_ys defaults
    if secondary_ys is not None and rows is None:
        # Default rows/cols to 1s if secondary_ys specified but not rows
        # or cols
        # NOTE(review): this anchors every trace to subplot (1, 1);
        # presumably intentional for single-subplot secondary-y figures
        rows = [1] * len(secondary_ys)
        cols = rows
    elif secondary_ys is None and rows is not None:
        # Default secondary_ys to Nones if secondary_ys is not specified
        # but not rows and cols are specified
        secondary_ys = [None] * len(rows)

    # Apply rows / cols: bind each trace to its subplot's axes
    if rows is not None:
        for trace, row, col, secondary_y in zip(data, rows, cols, secondary_ys):
            self._set_trace_grid_position(trace, row, col, secondary_y)

    if exclude_empty_subplots:
        # Drop traces targeted at subplots that have no traces yet
        data = list(
            filter(
                lambda trace: self._subplot_not_empty(
                    trace["xaxis"], trace["yaxis"], bool(exclude_empty_subplots)
                ),
                data,
            )
        )

    # Make deep copy of trace data (Optimize later if needed)
    new_traces_data = [deepcopy(trace._props) for trace in data]

    # Update trace parent: the figure now owns each trace's properties
    for trace in data:
        trace._parent = self
        trace._orphan_props.clear()

    # Update python side
    # Use extend instead of assignment so we don't trigger serialization
    self._data.extend(new_traces_data)
    self._data_defaults = self._data_defaults + [{} for _ in data]
    self._data_objs = self._data_objs + data

    # Update messages: notify any frontend views of the new traces
    self._send_addTraces_msg(new_traces_data)

    return self
# Subplots
# --------
def print_grid(self):
    """
    Print a visual layout of the figure's axes arrangement.
    This is only valid for figures that are created
    with plotly.tools.make_subplots.

    Raises
    ------
    Exception
        If the figure was not created with make_subplots.
    """
    grid_str = self._grid_str
    if grid_str is None:
        raise Exception(
            "Use plotly.tools.make_subplots to create a subplot grid."
        )
    print(grid_str)
def append_trace(self, trace, row, col):
    """
    Add a trace to the figure bound to axes at the specified row,
    col index.

    .. deprecated::
        Use `add_trace` with the `row` and `col` parameters instead.

    A row, col index grid is generated for figures created with
    plotly.tools.make_subplots, and can be viewed with the `print_grid`
    method

    Parameters
    ----------
    trace
        The data trace to be bound
    row: int
        Subplot row index (see Figure.print_grid)
    col: int
        Subplot column index (see Figure.print_grid)
    """
    deprecation_msg = (
        "The append_trace method is deprecated and will be removed in a future version.\n"
        "Please use the add_trace method with the row and col parameters.\n"
    )
    warnings.warn(deprecation_msg, DeprecationWarning)
    self.add_trace(trace=trace, row=row, col=col)
def _set_trace_grid_position(self, trace, row, col, secondary_y=False):
from plotly.subplots import _set_trace_grid_reference
grid_ref = self._validate_get_grid_ref()
return _set_trace_grid_reference(
trace, self.layout, grid_ref, row, col, secondary_y
)
def _validate_get_grid_ref(self):
try:
grid_ref = self._grid_ref
if grid_ref is None:
raise AttributeError("_grid_ref")
except AttributeError:
raise Exception(
"In order to reference traces by row and column, "
"you must first use "
"plotly.tools.make_subplots "
"to create the figure with a subplot grid."
)
return grid_ref
def _get_subplot_rows_columns(self):
"""
Returns a pair of lists, the first containing all the row indices and
the second all the column indices.
"""
# currently, this just iterates over all the rows and columns (because
# self._grid_ref is currently always rectangular)
grid_ref = self._validate_get_grid_ref()
nrows = len(grid_ref)
ncols = len(grid_ref[0])
return (range(1, nrows + 1), range(1, ncols + 1))
def _get_subplot_coordinates(self):
"""
Returns an iterator over (row,col) pairs representing all the possible
subplot coordinates.
"""
return itertools.product(*self._get_subplot_rows_columns())
def _select_subplot_coordinates(self, rows, cols, product=False):
"""
Allows selecting all or a subset of the subplots.
If any of rows or columns is 'all', product is set to True. This is
probably the expected behaviour, so that rows=1,cols='all' selects all
the columns in row 1 (otherwise it would just select the subplot in the
first row and first column).
"""
product |= any([s == "all" for s in [rows, cols]])
# TODO: If grid_ref ever becomes non-rectangular, then t should be the
# set-intersection of the result of _indexing_combinations and
# _get_subplot_coordinates, because some coordinates given by
# the _indexing_combinations function might be invalid.
t = _indexing_combinations(
[rows, cols], list(self._get_subplot_rows_columns()), product=product,
)
t = list(t)
# remove rows and cols where the subplot is "None"
grid_ref = self._validate_get_grid_ref()
t = list(filter(lambda u: grid_ref[u[0] - 1][u[1] - 1] is not None, t))
return t
def get_subplot(self, row, col, secondary_y=False):
"""
Return an object representing the subplot at the specified row
and column. May only be used on Figures created using
plotly.tools.make_subplots
Parameters
----------
row: int
1-based index of subplot row
col: int
1-based index of subplot column
secondary_y: bool
If True, select the subplot that consists of the x-axis and the
secondary y-axis at the specified row/col. Only valid if the
subplot at row/col is an 2D cartesian subplot that was created
with a secondary y-axis. See the docstring for the specs argument
to make_subplots for more info on creating a subplot with a
secondary y-axis.
Returns
-------
subplot
* None: if subplot is empty
* plotly.graph_objs.layout.Scene: if subplot type is 'scene'
* plotly.graph_objs.layout.Polar: if subplot type is 'polar'
* plotly.graph_objs.layout.Ternary: if subplot type is 'ternary'
* plotly.graph_objs.layout.Mapbox: if subplot type is 'ternary'
* SubplotDomain namedtuple with `x` and `y` fields:
if subplot type is 'domain'.
- x: length 2 list of the subplot start and stop width
- y: length 2 list of the subplot start and stop height
* SubplotXY namedtuple with `xaxis` and `yaxis` fields:
if subplot type is 'xy'.
- xaxis: plotly.graph_objs.layout.XAxis instance for subplot
- yaxis: plotly.graph_objs.layout.YAxis instance for subplot
"""
from plotly.subplots import _get_grid_subplot
return _get_grid_subplot(self, row, col, secondary_y)
# Child property operations
# -------------------------
def _get_child_props(self, child):
"""
Return the properties dict for a child trace or child layout
Note: this method must match the name/signature of one on
BasePlotlyType
Parameters
----------
child : BaseTraceType | BaseLayoutType
Returns
-------
dict
"""
# Try to find index of child as a trace
# -------------------------------------
if isinstance(child, BaseTraceType):
# ### Child is a trace ###
trace_index = child._trace_ind
return self._data[trace_index]
# Child is the layout
# -------------------
elif child is self.layout:
return self._layout
# Unknown child
# -------------
else:
raise ValueError("Unrecognized child: %s" % child)
def _get_child_prop_defaults(self, child):
"""
Return the default properties dict for a child trace or child layout
Note: this method must match the name/signature of one on
BasePlotlyType
Parameters
----------
child : BaseTraceType | BaseLayoutType
Returns
-------
dict
"""
# Child is a trace
# ----------------
if isinstance(child, BaseTraceType):
trace_index = child._trace_ind
return self._data_defaults[trace_index]
# Child is the layout
# -------------------
elif child is self.layout:
return self._layout_defaults
# Unknown child
# -------------
else:
raise ValueError("Unrecognized child: %s" % child)
def _init_child_props(self, child):
"""
Initialize the properites dict for a child trace or layout
Note: this method must match the name/signature of one on
BasePlotlyType
Parameters
----------
child : BaseTraceType | BaseLayoutType
Returns
-------
None
"""
# layout and traces dict are initialize when figure is constructed
# and when new traces are added to the figure
pass
# Layout
# ------
    def _initialize_layout_template(self):
        """
        Apply the default figure template (plotly.io.templates.default) to
        this figure's layout, if the layout does not already specify one.
        """
        import plotly.io as pio
        # Only act when the layout has no explicit template of its own
        if self._layout_obj._props.get("template", None) is None:
            if pio.templates.default is not None:
                # Assume default template is already validated
                if self._allow_disable_validation:
                    self._layout_obj._validate = False
                try:
                    if isinstance(pio.templates.default, BasePlotlyType):
                        # Template object. Don't want to actually import `Template`
                        # here for performance so we check against `BasePlotlyType`
                        template_object = pio.templates.default
                    else:
                        # Name of registered template object
                        template_object = pio.templates[pio.templates.default]
                    self._layout_obj.template = template_object
                finally:
                    # Restore the layout's validation flag no matter what
                    self._layout_obj._validate = self._validate
    @property
    def layout(self):
        """
        The `layout` property of the figure

        Returns
        -------
        plotly.graph_objs.Layout
        """
        # Retrieved via the figure's item access (__getitem__)
        return self["layout"]
    @layout.setter
    def layout(self, new_layout):
        # Replace the figure's layout with `new_layout`, detaching the old
        # layout object and notifying the frontend (if any) of the change.
        # Validate new layout
        # -------------------
        new_layout = self._layout_validator.validate_coerce(new_layout)
        new_layout_data = deepcopy(new_layout._props)
        # Unparent current layout
        # -----------------------
        # The outgoing layout object keeps a copy of its properties in its
        # orphan-props store so it stays usable after being detached.
        if self._layout_obj:
            old_layout_data = deepcopy(self._layout_obj._props)
            self._layout_obj._orphan_props.update(old_layout_data)
            self._layout_obj._parent = None
        # Parent new layout
        # -----------------
        self._layout = new_layout_data
        new_layout._parent = self
        new_layout._orphan_props.clear()
        self._layout_obj = new_layout
        # Initialize template object
        # --------------------------
        self._initialize_layout_template()
        # Notify JS side
        self._send_relayout_msg(new_layout_data)
def plotly_relayout(self, relayout_data, **kwargs):
"""
Perform a Plotly relayout operation on the figure's layout
Parameters
----------
relayout_data : dict
Dict of layout updates
dict keys are strings that specify the properties to be updated.
Nested properties are expressed by joining successive keys on
'.' characters (e.g. 'xaxis.range')
dict values are the values to use to update the layout.
Returns
-------
None
"""
# Handle source_view_id
# ---------------------
# If not None, the source_view_id is the UID of the frontend
# Plotly.js view that initially triggered this relayout operation
# (e.g. the user clicked on the toolbar to change the drag mode
# from zoom to pan). We pass this UID along so that the frontend
# views can determine whether they need to apply the relayout
# operation on themselves.
if "source_view_id" in kwargs:
msg_kwargs = {"source_view_id": kwargs["source_view_id"]}
else:
msg_kwargs = {}
# Perform relayout operation on layout dict
# -----------------------------------------
relayout_changes = self._perform_plotly_relayout(relayout_data)
if relayout_changes:
# The relayout operation resulted in a change to some layout
# properties, so we dispatch change callbacks and send the
# relayout message to the frontend (if any)
self._send_relayout_msg(relayout_changes, **msg_kwargs)
self._dispatch_layout_change_callbacks(relayout_changes)
def _perform_plotly_relayout(self, relayout_data):
"""
Perform a relayout operation on the figure's layout data and return
the changes that were applied
Parameters
----------
relayout_data : dict[str, any]
See the docstring for plotly_relayout
Returns
-------
relayout_changes: dict[str, any]
Subset of relayout_data including only the keys / values that
resulted in a change to the figure's layout data
"""
# Initialize relayout changes
# ---------------------------
# This will be a subset of the relayout_data including only the
# keys / values that are changed in the figure's layout data
relayout_changes = {}
# Process each key
# ----------------
for key_path_str, v in relayout_data.items():
if not BaseFigure._is_key_path_compatible(key_path_str, self.layout):
raise ValueError(
"""
Invalid property path '{key_path_str}' for layout
""".format(
key_path_str=key_path_str
)
)
# Apply set operation on the layout dict
val_changed = BaseFigure._set_in(self._layout, key_path_str, v)
if val_changed:
relayout_changes[key_path_str] = v
return relayout_changes
@staticmethod
def _is_key_path_compatible(key_path_str, plotly_obj):
"""
Return whether the specifieid key path string is compatible with
the specified plotly object for the purpose of relayout/restyle
operation
"""
# Convert string to tuple of path components
# e.g. 'foo[0].bar[1]' -> ('foo', 0, 'bar', 1)
key_path_tuple = BaseFigure._str_to_dict_path(key_path_str)
# Remove trailing integer component
# e.g. ('foo', 0, 'bar', 1) -> ('foo', 0, 'bar')
# We do this because it's fine for relayout/restyle to create new
# elements in the final array in the path.
if isinstance(key_path_tuple[-1], int):
key_path_tuple = key_path_tuple[:-1]
# Test whether modified key path tuple is in plotly_obj
return key_path_tuple in plotly_obj
def _relayout_child(self, child, key_path_str, val):
"""
Process relayout operation on child layout object
Parameters
----------
child : BaseLayoutType
The figure's layout
key_path_str :
A key path string (e.g. 'foo.bar[0]')
val
Relayout value
Returns
-------
None
"""
# Validate input
# --------------
assert child is self.layout
# Not in batch mode
# -------------
# Dispatch change callbacks and send relayout message
if not self._in_batch_mode:
relayout_msg = {key_path_str: val}
self._send_relayout_msg(relayout_msg)
self._dispatch_layout_change_callbacks(relayout_msg)
# In batch mode
# -------------
# Add key_path_str/val to saved batch edits
else:
self._batch_layout_edits[key_path_str] = val
# Dispatch change callbacks
# -------------------------
@staticmethod
def _build_dispatch_plan(key_path_strs):
"""
Build a dispatch plan for a list of key path strings
A dispatch plan is a dict:
- *from* path tuples that reference an object that has descendants
that are referenced in `key_path_strs`.
- *to* sets of tuples that correspond to descendants of the object
above.
Parameters
----------
key_path_strs : list[str]
List of key path strings. For example:
['xaxis.rangeselector.font.color', 'xaxis.rangeselector.bgcolor']
Returns
-------
dispatch_plan: dict[tuple[str|int], set[tuple[str|int]]]
Examples
--------
>>> key_path_strs = ['xaxis.rangeselector.font.color',
... 'xaxis.rangeselector.bgcolor']
>>> BaseFigure._build_dispatch_plan(key_path_strs) # doctest: +SKIP
{(): {'xaxis',
('xaxis', 'rangeselector'),
('xaxis', 'rangeselector', 'bgcolor'),
('xaxis', 'rangeselector', 'font'),
('xaxis', 'rangeselector', 'font', 'color')},
('xaxis',): {('rangeselector',),
('rangeselector', 'bgcolor'),
('rangeselector', 'font'),
('rangeselector', 'font', 'color')},
('xaxis', 'rangeselector'): {('bgcolor',),
('font',),
('font', 'color')},
('xaxis', 'rangeselector', 'font'): {('color',)}}
"""
dispatch_plan = {}
for key_path_str in key_path_strs:
key_path = BaseFigure._str_to_dict_path(key_path_str)
key_path_so_far = ()
keys_left = key_path
# Iterate down the key path
for next_key in key_path:
if key_path_so_far not in dispatch_plan:
dispatch_plan[key_path_so_far] = set()
to_add = [keys_left[: i + 1] for i in range(len(keys_left))]
dispatch_plan[key_path_so_far].update(to_add)
key_path_so_far = key_path_so_far + (next_key,)
keys_left = keys_left[1:]
return dispatch_plan
def _dispatch_layout_change_callbacks(self, relayout_data):
"""
Dispatch property change callbacks given relayout_data
Parameters
----------
relayout_data : dict[str, any]
See docstring for plotly_relayout.
Returns
-------
None
"""
# Build dispatch plan
# -------------------
key_path_strs = list(relayout_data.keys())
dispatch_plan = BaseFigure._build_dispatch_plan(key_path_strs)
# Dispatch changes to each layout objects
# ---------------------------------------
for path_tuple, changed_paths in dispatch_plan.items():
if path_tuple in self.layout:
dispatch_obj = self.layout[path_tuple]
if isinstance(dispatch_obj, BasePlotlyType):
dispatch_obj._dispatch_change_callbacks(changed_paths)
def _dispatch_trace_change_callbacks(self, restyle_data, trace_indexes):
"""
Dispatch property change callbacks given restyle_data
Parameters
----------
restyle_data : dict[str, any]
See docstring for plotly_restyle.
trace_indexes : list[int]
List of trace indexes that restyle operation applied to
Returns
-------
None
"""
# Build dispatch plan
# -------------------
key_path_strs = list(restyle_data.keys())
dispatch_plan = BaseFigure._build_dispatch_plan(key_path_strs)
# Dispatch changes to each object in each trace
# ---------------------------------------------
for path_tuple, changed_paths in dispatch_plan.items():
for trace_ind in trace_indexes:
trace = self.data[trace_ind]
if path_tuple in trace:
dispatch_obj = trace[path_tuple]
if isinstance(dispatch_obj, BasePlotlyType):
dispatch_obj._dispatch_change_callbacks(changed_paths)
# Frames
# ------
    @property
    def frames(self):
        """
        The `frames` property is a tuple of the figure's frame objects

        Returns
        -------
        tuple[plotly.graph_objs.Frame]
        """
        # Retrieved via the figure's item access (__getitem__)
        return self["frames"]
    @frames.setter
    def frames(self, new_frames):
        # Note: Frames are not supported by the FigureWidget subclass so we
        # only validate coerce the frames. We don't emit any events on frame
        # changes, and we don't reparent the frames.
        # Validate frames
        self._frame_objs = self._frames_validator.validate_coerce(new_frames)
# Update
# ------
def plotly_update(
self, restyle_data=None, relayout_data=None, trace_indexes=None, **kwargs
):
"""
Perform a Plotly update operation on the figure.
Note: This operation both mutates and returns the figure
Parameters
----------
restyle_data : dict
Traces update specification. See the docstring for the
`plotly_restyle` method for details
relayout_data : dict
Layout update specification. See the docstring for the
`plotly_relayout` method for details
trace_indexes :
Trace index, or list of trace indexes, that the update operation
applies to. Defaults to all trace indexes.
Returns
-------
BaseFigure
None
"""
# Handle source_view_id
# ---------------------
# If not None, the source_view_id is the UID of the frontend
# Plotly.js view that initially triggered this update operation
# (e.g. the user clicked a button that triggered an update
# operation). We pass this UID along so that the frontend views can
# determine whether they need to apply the update operation on
# themselves.
if "source_view_id" in kwargs:
msg_kwargs = {"source_view_id": kwargs["source_view_id"]}
else:
msg_kwargs = {}
# Perform update operation
# ------------------------
# This updates the _data and _layout dicts, and returns the changes
# to the traces (restyle_changes) and layout (relayout_changes)
(
restyle_changes,
relayout_changes,
trace_indexes,
) = self._perform_plotly_update(
restyle_data=restyle_data,
relayout_data=relayout_data,
trace_indexes=trace_indexes,
)
# Send update message
# -------------------
# Send a plotly_update message to the frontend (if any)
if restyle_changes or relayout_changes:
self._send_update_msg(
restyle_data=restyle_changes,
relayout_data=relayout_changes,
trace_indexes=trace_indexes,
**msg_kwargs
)
# Dispatch changes
# ----------------
# ### Dispatch restyle changes ###
if restyle_changes:
self._dispatch_trace_change_callbacks(restyle_changes, trace_indexes)
# ### Dispatch relayout changes ###
if relayout_changes:
self._dispatch_layout_change_callbacks(relayout_changes)
def _perform_plotly_update(
self, restyle_data=None, relayout_data=None, trace_indexes=None
):
# Check for early exist
# ---------------------
if not restyle_data and not relayout_data:
# Nothing to do
return None, None, None
# Normalize input
# ---------------
if restyle_data is None:
restyle_data = {}
if relayout_data is None:
relayout_data = {}
trace_indexes = self._normalize_trace_indexes(trace_indexes)
# Perform relayout
# ----------------
relayout_changes = self._perform_plotly_relayout(relayout_data)
# Perform restyle
# ---------------
restyle_changes = self._perform_plotly_restyle(restyle_data, trace_indexes)
# Return changes
# --------------
return restyle_changes, relayout_changes, trace_indexes
# Plotly message stubs
# --------------------
# send-message stubs that may be overridden by the widget subclass
    def _send_addTraces_msg(self, new_traces_data):
        # No-op stub; may be overridden by the widget subclass to notify the
        # frontend that traces were added.
        pass
    def _send_moveTraces_msg(self, current_inds, new_inds):
        # No-op stub; may be overridden by the widget subclass.
        pass
    def _send_deleteTraces_msg(self, delete_inds):
        # No-op stub; may be overridden by the widget subclass.
        pass
    def _send_restyle_msg(self, style, trace_indexes=None, source_view_id=None):
        # No-op stub; may be overridden by the widget subclass.
        pass
    def _send_relayout_msg(self, layout, source_view_id=None):
        # No-op stub; may be overridden by the widget subclass.
        pass
    def _send_update_msg(
        self, restyle_data, relayout_data, trace_indexes=None, source_view_id=None
    ):
        # No-op stub; may be overridden by the widget subclass.
        pass
    def _send_animate_msg(
        self, styles_data, relayout_data, trace_indexes, animation_opts
    ):
        # No-op stub; may be overridden by the widget subclass.
        pass
# Context managers
# ----------------
@contextmanager
def batch_update(self):
"""
A context manager that batches up trace and layout assignment
operations into a singe plotly_update message that is executed when
the context exits.
Examples
--------
For example, suppose we have a figure widget, `fig`, with a single
trace.
>>> import plotly.graph_objs as go
>>> fig = go.FigureWidget(data=[{'y': [3, 4, 2]}])
If we want to update the xaxis range, the yaxis range, and the
marker color, we could do so using a series of three property
assignments as follows:
>>> fig.layout.xaxis.range = [0, 5]
>>> fig.layout.yaxis.range = [0, 10]
>>> fig.data[0].marker.color = 'green'
This will work, however it will result in three messages being
sent to the front end (two relayout messages for the axis range
updates followed by one restyle message for the marker color
update). This can cause the plot to appear to stutter as the
three updates are applied incrementally.
We can avoid this problem by performing these three assignments in a
`batch_update` context as follows:
>>> with fig.batch_update():
... fig.layout.xaxis.range = [0, 5]
... fig.layout.yaxis.range = [0, 10]
... fig.data[0].marker.color = 'green'
Now, these three property updates will be sent to the frontend in a
single update message, and they will be applied by the front end
simultaneously.
"""
if self._in_batch_mode is True:
yield
else:
try:
self._in_batch_mode = True
yield
finally:
# ### Disable batch mode ###
self._in_batch_mode = False
# ### Build plotly_update params ###
(
restyle_data,
relayout_data,
trace_indexes,
) = self._build_update_params_from_batch()
# ### Call plotly_update ###
self.plotly_update(
restyle_data=restyle_data,
relayout_data=relayout_data,
trace_indexes=trace_indexes,
)
# ### Clear out saved batch edits ###
self._batch_layout_edits.clear()
self._batch_trace_edits.clear()
def _build_update_params_from_batch(self):
"""
Convert `_batch_trace_edits` and `_batch_layout_edits` into the
`restyle_data`, `relayout_data`, and `trace_indexes` params accepted
by the `plotly_update` method.
Returns
-------
(dict, dict, list[int])
"""
# Handle Style / Trace Indexes
# ----------------------------
batch_style_commands = self._batch_trace_edits
trace_indexes = sorted(set([trace_ind for trace_ind in batch_style_commands]))
all_props = sorted(
set(
[
prop
for trace_style in self._batch_trace_edits.values()
for prop in trace_style
]
)
)
# Initialize restyle_data dict with all values undefined
restyle_data = {
prop: [Undefined for _ in range(len(trace_indexes))] for prop in all_props
}
# Fill in values
for trace_ind, trace_style in batch_style_commands.items():
for trace_prop, trace_val in trace_style.items():
restyle_trace_index = trace_indexes.index(trace_ind)
restyle_data[trace_prop][restyle_trace_index] = trace_val
# Handle Layout
# -------------
relayout_data = self._batch_layout_edits
# Return plotly_update params
# ---------------------------
return restyle_data, relayout_data, trace_indexes
@contextmanager
def batch_animate(self, duration=500, easing="cubic-in-out"):
"""
Context manager to animate trace / layout updates
Parameters
----------
duration : number
The duration of the transition, in milliseconds.
If equal to zero, updates are synchronous.
easing : string
The easing function used for the transition.
One of:
- linear
- quad
- cubic
- sin
- exp
- circle
- elastic
- back
- bounce
- linear-in
- quad-in
- cubic-in
- sin-in
- exp-in
- circle-in
- elastic-in
- back-in
- bounce-in
- linear-out
- quad-out
- cubic-out
- sin-out
- exp-out
- circle-out
- elastic-out
- back-out
- bounce-out
- linear-in-out
- quad-in-out
- cubic-in-out
- sin-in-out
- exp-in-out
- circle-in-out
- elastic-in-out
- back-in-out
- bounce-in-out
Examples
--------
Suppose we have a figure widget, `fig`, with a single trace.
>>> import plotly.graph_objs as go
>>> fig = go.FigureWidget(data=[{'y': [3, 4, 2]}])
1) Animate a change in the xaxis and yaxis ranges using default
duration and easing parameters.
>>> with fig.batch_animate():
... fig.layout.xaxis.range = [0, 5]
... fig.layout.yaxis.range = [0, 10]
2) Animate a change in the size and color of the trace's markers
over 2 seconds using the elastic-in-out easing method
>>> with fig.batch_animate(duration=2000, easing='elastic-in-out'):
... fig.data[0].marker.color = 'green'
... fig.data[0].marker.size = 20
"""
# Validate inputs
# ---------------
duration = self._animation_duration_validator.validate_coerce(duration)
easing = self._animation_easing_validator.validate_coerce(easing)
if self._in_batch_mode is True:
yield
else:
try:
self._in_batch_mode = True
yield
finally:
# Exit batch mode
# ---------------
self._in_batch_mode = False
# Apply batch animate
# -------------------
self._perform_batch_animate(
{
"transition": {"duration": duration, "easing": easing},
"frame": {"duration": duration},
}
)
    def _perform_batch_animate(self, animation_opts):
        """
        Perform the batch animate operation

        This method should be called when the batch_animate() context
        manager exits.

        Parameters
        ----------
        animation_opts : dict
            Animation options as accepted by frontend Plotly.animation command

        Returns
        -------
        None
        """
        # Apply commands to internal dictionaries as an update
        # ----------------------------------------------------
        (
            restyle_data,
            relayout_data,
            trace_indexes,
        ) = self._build_update_params_from_batch()
        (
            restyle_changes,
            relayout_changes,
            trace_indexes,
        ) = self._perform_plotly_update(restyle_data, relayout_data, trace_indexes)
        # Convert style / trace_indexes into animate form
        # -----------------------------------------------
        # zip(*...) splits the (style, index) pairs into two parallel tuples
        if self._batch_trace_edits:
            animate_styles, animate_trace_indexes = zip(
                *[
                    (trace_style, trace_index)
                    for trace_index, trace_style in self._batch_trace_edits.items()
                ]
            )
        else:
            animate_styles, animate_trace_indexes = {}, []
        # Shallow copy so later clearing of the batch edits doesn't mutate
        # the payload being sent
        animate_layout = copy(self._batch_layout_edits)
        # Send animate message
        # --------------------
        # Sends animate message to the front end (if any)
        self._send_animate_msg(
            styles_data=list(animate_styles),
            relayout_data=animate_layout,
            trace_indexes=list(animate_trace_indexes),
            animation_opts=animation_opts,
        )
        # Clear batched commands
        # ----------------------
        self._batch_layout_edits.clear()
        self._batch_trace_edits.clear()
        # Dispatch callbacks
        # ------------------
        # ### Dispatch restyle changes ###
        if restyle_changes:
            self._dispatch_trace_change_callbacks(restyle_changes, trace_indexes)
        # ### Dispatch relayout changes ###
        if relayout_changes:
            self._dispatch_layout_change_callbacks(relayout_changes)
# Exports
# -------
def to_dict(self):
"""
Convert figure to a dictionary
Note: the dictionary includes the properties explicitly set by the
user, it does not include default values of unspecified properties
Returns
-------
dict
"""
# Handle data
# -----------
data = deepcopy(self._data)
# Handle layout
# -------------
layout = deepcopy(self._layout)
# Handle frames
# -------------
# Frame key is only added if there are any frames
res = {"data": data, "layout": layout}
frames = deepcopy([frame._props for frame in self._frame_objs])
if frames:
res["frames"] = frames
return res
    def to_plotly_json(self):
        """
        Convert figure to a JSON representation as a Python dict

        Returns
        -------
        dict
        """
        # The JSON representation is identical to the dict representation
        return self.to_dict()
@staticmethod
def _to_ordered_dict(d, skip_uid=False):
"""
Static helper for converting dict or list to structure of ordered
dictionaries
"""
if isinstance(d, dict):
# d is a dict
result = collections.OrderedDict()
for key in sorted(d.keys()):
if skip_uid and key == "uid":
continue
else:
result[key] = BaseFigure._to_ordered_dict(d[key], skip_uid=skip_uid)
elif isinstance(d, list) and d and isinstance(d[0], dict):
# d is a list of dicts
result = [BaseFigure._to_ordered_dict(el, skip_uid=skip_uid) for el in d]
else:
result = d
return result
def to_ordered_dict(self, skip_uid=True):
# Initialize resulting OrderedDict
# --------------------------------
result = collections.OrderedDict()
# Handle data
# -----------
result["data"] = BaseFigure._to_ordered_dict(self._data, skip_uid=skip_uid)
# Handle layout
# -------------
result["layout"] = BaseFigure._to_ordered_dict(self._layout)
# Handle frames
# -------------
if self._frame_objs:
frames_props = [frame._props for frame in self._frame_objs]
result["frames"] = BaseFigure._to_ordered_dict(frames_props)
return result
# plotly.io methods
# -----------------
# Note that docstrings are auto-generated in plotly/_docstring_gen.py
    def show(self, *args, **kwargs):
        """
        Show a figure using either the default renderer(s) or the renderer(s)
        specified by the renderer argument

        Parameters
        ----------
        renderer: str or None (default None)
            A string containing the names of one or more registered renderers
            (separated by '+' characters) or None. If None, then the default
            renderers specified in plotly.io.renderers.default are used.
        validate: bool (default True)
            True if the figure should be validated before being shown,
            False otherwise.
        width: int or float
            An integer or float that determines the number of pixels wide the
            plot is. The default is set in plotly.js.
        height: int or float
            An integer or float that determines the number of pixels tall the
            plot is. The default is set in plotly.js.
        config: dict
            A dict of parameters to configure the figure. The defaults are set
            in plotly.js.

        Returns
        -------
        None
        """
        # plotly.io is imported at call time; all arguments are forwarded
        # unchanged to plotly.io.show
        import plotly.io as pio
        return pio.show(self, *args, **kwargs)
    def to_json(self, *args, **kwargs):
        """
        Convert a figure to a JSON string representation

        Parameters
        ----------
        validate: bool (default True)
            True if the figure should be validated before being converted to
            JSON, False otherwise.
        pretty: bool (default False)
            True if JSON representation should be pretty-printed, False if
            representation should be as compact as possible.
        remove_uids: bool (default True)
            True if trace UIDs should be omitted from the JSON representation

        Returns
        -------
        str
            Representation of figure as a JSON string
        """
        # plotly.io is imported at call time; all arguments are forwarded
        # unchanged to plotly.io.to_json
        import plotly.io as pio
        return pio.to_json(self, *args, **kwargs)
    def full_figure_for_development(self, warn=True, as_dict=False):
        """
        Compute default values for all attributes not specified in the input figure and
        returns the output as a "full" figure. This function calls Plotly.js via Kaleido
        to populate unspecified attributes. This function is intended for interactive use
        during development to learn more about how Plotly.js computes default values and is
        not generally necessary or recommended for production use.

        Parameters
        ----------
        warn: bool
            If False, suppress warnings about not using this in production.
        as_dict: bool
            If True, output is a dict with some keys that go.Figure can't parse.
            If False, output is a go.Figure with unparseable keys skipped.

        Returns
        -------
        plotly.graph_objects.Figure or dict
            The full figure
        """
        # Delegates to plotly.io.full_figure_for_development with this figure
        # as the target (Kaleido is required at call time per the docstring)
        import plotly.io as pio
        return pio.full_figure_for_development(self, warn, as_dict)
    def write_json(self, *args, **kwargs):
        """
        Convert a figure to JSON and write it to a file or writeable
        object

        Parameters
        ----------
        file: str or writeable
            A string representing a local file path or a writeable object
            (e.g. an open file descriptor)
        pretty: bool (default False)
            True if JSON representation should be pretty-printed, False if
            representation should be as compact as possible.
        remove_uids: bool (default True)
            True if trace UIDs should be omitted from the JSON representation

        Returns
        -------
        None
        """
        # plotly.io is imported at call time; all arguments are forwarded
        # unchanged to plotly.io.write_json
        import plotly.io as pio
        return pio.write_json(self, *args, **kwargs)
    def to_html(self, *args, **kwargs):
        """
        Convert a figure to an HTML string representation.

        Parameters
        ----------
        config: dict or None (default None)
            Plotly.js figure config options
        auto_play: bool (default=True)
            Whether to automatically start the animation sequence on page load
            if the figure contains frames. Has no effect if the figure does not
            contain frames.
        include_plotlyjs: bool or string (default True)
            Specifies how the plotly.js library is included/loaded in the output
            div string.
            If True, a script tag containing the plotly.js source code (~3MB)
            is included in the output. HTML files generated with this option are
            fully self-contained and can be used offline.
            If 'cdn', a script tag that references the plotly.js CDN is included
            in the output. HTML files generated with this option are about 3MB
            smaller than those generated with include_plotlyjs=True, but they
            require an active internet connection in order to load the plotly.js
            library.
            If 'directory', a script tag is included that references an external
            plotly.min.js bundle that is assumed to reside in the same
            directory as the HTML file.
            If 'require', Plotly.js is loaded using require.js. This option
            assumes that require.js is globally available and that it has been
            globally configured to know how to find Plotly.js as 'plotly'.
            This option is not advised when full_html=True as it will result
            in a non-functional html file.
            If a string that ends in '.js', a script tag is included that
            references the specified path. This approach can be used to point
            the resulting HTML file to an alternative CDN or local bundle.
            If False, no script tag referencing plotly.js is included. This is
            useful when the resulting div string will be placed inside an HTML
            document that already loads plotly.js. This option is not advised
            when full_html=True as it will result in a non-functional html file.
        include_mathjax: bool or string (default False)
            Specifies how the MathJax.js library is included in the output html
            div string. MathJax is required in order to display labels
            with LaTeX typesetting.
            If False, no script tag referencing MathJax.js will be included in the
            output.
            If 'cdn', a script tag that references a MathJax CDN location will be
            included in the output. HTML div strings generated with this option
            will be able to display LaTeX typesetting as long as internet access
            is available.
            If a string that ends in '.js', a script tag is included that
            references the specified path. This approach can be used to point the
            resulting HTML div string to an alternative CDN.
        post_script: str or list or None (default None)
            JavaScript snippet(s) to be included in the resulting div just after
            plot creation. The string(s) may include '{plot_id}' placeholders
            that will then be replaced by the `id` of the div element that the
            plotly.js figure is associated with. One application for this script
            is to install custom plotly.js event handlers.
        full_html: bool (default True)
            If True, produce a string containing a complete HTML document
            starting with an <html> tag. If False, produce a string containing
            a single <div> element.
        animation_opts: dict or None (default None)
            dict of custom animation parameters to be passed to the function
            Plotly.animate in Plotly.js. See
            https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
            for available options. Has no effect if the figure does not contain
            frames, or auto_play is False.
        default_width, default_height: number or str (default '100%')
            The default figure width/height to use if the provided figure does not
            specify its own layout.width/layout.height property. May be
            specified in pixels as an integer (e.g. 500), or as a css width style
            string (e.g. '500px', '100%').
        validate: bool (default True)
            True if the figure should be validated before being converted to
            JSON, False otherwise.

        Returns
        -------
        str
            Representation of figure as an HTML div string
        """
        # plotly.io is imported at call time; all arguments are forwarded
        # unchanged to plotly.io.to_html
        import plotly.io as pio
        return pio.to_html(self, *args, **kwargs)
def write_html(self, *args, **kwargs):
    """
    Write a figure to an HTML file representation

    Parameters
    ----------
    file: str or writeable
        A string representing a local file path or a writeable object
        (e.g. an open file descriptor)
    config: dict or None (default None)
        Plotly.js figure config options
    auto_play: bool (default=True)
        Whether to automatically start the animation sequence on page load
        if the figure contains frames. Has no effect if the figure does not
        contain frames.
    include_plotlyjs: bool or string (default True)
        Specifies how the plotly.js library is included/loaded in the output
        div string.
        If True, a script tag containing the plotly.js source code (~3MB)
        is included in the output. HTML files generated with this option are
        fully self-contained and can be used offline.
        If 'cdn', a script tag that references the plotly.js CDN is included
        in the output. HTML files generated with this option are about 3MB
        smaller than those generated with include_plotlyjs=True, but they
        require an active internet connection in order to load the plotly.js
        library.
        If 'directory', a script tag is included that references an external
        plotly.min.js bundle that is assumed to reside in the same
        directory as the HTML file. If `file` is a string to a local file
        path and `full_html` is True, then the plotly.min.js bundle is copied
        into the directory of the resulting HTML file. If a file named
        plotly.min.js already exists in the output directory then this file
        is left unmodified and no copy is performed. HTML files generated
        with this option can be used offline, but they require a copy of
        the plotly.min.js bundle in the same directory. This option is
        useful when many figures will be saved as HTML files in the same
        directory because the plotly.js source code will be included only
        once per output directory, rather than once per output file.
        If 'require', Plotly.js is loaded using require.js. This option
        assumes that require.js is globally available and that it has been
        globally configured to know how to find Plotly.js as 'plotly'.
        This option is not advised when full_html=True as it will result
        in a non-functional html file.
        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point
        the resulting HTML file to an alternative CDN or local bundle.
        If False, no script tag referencing plotly.js is included. This is
        useful when the resulting div string will be placed inside an HTML
        document that already loads plotly.js. This option is not advised
        when full_html=True as it will result in a non-functional html file.
    include_mathjax: bool or string (default False)
        Specifies how the MathJax.js library is included in the output html
        div string. MathJax is required in order to display labels
        with LaTeX typesetting.
        If False, no script tag referencing MathJax.js will be included in the
        output.
        If 'cdn', a script tag that references a MathJax CDN location will be
        included in the output. HTML div strings generated with this option
        will be able to display LaTeX typesetting as long as internet access
        is available.
        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point the
        resulting HTML div string to an alternative CDN.
    post_script: str or list or None (default None)
        JavaScript snippet(s) to be included in the resulting div just after
        plot creation. The string(s) may include '{plot_id}' placeholders
        that will then be replaced by the `id` of the div element that the
        plotly.js figure is associated with. One application for this script
        is to install custom plotly.js event handlers.
    full_html: bool (default True)
        If True, produce a string containing a complete HTML document
        starting with an <html> tag. If False, produce a string containing
        a single <div> element.
    animation_opts: dict or None (default None)
        dict of custom animation parameters to be passed to the function
        Plotly.animate in Plotly.js. See
        https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
        for available options. Has no effect if the figure does not contain
        frames, or auto_play is False.
    default_width, default_height: number or str (default '100%')
        The default figure width/height to use if the provided figure does not
        specify its own layout.width/layout.height property. May be
        specified in pixels as an integer (e.g. 500), or as a css width style
        string (e.g. '500px', '100%').
    validate: bool (default True)
        True if the figure should be validated before being converted to
        JSON, False otherwise.
    auto_open: bool (default True)
        If True, open the saved file in a web browser after saving.
        This argument only applies if `full_html` is True.

    Returns
    -------
    None
    """
    import plotly.io as pio
    return pio.write_html(self, *args, **kwargs)
def to_image(self, *args, **kwargs):
    """
    Render this figure as a static image and return the raw image bytes.

    All positional and keyword arguments are forwarded verbatim to
    :func:`plotly.io.to_image`; the commonly used ones are listed below.

    Parameters
    ----------
    format: str or None
        Desired image format: 'png', 'jpg'/'jpeg', 'webp', 'svg', 'pdf',
        or 'eps' (requires the poppler library). Defaults to
        `plotly.io.config.default_format` when omitted.
    width: int or None
        Exported image width in layout pixels (equal to physical pixels
        when `scale` is 1.0). Defaults to `plotly.io.config.default_width`.
    height: int or None
        Exported image height in layout pixels (equal to physical pixels
        when `scale` is 1.0). Defaults to `plotly.io.config.default_height`.
    scale: int or float or None
        Scale factor applied to the layout dimensions; values above 1.0
        increase the image resolution and values below 1.0 decrease it.
        Defaults to `plotly.io.config.default_scale`.
    validate: bool
        True if the figure should be validated before being converted to
        an image, False otherwise.
    engine: str
        Image export engine: 'kaleido', 'orca', or 'auto' (use Kaleido if
        installed, otherwise Orca).

    Returns
    -------
    bytes
        The image data
    """
    # Imported lazily so plotly.io is only pulled in when export is used.
    from plotly import io as pio

    return pio.to_image(self, *args, **kwargs)
def write_image(self, *args, **kwargs):
    """
    Render this figure as a static image and write it to a file or to a
    writeable object.

    All positional and keyword arguments are forwarded verbatim to
    :func:`plotly.io.write_image`; the commonly used ones are listed below.

    Parameters
    ----------
    file: str or writeable
        A string representing a local file path or a writeable object
        (e.g. an open file descriptor)
    format: str or None
        Desired image format: 'png', 'jpg'/'jpeg', 'webp', 'svg', 'pdf',
        or 'eps' (requires the poppler library). When omitted and `file`
        is a string, the file extension is used; otherwise
        `plotly.io.config.default_format` applies.
    width: int or None
        Exported image width in layout pixels (equal to physical pixels
        when `scale` is 1.0). Defaults to `plotly.io.config.default_width`.
    height: int or None
        Exported image height in layout pixels (equal to physical pixels
        when `scale` is 1.0). Defaults to `plotly.io.config.default_height`.
    scale: int or float or None
        Scale factor applied to the layout dimensions; values above 1.0
        increase the image resolution and values below 1.0 decrease it.
        Defaults to `plotly.io.config.default_scale`.
    validate: bool
        True if the figure should be validated before being converted to
        an image, False otherwise.
    engine: str
        Image export engine: 'kaleido', 'orca', or 'auto' (use Kaleido if
        installed, otherwise Orca).

    Returns
    -------
    None
    """
    # Imported lazily so plotly.io is only pulled in when export is used.
    from plotly import io as pio

    return pio.write_image(self, *args, **kwargs)
# Static helpers
# --------------
@staticmethod
def _is_dict_list(v):
"""
Return true of the input object is a list of dicts
"""
return isinstance(v, list) and len(v) > 0 and isinstance(v[0], dict)
@staticmethod
def _perform_update(plotly_obj, update_obj, overwrite=False):
    """
    Helper to support the update() methods on :class:`BaseFigure` and
    :class:`BasePlotlyType`

    Parameters
    ----------
    plotly_obj : BasePlotlyType|tuple[BasePlotlyType]
        Object to be updated
    update_obj : dict|list[dict]|tuple[dict]
        When ``plotly_obj`` is an instance of :class:`BaseFigure`,
        ``update_obj`` should be a dict
        When ``plotly_obj`` is a tuple of instances of
        :class:`BasePlotlyType`, ``update_obj`` should be a tuple or list
        of dicts
    overwrite : bool
        If True, overwrite properties as-is instead of recursively
        merging compound properties.

    Returns
    -------
    None
        ``plotly_obj`` is mutated in place.
    """
    from _plotly_utils.basevalidators import (
        CompoundValidator,
        CompoundArrayValidator,
    )
    if update_obj is None:
        # Nothing to do
        return
    elif isinstance(plotly_obj, BasePlotlyType):
        # Handle initializing subplot ids
        # -------------------------------
        # This should be valid even if xaxis2 hasn't been initialized:
        # >>> layout.update(xaxis2={'title': 'xaxis 2'})
        for key in update_obj:
            err = _check_path_in_prop_tree(plotly_obj, key, error_cast=ValueError)
            if err is not None:
                if isinstance(plotly_obj, BaseLayoutType):
                    # try _subplot_re_match
                    match = plotly_obj._subplot_re_match(key)
                    if match:
                        # We need to create a subplotid object
                        plotly_obj[key] = {}
                        continue
                # If no match, raise the error, which should already
                # contain the _raise_on_invalid_property_error
                # generated message
                raise err
        # Convert update_obj to dict
        # --------------------------
        if isinstance(update_obj, BasePlotlyType):
            update_obj = update_obj.to_plotly_json()
        # Process valid properties
        # ------------------------
        for key in update_obj:
            val = update_obj[key]
            if overwrite:
                # Don't recurse and assign property as-is
                plotly_obj[key] = val
                continue
            validator = plotly_obj._get_prop_validator(key)
            if isinstance(validator, CompoundValidator) and isinstance(val, dict):
                # Update compound objects recursively
                # plotly_obj[key].update(val)
                BaseFigure._perform_update(plotly_obj[key], val)
            elif isinstance(validator, CompoundArrayValidator):
                if plotly_obj[key]:
                    # plotly_obj has an existing non-empty array for key
                    # In this case we merge val into the existing elements
                    BaseFigure._perform_update(plotly_obj[key], val)
                    # If update tuple is longer than current tuple, append the
                    # extra elements to the end
                    if isinstance(val, (list, tuple)) and len(val) > len(
                        plotly_obj[key]
                    ):
                        plotly_obj[key] = plotly_obj[key] + tuple(
                            val[len(plotly_obj[key]) :]
                        )
                else:
                    # plotly_obj is an empty or uninitialized list for key
                    # In this case we accept val as is
                    plotly_obj[key] = val
            else:
                # Assign non-compound value
                plotly_obj[key] = val
    elif isinstance(plotly_obj, tuple):
        if len(update_obj) == 0:
            # Nothing to do
            return
        else:
            for i, plotly_element in enumerate(plotly_obj):
                if isinstance(update_obj, dict):
                    # Dict update objects address elements by index key;
                    # elements without an entry are left untouched.
                    if i in update_obj:
                        update_element = update_obj[i]
                    else:
                        continue
                else:
                    # List/tuple update objects are cycled over the
                    # elements of plotly_obj.
                    update_element = update_obj[i % len(update_obj)]
                BaseFigure._perform_update(plotly_element, update_element)
    else:
        raise ValueError(
            "Unexpected plotly object with type {typ}".format(typ=type(plotly_obj))
        )
@staticmethod
def _index_is(iterable, val):
"""
Return the index of a value in an iterable using object identity
(not object equality as is the case for list.index)
"""
index_list = [i for i, curr_val in enumerate(iterable) if curr_val is val]
if not index_list:
raise ValueError("Invalid value")
return index_list[0]
def _make_axis_spanning_layout_object(self, direction, shape):
"""
Convert a shape drawn on a plot or a subplot into one whose yref or xref
ends with " domain" and has coordinates so that the shape will seem to
extend infinitely in that dimension. This is useful for drawing lines or
boxes on a plot where one dimension of the shape will not move out of
bounds when moving the plot's view.
Note that the shape already added to the (sub)plot must have the
corresponding axis reference referring to an actual axis (e.g., 'x',
'y2' etc. are accepted, but not 'paper'). This will be the case if the
shape was added with "add_shape".
Shape must have the x0, x1, y0, y1 fields already initialized.
"""
if direction == "vertical":
# fix y points to top and bottom of subplot
ref = "yref"
elif direction == "horizontal":
# fix x points to left and right of subplot
ref = "xref"
else:
raise ValueError(
"Bad direction: %s. Permissible values are 'vertical' and 'horizontal'."
% (direction,)
)
# set the ref to "<axis_id> domain" so that its size is based on the
# axis's size
shape[ref] += " domain"
return shape
def _process_multiple_axis_spanning_shapes(
    self,
    shape_args,
    row,
    col,
    shape_type,
    exclude_empty_subplots=True,
    annotation=None,
    **kwargs
):
    """
    Add a shape or multiple shapes and call _make_axis_spanning_layout_object on
    all the new shapes.

    Parameters
    ----------
    shape_args : dict
        Base shape specification (type, x0, x1, y0, y1) to forward to
        add_shape.
    row, col : int, 'all' or None
        Subplot address(es) to draw on; forwarded to add_shape /
        add_annotation.
    shape_type : str
        One of 'vline', 'hline', 'vrect', 'hrect'; determines the
        spanning direction.
    exclude_empty_subplots : bool
        If True, skip subplots that contain no traces or layout objects.
    annotation : dict or None
        Optional annotation to attach alongside the shape.
    kwargs
        Extra shape keyword arguments; keys prefixed with "annotation_"
        are split off and applied to the annotation instead.
    """
    if shape_type in ["vline", "vrect"]:
        direction = "vertical"
    elif shape_type in ["hline", "hrect"]:
        direction = "horizontal"
    else:
        raise ValueError(
            "Bad shape_type %s, needs to be one of 'vline', 'hline', 'vrect', 'hrect'"
            % (shape_type,)
        )
    if (row is not None or col is not None) and (not self._has_subplots()):
        # this has no subplots to address, so we force row and col to be None
        row = None
        col = None
    n_shapes_before = len(self.layout["shapes"])
    n_annotations_before = len(self.layout["annotations"])
    # shapes are always added at the end of the tuple of shapes, so we see
    # how long the tuple is before the call and after the call, and adjust
    # the new shapes that were added at the end
    # extract annotation prefixed kwargs
    # annotation with extra parameters based on the annotation_position
    # argument and other annotation_ prefixed kwargs
    shape_kwargs, annotation_kwargs = shapeannotation.split_dict_by_key_prefix(
        kwargs, "annotation_"
    )
    augmented_annotation = shapeannotation.axis_spanning_shape_annotation(
        annotation, shape_type, shape_args, annotation_kwargs
    )
    self.add_shape(
        row=row,
        col=col,
        exclude_empty_subplots=exclude_empty_subplots,
        **_combine_dicts([shape_args, shape_kwargs])
    )
    if augmented_annotation is not None:
        self.add_annotation(
            augmented_annotation,
            row=row,
            col=col,
            exclude_empty_subplots=exclude_empty_subplots,
        )
    # update xref and yref for the new shapes and annotations
    for layout_obj, n_layout_objs_before in zip(
        ["shapes", "annotations"], [n_shapes_before, n_annotations_before]
    ):
        n_layout_objs_after = len(self.layout[layout_obj])
        if (n_layout_objs_after > n_layout_objs_before) and (
            row is None and col is None
        ):
            # this was called intending to add to a single plot (and
            # self.add_{layout_obj} succeeded)
            # however, in the case of a single plot, xref and yref are not
            # specified, so we specify them here so the following routines can work
            # (they need to append " domain" to xref or yref)
            self.layout[layout_obj][-1].update(xref="x", yref="y")
        new_layout_objs = tuple(
            filter(
                lambda x: x is not None,
                [
                    self._make_axis_spanning_layout_object(
                        direction, self.layout[layout_obj][n],
                    )
                    for n in range(n_layout_objs_before, n_layout_objs_after)
                ],
            )
        )
        # Replace the newly appended objects with their domain-spanning
        # versions, preserving all pre-existing objects.
        self.layout[layout_obj] = (
            self.layout[layout_obj][:n_layout_objs_before] + new_layout_objs
        )
def add_vline(
    self,
    x,
    row="all",
    col="all",
    exclude_empty_subplots=True,
    annotation=None,
    **kwargs
):
    # A vertical line at x, spanning the full [0, 1] y-domain; the shared
    # helper converts it into an axis-spanning layout shape.
    line_spec = dict(type="line", x0=x, x1=x, y0=0, y1=1)
    self._process_multiple_axis_spanning_shapes(
        line_spec,
        row,
        col,
        "vline",
        exclude_empty_subplots=exclude_empty_subplots,
        annotation=annotation,
        **kwargs
    )
    return self
# Docstring is generated from the shared axis-spanning-shapes template.
add_vline.__doc__ = _axis_spanning_shapes_docstr("vline")
def add_hline(
    self,
    y,
    row="all",
    col="all",
    exclude_empty_subplots=True,
    annotation=None,
    **kwargs
):
    # Consistency fix: accept `annotation` explicitly, matching add_vline.
    # Backward compatible — callers that previously passed annotation via
    # **kwargs bound it to the same parameter of
    # _process_multiple_axis_spanning_shapes anyway.
    self._process_multiple_axis_spanning_shapes(
        dict(type="line", x0=0, x1=1, y0=y, y1=y),
        row,
        col,
        "hline",
        exclude_empty_subplots=exclude_empty_subplots,
        annotation=annotation,
        **kwargs
    )
    return self
# Docstring is generated from the shared axis-spanning-shapes template.
add_hline.__doc__ = _axis_spanning_shapes_docstr("hline")
def add_vrect(
    self,
    x0,
    x1,
    row="all",
    col="all",
    exclude_empty_subplots=True,
    annotation=None,
    **kwargs
):
    # Consistency fix: accept `annotation` explicitly, matching add_vline.
    # Backward compatible — callers that previously passed annotation via
    # **kwargs bound it to the same parameter of
    # _process_multiple_axis_spanning_shapes anyway.
    self._process_multiple_axis_spanning_shapes(
        dict(type="rect", x0=x0, x1=x1, y0=0, y1=1),
        row,
        col,
        "vrect",
        exclude_empty_subplots=exclude_empty_subplots,
        annotation=annotation,
        **kwargs
    )
    return self
# Docstring is generated from the shared axis-spanning-shapes template.
add_vrect.__doc__ = _axis_spanning_shapes_docstr("vrect")
def add_hrect(
    self,
    y0,
    y1,
    row="all",
    col="all",
    exclude_empty_subplots=True,
    annotation=None,
    **kwargs
):
    # Consistency fix: accept `annotation` explicitly, matching add_vline.
    # Backward compatible — callers that previously passed annotation via
    # **kwargs bound it to the same parameter of
    # _process_multiple_axis_spanning_shapes anyway.
    self._process_multiple_axis_spanning_shapes(
        dict(type="rect", x0=0, x1=1, y0=y0, y1=y1),
        row,
        col,
        "hrect",
        exclude_empty_subplots=exclude_empty_subplots,
        annotation=annotation,
        **kwargs
    )
    return self
# Docstring is generated from the shared axis-spanning-shapes template.
add_hrect.__doc__ = _axis_spanning_shapes_docstr("hrect")
def _has_subplots(self):
""" Returns True if figure contains subplots, otherwise it contains a
single plot and so this returns False. """
return self._grid_ref is not None
def _subplot_not_empty(self, xref, yref, selector="all"):
"""
xref: string representing the axis. Objects in the plot will be checked
for this xref (for layout objects) or xaxis (for traces) to
determine if they lie in a certain subplot.
yref: string representing the axis. Objects in the plot will be checked
for this yref (for layout objects) or yaxis (for traces) to
determine if they lie in a certain subplot.
selector: can be "all" or an iterable containing some combination of
"traces", "shapes", "annotations", "images". Only the presence
of objects specified in selector will be checked. So if
["traces","shapes"] is passed then a plot we be considered
non-empty if it contains traces or shapes. If
bool(selector) returns False, no checking is performed and
this function returns True. If selector is True, it is
converted to "all".
"""
if not selector:
# If nothing to select was specified then a subplot is always deemed non-empty
return True
if selector is True:
selector = "all"
if selector == "all":
selector = ["traces", "shapes", "annotations", "images"]
ret = False
for s in selector:
if s == "traces":
obj = self.data
xaxiskw = "xaxis"
yaxiskw = "yaxis"
elif s in ["shapes", "annotations", "images"]:
obj = self.layout[s]
xaxiskw = "xref"
yaxiskw = "yref"
else:
obj = None
if obj:
ret |= any(
t == (xref, yref)
for t in [
# if a object exists but has no xaxis or yaxis keys, then it
# is plotted with xaxis/xref 'x' and yaxis/yref 'y'
(
"x" if d[xaxiskw] is None else d[xaxiskw],
"y" if d[yaxiskw] is None else d[yaxiskw],
)
for d in obj
]
)
return ret
def set_subplots(self, rows=None, cols=None, **make_subplots_args):
    """
    Add subplots to this figure. If the figure already contains subplots,
    then this throws an error. Accepts any keyword arguments that
    plotly.subplots.make_subplots accepts.
    """
    # rows/cols are named parameters purely for convenience, so the call
    # can read fig.set_subplots(2, 3); fold them into the forwarded kwargs.
    for name, value in (("rows", rows), ("cols", cols)):
        if value is not None:
            make_subplots_args[name] = value
    if self._has_subplots():
        raise ValueError("This figure already has subplots.")
    return subplots.make_subplots(figure=self, **make_subplots_args)
class BasePlotlyType(object):
"""
BasePlotlyType is the base class for all objects in the trace, layout,
and frame object hierarchies
"""
# ### Mapped (deprecated) properties ###
# dict for deprecated property name (e.g. 'titlefont') to tuple
# of relative path to new property (e.g. ('title', 'font')
_mapped_properties = {}
_parent_path_str = ""
_path_str = ""
_valid_props = set()
def __init__(self, plotly_name, **kwargs):
    """
    Construct a new BasePlotlyType

    Parameters
    ----------
    plotly_name : str
        The lowercase name of the plotly object
    kwargs : dict
        Invalid props/values to raise on
    """
    # ### _skip_invalid ###
    # If True, then invalid properties should be skipped, if False then
    # invalid properties will result in an exception
    self._skip_invalid = False
    # ### _validate ###
    # If False, property assignments are stored as-is without validation.
    self._validate = True
    # Validate inputs
    # ---------------
    # NOTE: _process_kwargs runs before _plotly_name and the property
    # containers below are assigned; it relies only on the two flags above.
    self._process_kwargs(**kwargs)
    # Store params
    # ------------
    self._plotly_name = plotly_name
    # Initialize properties
    # ---------------------
    # ### _compound_props ###
    # A dict from compound property names to compound objects
    self._compound_props = {}
    # ### _compound_array_props ###
    # A dict from compound array property names to tuples of compound
    # objects
    self._compound_array_props = {}
    # ### _orphan_props ###
    # A dict of properties for use while object has no parent. When
    # object has a parent, it requests its properties dict from its
    # parent and doesn't use this.
    self._orphan_props = {}
    # ### _parent ###
    # The parent of the object. May be another BasePlotlyType or it may
    # be a BaseFigure (as is the case for the Layout and Trace objects)
    self._parent = None
    # ### _change_callbacks ###
    # A dict from tuples of child property path tuples to lists
    # of callbacks that should be executed whenever any of these
    # properties is modified
    self._change_callbacks = {}
    # ### Backing property for backward compatible _validators property ###
    self.__validators = None
# @property
# def _validate(self):
# fig = self.figure
# if fig is None:
# return True
# else:
# return fig._validate
def _get_validator(self, prop):
    """
    Return the validator for property `prop` of this object, built on
    demand and cached by the shared ValidatorCache (keyed on this
    object's `_path_str`).
    """
    from .validator_cache import ValidatorCache
    return ValidatorCache.get_validator(self._path_str, prop)
@property
def _validators(self):
    """
    Validators used to be stored in a private _validators property. This was
    eliminated when we switched to building validators on demand using the
    _get_validator method.

    This property returns a small compatibility object that emulates the
    old dict interface by delegating item lookups to _get_validator, so
    legacy code that accessed `obj._validators[...]` keeps working.

    Returns
    -------
    dict-like interface for accessing the object's validators
    """
    obj = self
    if self.__validators is None:
        # Built lazily and cached on first access.
        class ValidatorCompat(object):
            def __getitem__(self, item):
                return obj._get_validator(item)
            def __contains__(self, item):
                return obj.__contains__(item)
            def __iter__(self):
                return iter(obj)
            def items(self):
                return [(k, self[k]) for k in self]
        self.__validators = ValidatorCompat()
    return self.__validators
def _process_kwargs(self, **kwargs):
    """
    Process any extra kwargs that are not predefined as constructor params.

    Valid (possibly underscore-nested, e.g. marker_line_color) keys are
    assigned normally. Invalid keys are stored as-is when validation is
    disabled, raised when validation is enabled, and silently dropped when
    _skip_invalid is set.
    """
    for key, value in kwargs.items():
        err = _check_path_in_prop_tree(self, key, error_cast=ValueError)
        if err is None or not self._validate:
            # Either the key resolves to a real property path, or
            # validation is off and the value is stored untouched.
            self[key] = value
        elif not self._skip_invalid:
            # Re-raise the error produced by the property-tree check;
            # it already carries the descriptive
            # _raise_on_invalid_property_error message.
            raise err
@property
def plotly_name(self):
    """
    The plotly name of the object, as passed to the constructor
    (e.g. 'scatter' for a scatter trace).

    Returns
    -------
    str
    """
    return self._plotly_name
@property
def _prop_descriptions(self):
    """
    Formatted string containing all of this object's child properties
    and their descriptions. Overridden by generated subclasses.

    Returns
    -------
    str
    """
    raise NotImplementedError
@property
def _props(self):
"""
Dictionary used to store this object properties. When the object
has a parent, this dict is retreived from the parent. When the
object does not have a parent, this dict is the object's
`_orphan_props` property
Note: Property will return None if the object has a parent and the
object's properties have not been initialized using the
`_init_props` method.
Returns
-------
dict|None
"""
if self.parent is None:
# Use orphan data
return self._orphan_props
else:
# Get data from parent's dict
return self.parent._get_child_props(self)
def _get_child_props(self, child):
    """
    Return properties dict for child

    Parameters
    ----------
    child : BasePlotlyType
        A compound child, or an element of a compound-array child, of
        this object.

    Returns
    -------
    dict|None
        None when this object's properties (or the child's slot) are
        uninitialized.

    Raises
    ------
    ValueError
        If `child` is not a recognized child of this object.
    """
    if self._props is None:
        # If this node's properties are uninitialized then so are its
        # child's
        return None
    else:
        # ### Child a compound property ###
        if child.plotly_name in self:
            from _plotly_utils.basevalidators import (
                CompoundValidator,
                CompoundArrayValidator,
            )
            validator = self._get_validator(child.plotly_name)
            if isinstance(validator, CompoundValidator):
                return self._props.get(child.plotly_name, None)
            # ### Child an element of a compound array property ###
            elif isinstance(validator, CompoundArrayValidator):
                # Locate the child by identity within the current array.
                children = self[child.plotly_name]
                child_ind = BaseFigure._index_is(children, child)
                assert child_ind is not None
                children_props = self._props.get(child.plotly_name, None)
                # The stored list may be shorter than the child's index;
                # in that case the child has no stored props yet.
                return (
                    children_props[child_ind]
                    if children_props is not None
                    and len(children_props) > child_ind
                    else None
                )
        # ### Invalid child ###
        else:
            raise ValueError("Invalid child with name: %s" % child.plotly_name)
def _init_props(self):
"""
Ensure that this object's properties dict has been initialized. When
the object has a parent, this ensures that the parent has an
initialized properties dict with this object's plotly_name as a key.
Returns
-------
None
"""
# Ensure that _data is initialized.
if self._props is not None:
pass
else:
self._parent._init_child_props(self)
def _init_child_props(self, child):
    """
    Ensure that a properties dict has been initialized for a child object

    Parameters
    ----------
    child : BasePlotlyType
        A compound child, or an element of a compound-array child, of
        this object.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If `child` is not a recognized child of this object.
    """
    # Init our own properties
    # -----------------------
    self._init_props()
    # Child a compound property
    # -------------------------
    if child.plotly_name in self._compound_props:
        if child.plotly_name not in self._props:
            self._props[child.plotly_name] = {}
    # Child an element of a compound array property
    # ---------------------------------------------
    elif child.plotly_name in self._compound_array_props:
        # Locate the child by identity within the current array.
        children = self._compound_array_props[child.plotly_name]
        child_ind = BaseFigure._index_is(children, child)
        assert child_ind is not None
        if child.plotly_name not in self._props:
            # Initialize list
            self._props[child.plotly_name] = []
        # Make sure list is long enough for child
        children_list = self._props[child.plotly_name]
        while len(children_list) <= child_ind:
            children_list.append({})
    # Invalid child
    # -------------
    else:
        raise ValueError("Invalid child with name: %s" % child.plotly_name)
def _get_child_prop_defaults(self, child):
    """
    Return default properties dict for child

    Parameters
    ----------
    child : BasePlotlyType
        A compound child, or an element of a compound-array child, of
        this object.

    Returns
    -------
    dict|None
        None when this object's default properties (or the child's slot)
        are uninitialized.

    Raises
    ------
    ValueError
        If `child` is not a recognized child of this object.
    """
    if self._prop_defaults is None:
        # If this node's default properties are uninitialized then so are
        # its child's
        return None
    else:
        # ### Child a compound property ###
        if child.plotly_name in self._compound_props:
            return self._prop_defaults.get(child.plotly_name, None)
        # ### Child an element of a compound array property ###
        elif child.plotly_name in self._compound_array_props:
            # Locate the child by identity within the current array.
            children = self._compound_array_props[child.plotly_name]
            child_ind = BaseFigure._index_is(children, child)
            assert child_ind is not None
            children_props = self._prop_defaults.get(child.plotly_name, None)
            # Defaults list may be shorter than the child's index.
            return (
                children_props[child_ind]
                if children_props is not None and len(children_props) > child_ind
                else None
            )
        # ### Invalid child ###
        else:
            raise ValueError("Invalid child with name: %s" % child.plotly_name)
@property
def _prop_defaults(self):
"""
Return default properties dict
Returns
-------
dict
"""
if self.parent is None:
return None
else:
return self.parent._get_child_prop_defaults(self)
def _get_prop_validator(self, prop):
    """
    Return the validator associated with the specified property

    Parameters
    ----------
    prop: str
        A property that exists in this object

    Returns
    -------
    BaseValidator
    """
    # Resolve deprecated property remapping first; otherwise parse the
    # (possibly nested) property path string.
    if prop in self._mapped_properties:
        prop_path = self._mapped_properties[prop]
    else:
        prop_path = BaseFigure._str_to_dict_path(prop)
    # The validator lives on the object that directly owns the final
    # path component.
    plotly_obj = self[prop_path[:-1]]
    prop = prop_path[-1]
    return plotly_obj._get_validator(prop)
@property
def parent(self):
    """
    Return the object's parent, or None if the object has no parent.
    The parent may be another BasePlotlyType or a BaseFigure (as is the
    case for Layout and Trace objects).

    Returns
    -------
    BasePlotlyType|BaseFigure
    """
    return self._parent
@property
def figure(self):
    """
    Reference to the top-level Figure or FigureWidget that this object
    belongs to. None if the object does not belong to a Figure

    Returns
    -------
    Union[BaseFigure, None]
    """
    # Walk up the parent chain until a BaseFigure is found or the chain
    # runs out (in which case None is returned).
    node = self
    while node is not None and not isinstance(node, BaseFigure):
        node = node.parent
    return node
# Magic Methods
# -------------
def __reduce__(self):
"""
Custom implementation of reduce is used to support deep copying
and pickling
"""
props = self.to_plotly_json()
return (self.__class__, (props,))
def __getitem__(self, prop):
    """
    Get item or nested item from object

    Parameters
    ----------
    prop : str|tuple
        If prop is the name of a property of this object, then the
        property is returned.

        If prop is a nested property path string (e.g. 'foo[1].bar'),
        then a nested property is returned (e.g. obj['foo'][1]['bar'])

        If prop is a path tuple (e.g. ('foo', 1, 'bar')), then a nested
        property is returned (e.g. obj['foo'][1]['bar']).

    Returns
    -------
    Any

    Raises
    ------
    PlotlyKeyError
        If the property path is not valid for this object.
    """
    from _plotly_utils.basevalidators import (
        CompoundValidator,
        CompoundArrayValidator,
        BaseDataValidator,
    )
    # Normalize prop
    # --------------
    # Convert into a property tuple
    orig_prop = prop
    prop = BaseFigure._str_to_dict_path(prop)
    # Handle remapping
    # ----------------
    if prop and prop[0] in self._mapped_properties:
        prop = self._mapped_properties[prop[0]] + prop[1:]
        orig_prop = _remake_path_from_tuple(prop)
    # Handle scalar case
    # ------------------
    # e.g. ('foo',)
    if len(prop) == 1:
        # Unwrap scalar tuple
        prop = prop[0]
        if prop not in self._valid_props:
            self._raise_on_invalid_property_error(_error_to_raise=PlotlyKeyError)(
                prop
            )
        validator = self._get_validator(prop)
        if isinstance(validator, CompoundValidator):
            # Compound child objects are constructed lazily and cached.
            if self._compound_props.get(prop, None) is None:
                # Init compound objects
                self._compound_props[prop] = validator.data_class(
                    _parent=self, plotly_name=prop
                )
            # Update plotly_name value in case the validator applies
            # non-standard name (e.g. imagedefaults instead of image)
            self._compound_props[prop]._plotly_name = prop
            return validator.present(self._compound_props[prop])
        elif isinstance(validator, (CompoundArrayValidator, BaseDataValidator)):
            # Compound-array children are likewise constructed lazily,
            # one element per entry in the stored props list.
            if self._compound_array_props.get(prop, None) is None:
                # Init list of compound objects
                if self._props is not None:
                    self._compound_array_props[prop] = [
                        validator.data_class(_parent=self)
                        for _ in self._props.get(prop, [])
                    ]
                else:
                    self._compound_array_props[prop] = []
            return validator.present(self._compound_array_props[prop])
        elif self._props is not None and prop in self._props:
            # Simple property with an explicitly stored value.
            return validator.present(self._props[prop])
        elif self._prop_defaults is not None:
            # Fall back to the default value supplied by plotly.js.
            return validator.present(self._prop_defaults.get(prop, None))
        else:
            return None
    # Handle non-scalar case
    # ----------------------
    # e.g. ('foo', 1), ()
    else:
        err = _check_path_in_prop_tree(self, orig_prop, error_cast=PlotlyKeyError)
        if err is not None:
            raise err
        # Walk the path one component at a time, re-entering __getitem__.
        res = self
        for p in prop:
            res = res[p]
        return res
def __contains__(self, prop):
    """
    Determine whether object contains a property or nested property

    Parameters
    ----------
    prop : str|tuple
        If prop is a simple string (e.g. 'foo'), then return true if the
        object contains an element named 'foo'

        If prop is a property path string (e.g. 'foo[0].bar'),
        then return true if the object contains the nested elements for
        each entry in the path string (e.g. 'bar' in obj['foo'][0])

        If prop is a property path tuple (e.g. ('foo', 0, 'bar')),
        then return true if the object contains the nested elements for
        each entry in the path string (e.g. 'bar' in obj['foo'][0])

    Returns
    -------
    bool
    """
    prop = BaseFigure._str_to_dict_path(prop)
    # Handle remapping
    if prop and prop[0] in self._mapped_properties:
        prop = self._mapped_properties[prop[0]] + prop[1:]
    obj = self
    for p in prop:
        if isinstance(p, int):
            # Integer path components index into tuple-valued properties.
            if isinstance(obj, tuple) and 0 <= p < len(obj):
                obj = obj[p]
            else:
                return False
        else:
            # String path components must name a valid child property.
            if obj is not None and p in obj._valid_props:
                obj = obj[p]
            else:
                return False
    return True
    def __setitem__(self, prop, value):
        """
        Set the value of a direct child property of this object

        Parameters
        ----------
        prop : str
            The name of a direct child of this object

            Note: Setting nested properties using property path string or
            property path tuples is not supported.
        value
            New property value

        Returns
        -------
        None
        """
        from _plotly_utils.basevalidators import (
            CompoundValidator,
            CompoundArrayValidator,
            BaseDataValidator,
        )

        # Normalize prop
        # --------------
        # Convert into a property tuple
        orig_prop = prop
        prop = BaseFigure._str_to_dict_path(prop)

        # Handle empty case
        # -----------------
        if len(prop) == 0:
            raise KeyError(orig_prop)

        # Handle remapping
        # ----------------
        if prop[0] in self._mapped_properties:
            prop = self._mapped_properties[prop[0]] + prop[1:]

        # Handle scalar case
        # ------------------
        # e.g. ('foo',)
        if len(prop) == 1:
            # ### Unwrap scalar tuple ###
            prop = prop[0]

            if self._validate:
                if prop not in self._valid_props:
                    self._raise_on_invalid_property_error()(prop)

                # ### Get validator for this property ###
                validator = self._get_validator(prop)

                # ### Handle compound property ###
                if isinstance(validator, CompoundValidator):
                    self._set_compound_prop(prop, value)

                # ### Handle compound array property ###
                elif isinstance(validator, (CompoundArrayValidator, BaseDataValidator)):
                    self._set_array_prop(prop, value)

                # ### Handle simple property ###
                else:
                    self._set_prop(prop, value)
            else:
                # Validation disabled: store the raw value without
                # coercion. Graph objects are converted to plain dicts so
                # that _props only ever holds JSON-compatible data.
                # Make sure properties dict is initialized
                self._init_props()

                if isinstance(value, BasePlotlyType):
                    # Extract json from graph objects
                    value = value.to_plotly_json()

                # Check for list/tuple of graph objects
                if (
                    isinstance(value, (list, tuple))
                    and value
                    and isinstance(value[0], BasePlotlyType)
                ):
                    value = [
                        v.to_plotly_json() if isinstance(v, BasePlotlyType) else v
                        for v in value
                    ]

                self._props[prop] = value

                # Remove any already constructed graph object so that it will be
                # reconstructed on property access
                self._compound_props.pop(prop, None)
                self._compound_array_props.pop(prop, None)

        # Handle non-scalar case
        # ----------------------
        # e.g. ('foo', 1), ()
        else:
            err = _check_path_in_prop_tree(self, orig_prop, error_cast=ValueError)
            if err is not None:
                raise err
            res = self
            for p in prop[:-1]:
                res = res[p]

            # Propagate this object's validation setting down to the leaf
            # object before performing the final assignment on it
            res._validate = self._validate
            res[prop[-1]] = value
def __setattr__(self, prop, value):
"""
Parameters
----------
prop : str
The name of a direct child of this object
value
New property value
Returns
-------
None
"""
if prop.startswith("_") or hasattr(self, prop) or prop in self._valid_props:
# Let known properties and private properties through
super(BasePlotlyType, self).__setattr__(prop, value)
else:
# Raise error on unknown public properties
self._raise_on_invalid_property_error()(prop)
def __iter__(self):
"""
Return an iterator over the object's properties
"""
res = list(self._valid_props)
for prop in self._mapped_properties:
res.append(prop)
return iter(res)
def __eq__(self, other):
"""
Test for equality
To be considered equal, `other` must have the same type as this object
and their `to_plotly_json` representaitons must be identical.
Parameters
----------
other
The object to compare against
Returns
-------
bool
"""
if not isinstance(other, self.__class__):
# Require objects to be of the same plotly type
return False
else:
# Compare plotly_json representations
# Use _vals_equal instead of `==` to handle cases where
# underlying dicts contain numpy arrays
return BasePlotlyType._vals_equal(
self._props if self._props is not None else {},
other._props if other._props is not None else {},
)
    @staticmethod
    def _build_repr_for_class(props, class_name, parent_path_str=None):
        """
        Helper to build representation string for a class

        Parameters
        ----------
        class_name : str
            Name of the class being represented
        parent_path_str : str of None (default)
            Name of the class's parent package to display
        props : dict
            Properties to unpack into the constructor

        Returns
        -------
        str
            The representation string
        """
        from plotly.utils import ElidedPrettyPrinter

        if parent_path_str:
            class_name = parent_path_str + "." + class_name

        if len(props) == 0:
            repr_str = class_name + "()"
        else:
            # Long property values are elided so the repr stays readable
            pprinter = ElidedPrettyPrinter(threshold=200, width=120)
            pprint_res = pprinter.pformat(props)

            # pprint_res is indented by 1 space. Add extra 3 spaces for PEP8
            # compliant indent
            body = " " + pprint_res[1:-1].replace("\n", "\n ")

            repr_str = class_name + "({\n " + body + "\n})"

        return repr_str
def __repr__(self):
"""
Customize object representation when displayed in the
terminal/notebook
"""
from _plotly_utils.basevalidators import LiteralValidator
# Get all properties
props = self._props if self._props is not None else {}
# Remove literals (These can't be specified in the constructor)
props = {
p: v
for p, v in props.items()
if p in self._valid_props
and not isinstance(self._get_validator(p), LiteralValidator)
}
# Elide template
if "template" in props:
props["template"] = "..."
# Build repr string
repr_str = BasePlotlyType._build_repr_for_class(
props=props,
class_name=self.__class__.__name__,
parent_path_str=self._parent_path_str,
)
return repr_str
    def _raise_on_invalid_property_error(self, _error_to_raise=None):
        """
        Returns a function that raises informative exception when invalid
        property names are encountered. The _error_to_raise argument allows
        specifying the exception to raise, which is ValueError if None.

        The returned closure accepts the invalid property names as
        positional arguments.

        Parameters
        ----------
        args : list[str]
            List of property names that have already been determined to be
            invalid

        Raises
        ------
        ValueError by default, or _error_to_raise if not None
        """
        if _error_to_raise is None:
            _error_to_raise = ValueError

        def _ret(*args):
            invalid_props = args
            if invalid_props:
                # Singular/plural wording for the error message
                if len(invalid_props) == 1:
                    prop_str = "property"
                    invalid_str = repr(invalid_props[0])
                else:
                    prop_str = "properties"
                    invalid_str = repr(invalid_props)

                module_root = "plotly.graph_objs."
                if self._parent_path_str:
                    full_obj_name = (
                        module_root
                        + self._parent_path_str
                        + "."
                        + self.__class__.__name__
                    )
                else:
                    full_obj_name = module_root + self.__class__.__name__

                # For a single invalid name, try to suggest the closest
                # valid property name; failures here are non-fatal.
                guessed_prop = None
                if len(invalid_props) == 1:
                    try:
                        guessed_prop = find_closest_string(
                            invalid_props[0], self._valid_props
                        )
                    except Exception:
                        pass
                guessed_prop_suggestion = ""
                if guessed_prop is not None:
                    guessed_prop_suggestion = 'Did you mean "%s"?' % (guessed_prop,)
                # NOTE: the suggestion is deliberately shown both before and
                # after the (long) valid-property listing for visibility
                raise _error_to_raise(
                    "Invalid {prop_str} specified for object of type "
                    "{full_obj_name}: {invalid_str}\n"
                    "\n{guessed_prop_suggestion}\n"
                    "\n Valid properties:\n"
                    "{prop_descriptions}"
                    "\n{guessed_prop_suggestion}\n".format(
                        prop_str=prop_str,
                        full_obj_name=full_obj_name,
                        invalid_str=invalid_str,
                        prop_descriptions=self._prop_descriptions,
                        guessed_prop_suggestion=guessed_prop_suggestion,
                    )
                )

        return _ret
def update(self, dict1=None, overwrite=False, **kwargs):
"""
Update the properties of an object with a dict and/or with
keyword arguments.
This recursively updates the structure of the original
object with the values in the input dict / keyword arguments.
Parameters
----------
dict1 : dict
Dictionary of properties to be updated
overwrite: bool
If True, overwrite existing properties. If False, apply updates
to existing properties recursively, preserving existing
properties that are not specified in the update operation.
kwargs :
Keyword/value pair of properties to be updated
Returns
-------
BasePlotlyType
Updated plotly object
"""
if self.figure:
with self.figure.batch_update():
BaseFigure._perform_update(self, dict1, overwrite=overwrite)
BaseFigure._perform_update(self, kwargs, overwrite=overwrite)
else:
BaseFigure._perform_update(self, dict1, overwrite=overwrite)
BaseFigure._perform_update(self, kwargs, overwrite=overwrite)
return self
def pop(self, key, *args):
"""
Remove the value associated with the specified key and return it
Parameters
----------
key: str
Property name
dflt
The default value to return if key was not found in object
Returns
-------
value
The removed value that was previously associated with key
Raises
------
KeyError
If key is not in object and no dflt argument specified
"""
# Handle default
if key not in self and args:
return args[0]
elif key in self:
val = self[key]
self[key] = None
return val
else:
raise KeyError(key)
@property
def _in_batch_mode(self):
"""
True if the object belongs to a figure that is currently in batch mode
Returns
-------
bool
"""
return self.parent and self.parent._in_batch_mode
    def _set_prop(self, prop, val):
        """
        Set the value of a simple property

        Parameters
        ----------
        prop : str
            Name of a simple (non-compound, non-array) property
        val
            The new property value

        Returns
        -------
        Any
            The coerced assigned value
        """
        # val is Undefined
        # ----------------
        if val is Undefined:
            # Do nothing
            return

        # Import value
        # ------------
        validator = self._get_validator(prop)

        try:
            val = validator.validate_coerce(val)
        except ValueError as err:
            # In skip-invalid mode, bad values are silently dropped rather
            # than raising (used during lossy construction from dicts)
            if self._skip_invalid:
                return
            else:
                raise err

        # val is None
        # -----------
        if val is None:
            # Check if we should send null update
            if self._props and prop in self._props:
                # Remove property if not in batch mode
                if not self._in_batch_mode:
                    self._props.pop(prop)

                # Send property update message
                self._send_prop_set(prop, val)

        # val is valid value
        # ------------------
        else:
            # Make sure properties dict is initialized
            self._init_props()

            # Check whether the value is a change
            if prop not in self._props or not BasePlotlyType._vals_equal(
                self._props[prop], val
            ):
                # Set property value if not in batch mode
                # (in batch mode, the write is deferred and only the update
                # message is sent)
                if not self._in_batch_mode:
                    self._props[prop] = val

                # Send property update message
                self._send_prop_set(prop, val)

        return val
    def _set_compound_prop(self, prop, val):
        """
        Set the value of a compound property

        Parameters
        ----------
        prop : str
            Name of a compound property
        val
            The new property value

        Returns
        -------
        BasePlotlyType
            The coerced assigned object
        """
        # val is Undefined
        # ----------------
        if val is Undefined:
            # Do nothing
            return

        # Import value
        # ------------
        validator = self._get_validator(prop)
        val = validator.validate_coerce(val, skip_invalid=self._skip_invalid)

        # Save deep copies of current and new states
        # ------------------------------------------
        # Deep copies are taken so that the change-detection comparison and
        # the orphan-data bookkeeping below are unaffected by reparenting.
        curr_val = self._compound_props.get(prop, None)
        if curr_val is not None:
            curr_dict_val = deepcopy(curr_val._props)
        else:
            curr_dict_val = None

        if val is not None:
            new_dict_val = deepcopy(val._props)
        else:
            new_dict_val = None

        # Update _props dict
        # ------------------
        if not self._in_batch_mode:
            if not new_dict_val:
                if self._props and prop in self._props:
                    self._props.pop(prop)
            else:
                self._init_props()
                self._props[prop] = new_dict_val

        # Send update if there was a change in value
        # ------------------------------------------
        if not BasePlotlyType._vals_equal(curr_dict_val, new_dict_val):
            self._send_prop_set(prop, new_dict_val)

        # Reparent
        # --------
        # ### Reparent new value and clear orphan data ###
        if isinstance(val, BasePlotlyType):
            val._parent = self
            val._orphan_props.clear()

        # ### Unparent old value and update orphan data ###
        # The displaced object keeps a copy of its props so it remains
        # usable after being detached from this parent.
        if curr_val is not None:
            if curr_dict_val is not None:
                curr_val._orphan_props.update(curr_dict_val)
            curr_val._parent = None

        # Update _compound_props
        # ----------------------
        self._compound_props[prop] = val
        return val
    def _set_array_prop(self, prop, val):
        """
        Set the value of a compound array property

        Parameters
        ----------
        prop : str
            Name of a compound array property
        val
            The new property value

        Returns
        -------
        tuple[BasePlotlyType]
            The coerced assigned object
        """
        # val is Undefined
        # ----------------
        if val is Undefined:
            # Do nothing
            return

        # Import value
        # ------------
        validator = self._get_validator(prop)
        val = validator.validate_coerce(val, skip_invalid=self._skip_invalid)

        # Save deep copies of current and new states
        # ------------------------------------------
        # Copies are taken before reparenting so change detection and
        # orphan bookkeeping see the pre-assignment state.
        curr_val = self._compound_array_props.get(prop, None)
        if curr_val is not None:
            curr_dict_vals = [deepcopy(cv._props) for cv in curr_val]
        else:
            curr_dict_vals = None

        if val is not None:
            new_dict_vals = [deepcopy(nv._props) for nv in val]
        else:
            new_dict_vals = None

        # Update _props dict
        # ------------------
        if not self._in_batch_mode:
            if not new_dict_vals:
                if self._props and prop in self._props:
                    self._props.pop(prop)
            else:
                self._init_props()
                self._props[prop] = new_dict_vals

        # Send update if there was a change in value
        # ------------------------------------------
        if not BasePlotlyType._vals_equal(curr_dict_vals, new_dict_vals):
            self._send_prop_set(prop, new_dict_vals)

        # Reparent
        # --------
        # ### Reparent new values and clear orphan data ###
        if val is not None:
            for v in val:
                v._orphan_props.clear()
                v._parent = self

        # ### Unparent old value and update orphan data ###
        # Displaced elements keep a copy of their props so they remain
        # usable after being detached from this parent.
        if curr_val is not None:
            for cv, cv_dict in zip(curr_val, curr_dict_vals):
                if cv_dict is not None:
                    cv._orphan_props.update(cv_dict)
                cv._parent = None

        # Update _compound_array_props
        # ----------------------------
        self._compound_array_props[prop] = val
        return val
def _send_prop_set(self, prop_path_str, val):
"""
Notify parent that a property has been set to a new value
Parameters
----------
prop_path_str : str
Property path string (e.g. 'foo[0].bar') of property that
was set, relative to this object
val
New value for property. Either a simple value, a dict,
or a tuple of dicts. This should *not* be a BasePlotlyType object.
Returns
-------
None
"""
raise NotImplementedError()
def _prop_set_child(self, child, prop_path_str, val):
"""
Propagate property setting notification from child to parent
Parameters
----------
child : BasePlotlyType
Child object
prop_path_str : str
Property path string (e.g. 'foo[0].bar') of property that
was set, relative to `child`
val
New value for property. Either a simple value, a dict,
or a tuple of dicts. This should *not* be a BasePlotlyType object.
Returns
-------
None
"""
# Child is compound array property
# --------------------------------
child_prop_val = getattr(self, child.plotly_name)
if isinstance(child_prop_val, (list, tuple)):
child_ind = BaseFigure._index_is(child_prop_val, child)
obj_path = "{child_name}.{child_ind}.{prop}".format(
child_name=child.plotly_name, child_ind=child_ind, prop=prop_path_str
)
# Child is compound property
# --------------------------
else:
obj_path = "{child_name}.{prop}".format(
child_name=child.plotly_name, prop=prop_path_str
)
# Propagate to parent
# -------------------
self._send_prop_set(obj_path, val)
def _restyle_child(self, child, prop, val):
"""
Propagate _restyle_child to parent
Note: This method must match the name and signature of the
corresponding method on BaseFigure
"""
self._prop_set_child(child, prop, val)
def _relayout_child(self, child, prop, val):
"""
Propagate _relayout_child to parent
Note: This method must match the name and signature of the
corresponding method on BaseFigure
"""
self._prop_set_child(child, prop, val)
# Callbacks
# ---------
def _dispatch_change_callbacks(self, changed_paths):
"""
Execute the appropriate change callback functions given a set of
changed property path tuples
Parameters
----------
changed_paths : set[tuple[int|str]]
Returns
-------
None
"""
# Loop over registered callbacks
# ------------------------------
for prop_path_tuples, callbacks in self._change_callbacks.items():
# ### Compute callback paths that changed ###
common_paths = changed_paths.intersection(set(prop_path_tuples))
if common_paths:
# #### Invoke callback ####
callback_args = [self[cb_path] for cb_path in prop_path_tuples]
for callback in callbacks:
callback(self, *callback_args)
    def on_change(self, callback, *args, **kwargs):
        """
        Register callback function to be called when certain properties or
        subproperties of this object are modified.

        Callback will be invoked whenever ANY of these properties is
        modified. Furthermore, the callback will only be invoked once even
        if multiple properties are modified during the same restyle /
        relayout / update operation.

        Parameters
        ----------
        callback : function
            Function that accepts 1 + len(`args`) parameters. First parameter
            is this object. Second through last parameters are the
            property / subpropery values referenced by args.
        args : list[str|tuple[int|str]]
            List of property references where each reference may be one of:

              1) A property name string (e.g. 'foo') for direct properties
              2) A property path string (e.g. 'foo[0].bar') for
                 subproperties
              3) A property path tuple (e.g. ('foo', 0, 'bar')) for
                 subproperties

        append : bool
            True if callback should be appended to previously registered
            callback on the same properties, False if callback should replace
            previously registered callbacks on the same properties. Defaults
            to False.

        Examples
        --------

        Register callback that prints out the range extents of the xaxis and
        yaxis whenever either either of them changes.

        >>> import plotly.graph_objects as go
        >>> fig = go.Figure(go.Scatter(x=[1, 2], y=[1, 0]))
        >>> fig.layout.on_change(
        ...   lambda obj, xrange, yrange: print("%s-%s" % (xrange, yrange)),
        ...   ('xaxis', 'range'), ('yaxis', 'range'))


        Returns
        -------
        None
        """
        # Warn if object not descendent of a figure
        # -----------------------------------------
        if not self.figure:
            class_name = self.__class__.__name__
            msg = """
{class_name} object is not a descendant of a Figure.
on_change callbacks are not supported in this case.
""".format(
                class_name=class_name
            )
            raise ValueError(msg)

        # Validate args not empty
        # -----------------------
        if len(args) == 0:
            raise ValueError("At least one change property must be specified")

        # Validate args
        # -------------
        # Each arg must reference an existing (possibly nested) property
        invalid_args = [arg for arg in args if arg not in self]

        if invalid_args:
            raise ValueError("Invalid property specification(s): %s" % invalid_args)

        # Process append option
        # ---------------------
        append = kwargs.get("append", False)

        # Normalize args to path tuples
        # -----------------------------
        # Path tuples are hashable, so the full arg collection can be used
        # as the registration key below
        arg_tuples = tuple([BaseFigure._str_to_dict_path(a) for a in args])

        # Initialize callbacks list
        # -------------------------
        # Initialize an empty callbacks list if there are no previously
        # defined callbacks for this collection of args, or if append is False
        if arg_tuples not in self._change_callbacks or not append:
            self._change_callbacks[arg_tuples] = []

        # Register callback
        # -----------------
        self._change_callbacks[arg_tuples].append(callback)
def to_plotly_json(self):
"""
Return plotly JSON representation of object as a Python dict
Returns
-------
dict
"""
return deepcopy(self._props if self._props is not None else {})
@staticmethod
def _vals_equal(v1, v2):
"""
Recursive equality function that handles nested dicts / tuples / lists
that contain numpy arrays.
v1
First value to compare
v2
Second value to compare
Returns
-------
bool
True if v1 and v2 are equal, False otherwise
"""
np = get_module("numpy", should_load=False)
if np is not None and (
isinstance(v1, np.ndarray) or isinstance(v2, np.ndarray)
):
return np.array_equal(v1, v2)
elif isinstance(v1, (list, tuple)):
# Handle recursive equality on lists and tuples
return (
isinstance(v2, (list, tuple))
and len(v1) == len(v2)
and all(BasePlotlyType._vals_equal(e1, e2) for e1, e2 in zip(v1, v2))
)
elif isinstance(v1, dict):
# Handle recursive equality on dicts
return (
isinstance(v2, dict)
and set(v1.keys()) == set(v2.keys())
and all(BasePlotlyType._vals_equal(v1[k], v2[k]) for k in v1)
)
else:
return v1 == v2
class BaseLayoutHierarchyType(BasePlotlyType):
    """
    Base class for all types in the layout hierarchy
    """

    def __init__(self, plotly_name, **kwargs):
        super(BaseLayoutHierarchyType, self).__init__(plotly_name, **kwargs)

    @property
    def _parent_path_str(self):
        # The layout is the root of its hierarchy: no parent path
        pass

    def _send_prop_set(self, prop_path_str, val):
        # Property assignments in the layout hierarchy propagate upward
        # as relayout operations
        if self.parent:
            self.parent._relayout_child(self, prop_path_str, val)
class BaseLayoutType(BaseLayoutHierarchyType):
    """
    Base class for the layout type. The Layout class itself is a
    code-generated subclass.
    """

    # Dynamic properties
    # ------------------
    # Unlike all other plotly types, BaseLayoutType has dynamic properties.
    # These are used when a layout has multiple instances of subplot types
    # (xaxis2, yaxis3, geo4, etc.)
    #
    # The base version of each subplot type is defined in the schema and code
    # generated. So the Layout subclass has statically defined properties
    # for xaxis, yaxis, geo, ternary, and scene. But, we need to dynamically
    # generate properties/validators as needed for xaxis2, yaxis3, etc.
    @property
    def _subplotid_validators(self):
        """
        dict of validator classes for each subplot type

        Returns
        -------
        dict
        """
        raise NotImplementedError()

    def _subplot_re_match(self, prop):
        # Abstract: return a regex match object when `prop` is a subplot id
        # (base name + integer suffix); implemented by the generated
        # Layout subclass.
        raise NotImplementedError()

    def __init__(self, plotly_name, **kwargs):
        """
        Construct a new BaseLayoutType object

        Parameters
        ----------
        plotly_name : str
            Name of the object (should always be 'layout')
        kwargs : dict[str, any]
            Properties that were not recognized by the Layout subclass.
            These are subplot identifiers (xaxis2, geo4, etc.) or they are
            invalid properties.
        """
        # Validate inputs
        # ---------------
        assert plotly_name == "layout"

        # Call superclass constructor
        # ---------------------------
        # NOTE(review): super() is given BaseLayoutHierarchyType, so this
        # resolves past the immediate parent to BasePlotlyType.__init__.
        # Harmless today because the parent __init__ only forwards, but
        # presumably intentional — confirm before changing.
        super(BaseLayoutHierarchyType, self).__init__(plotly_name)

        # Initialize _subplotid_props
        # ---------------------------
        # This is a set storing the names of the layout's dynamic subplot
        # properties
        self._subplotid_props = set()

        # Process kwargs
        # --------------
        self._process_kwargs(**kwargs)

    def _process_kwargs(self, **kwargs):
        """
        Process any extra kwargs that are not predefined as constructor params
        """
        # Split kwargs into subplot ids (xaxis2, geo3, ...) and the rest;
        # the rest is handled by the standard kwarg processing (which
        # raises on invalid names)
        unknown_kwargs = {
            k: v for k, v in kwargs.items() if not self._subplot_re_match(k)
        }
        super(BaseLayoutHierarchyType, self)._process_kwargs(**unknown_kwargs)

        subplot_kwargs = {k: v for k, v in kwargs.items() if self._subplot_re_match(k)}

        for prop, value in subplot_kwargs.items():
            self._set_subplotid_prop(prop, value)

    def _set_subplotid_prop(self, prop, value):
        """
        Set a subplot property on the layout

        Parameters
        ----------
        prop : str
            A valid subplot property
        value
            Subplot value
        """
        # Get regular expression match
        # ----------------------------
        # Note: we already tested that match exists in the constructor
        match = self._subplot_re_match(prop)
        subplot_prop = match.group(1)
        suffix_digit = int(match.group(2))

        # Validate suffix digit
        # ---------------------
        if suffix_digit == 0:
            raise TypeError(
                "Subplot properties may only be suffixed by an "
                "integer >= 1\n"
                "Received {k}".format(k=prop)
            )

        # Handle suffix_digit == 1
        # ------------------------
        # In this case we remove suffix digit (e.g. xaxis1 -> xaxis)
        if suffix_digit == 1:
            prop = subplot_prop

        # Construct and add validator
        # ---------------------------
        if prop not in self._valid_props:
            self._valid_props.add(prop)

        # Import value
        # ------------
        # Use the standard _set_compound_prop method to
        # validate/coerce/import subplot value. This must be called AFTER
        # the validator instance is added to self._validators above.
        self._set_compound_prop(prop, value)
        self._subplotid_props.add(prop)

    def _strip_subplot_suffix_of_1(self, prop):
        """
        Strip the suffix for subplot property names that have a suffix of 1.
        All other properties are returned unchanged

        e.g. 'xaxis1' -> 'xaxis'

        Parameters
        ----------
        prop : str|tuple

        Returns
        -------
        str|tuple
        """
        # Let parent handle non-scalar cases
        # ----------------------------------
        # e.g. ('xaxis', 'range') or 'xaxis.range'
        prop_tuple = BaseFigure._str_to_dict_path(prop)
        if len(prop_tuple) != 1 or not isinstance(prop_tuple[0], string_types):
            return prop
        else:
            # Unwrap to scalar string
            prop = prop_tuple[0]

        # Handle subplot suffix digit of 1
        # --------------------------------
        # Remove digit of 1 from subplot id (e.g.. xaxis1 -> xaxis)
        match = self._subplot_re_match(prop)

        if match:
            subplot_prop = match.group(1)
            suffix_digit = int(match.group(2))
            if subplot_prop and suffix_digit == 1:
                prop = subplot_prop

        return prop

    def _get_prop_validator(self, prop):
        """
        Custom _get_prop_validator that handles subplot properties
        """
        prop = self._strip_subplot_suffix_of_1(prop)
        return super(BaseLayoutHierarchyType, self)._get_prop_validator(prop)

    def __getattr__(self, prop):
        """
        Custom __getattr__ that handles dynamic subplot properties
        """
        prop = self._strip_subplot_suffix_of_1(prop)
        if prop != "_subplotid_props" and prop in self._subplotid_props:
            # Dynamic subplot properties live in _compound_props only
            validator = self._get_validator(prop)
            return validator.present(self._compound_props[prop])
        else:
            return super(BaseLayoutHierarchyType, self).__getattribute__(prop)

    def __getitem__(self, prop):
        """
        Custom __getitem__ that handles dynamic subplot properties
        """
        prop = self._strip_subplot_suffix_of_1(prop)
        return super(BaseLayoutHierarchyType, self).__getitem__(prop)

    def __contains__(self, prop):
        """
        Custom __contains__ that handles dynamic subplot properties
        """
        prop = self._strip_subplot_suffix_of_1(prop)
        return super(BaseLayoutHierarchyType, self).__contains__(prop)

    def __setitem__(self, prop, value):
        """
        Custom __setitem__ that handles dynamic subplot properties
        """
        # Convert prop to prop tuple
        # --------------------------
        prop_tuple = BaseFigure._str_to_dict_path(prop)
        if len(prop_tuple) != 1 or not isinstance(prop_tuple[0], string_types):
            # Let parent handle non-scalar non-string cases
            super(BaseLayoutHierarchyType, self).__setitem__(prop, value)
            return
        else:
            # Unwrap prop tuple
            prop = prop_tuple[0]

        # Check for subplot assignment
        # ----------------------------
        match = self._subplot_re_match(prop)
        if match is None:
            # Set as ordinary property
            super(BaseLayoutHierarchyType, self).__setitem__(prop, value)
        else:
            # Set as subplotid property
            self._set_subplotid_prop(prop, value)

    def __setattr__(self, prop, value):
        """
        Custom __setattr__ that handles dynamic subplot properties
        """
        # Check for subplot assignment
        # ----------------------------
        match = self._subplot_re_match(prop)
        if match is None:
            # Set as ordinary property
            super(BaseLayoutHierarchyType, self).__setattr__(prop, value)
        else:
            # Set as subplotid property
            self._set_subplotid_prop(prop, value)

    def __dir__(self):
        """
        Custom __dir__ that handles dynamic subplot properties
        """
        # Include any active subplot values
        if six.PY2:
            # Python 2 has no object.__dir__, so the attribute listing is
            # reconstructed by walking the class hierarchy by hand

            def get_attrs(obj):
                import types

                if not hasattr(obj, "__dict__"):
                    return []
                if not isinstance(obj.__dict__, (dict, types.DictProxyType)):
                    raise TypeError("%s.__dict__ is not a dictionary" "" % obj.__name__)
                return obj.__dict__.keys()

            def dir2(obj):
                attrs = set()
                if not hasattr(obj, "__bases__"):
                    # obj is an instance
                    if not hasattr(obj, "__class__"):
                        # slots
                        return sorted(get_attrs(obj))
                    klass = obj.__class__
                    attrs.update(get_attrs(klass))
                else:
                    # obj is a class
                    klass = obj

                for cls in klass.__bases__:
                    attrs.update(get_attrs(cls))
                    attrs.update(dir2(cls))
                attrs.update(get_attrs(obj))
                return list(attrs)

            return dir2(self) + sorted(self._subplotid_props)
        else:
            return list(super(BaseLayoutHierarchyType, self).__dir__()) + sorted(
                self._subplotid_props
            )
class BaseTraceHierarchyType(BasePlotlyType):
    """
    Base class for all types in the trace hierarchy
    """

    def __init__(self, plotly_name, **kwargs):
        super(BaseTraceHierarchyType, self).__init__(plotly_name, **kwargs)

    def _send_prop_set(self, prop_path_str, val):
        # Property assignments in the trace hierarchy propagate upward
        # as restyle operations
        if self.parent:
            self.parent._restyle_child(self, prop_path_str, val)
class BaseTraceType(BaseTraceHierarchyType):
"""
Base class for the all trace types.
Specific trace type classes (Scatter, Bar, etc.) are code generated as
subclasses of this class.
"""
def __init__(self, plotly_name, **kwargs):
super(BaseTraceHierarchyType, self).__init__(plotly_name, **kwargs)
# Initialize callback function lists
# ----------------------------------
# ### Callbacks to be called on hover ###
self._hover_callbacks = []
# ### Callbacks to be called on unhover ###
self._unhover_callbacks = []
# ### Callbacks to be called on click ###
self._click_callbacks = []
# ### Callbacks to be called on selection ###
self._select_callbacks = []
# ### Callbacks to be called on deselect ###
self._deselect_callbacks = []
# ### Trace index in figure ###
self._trace_ind = None
    # uid
    # ---
    # All trace types must have a top-level UID
    @property
    def uid(self):
        # Abstract: implemented by the code-generated trace subclasses
        raise NotImplementedError

    @uid.setter
    def uid(self, val):
        # Abstract: implemented by the code-generated trace subclasses
        raise NotImplementedError
# Hover
# -----
def on_hover(self, callback, append=False):
"""
Register function to be called when the user hovers over one or more
points in this trace
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.InputDeviceState object
append : bool
If False (the default), this callback replaces any previously
defined on_hover callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points, InputDeviceState
>>> points, state = Points(), InputDeviceState()
>>> def hover_fn(trace, points, state):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_hover(hover_fn)
Note: The creation of the `points` and `state` objects is optional,
it's simply a convenience to help the text editor perform completion
on the arguments inside `hover_fn`
"""
if not append:
del self._hover_callbacks[:]
if callback:
self._hover_callbacks.append(callback)
def _dispatch_on_hover(self, points, state):
"""
Dispatch points and device state all all hover callbacks
"""
for callback in self._hover_callbacks:
callback(self, points, state)
# Unhover
# -------
def on_unhover(self, callback, append=False):
"""
Register function to be called when the user unhovers away from one
or more points in this trace.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.InputDeviceState object
append : bool
If False (the default), this callback replaces any previously
defined on_unhover callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points, InputDeviceState
>>> points, state = Points(), InputDeviceState()
>>> def unhover_fn(trace, points, state):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_unhover(unhover_fn)
Note: The creation of the `points` and `state` objects is optional,
it's simply a convenience to help the text editor perform completion
on the arguments inside `unhover_fn`
"""
if not append:
del self._unhover_callbacks[:]
if callback:
self._unhover_callbacks.append(callback)
def _dispatch_on_unhover(self, points, state):
"""
Dispatch points and device state all all hover callbacks
"""
for callback in self._unhover_callbacks:
callback(self, points, state)
# Click
# -----
def on_click(self, callback, append=False):
"""
Register function to be called when the user clicks on one or more
points in this trace.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.InputDeviceState object
append : bool
If False (the default), this callback replaces any previously
defined on_click callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points, InputDeviceState
>>> points, state = Points(), InputDeviceState()
>>> def click_fn(trace, points, state):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_click(click_fn)
Note: The creation of the `points` and `state` objects is optional,
it's simply a convenience to help the text editor perform completion
on the arguments inside `click_fn`
"""
if not append:
del self._click_callbacks[:]
if callback:
self._click_callbacks.append(callback)
    def _dispatch_on_click(self, points, state):
        """
        Invoke every registered click callback, in registration order,
        passing this trace, the points, and the input-device state.
        """
        for callback in self._click_callbacks:
            callback(self, points, state)
# Select
# ------
def on_selection(self, callback, append=False):
"""
Register function to be called when the user selects one or more
points in this trace.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 4 arguments
- this trace
- plotly.callbacks.Points object
- plotly.callbacks.BoxSelector or plotly.callbacks.LassoSelector
append : bool
If False (the default), this callback replaces any previously
defined on_selection callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points
>>> points = Points()
>>> def selection_fn(trace, points, selector):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_selection(selection_fn)
Note: The creation of the `points` object is optional,
it's simply a convenience to help the text editor perform completion
on the `points` arguments inside `selection_fn`
"""
if not append:
del self._select_callbacks[:]
if callback:
self._select_callbacks.append(callback)
    def _dispatch_on_selection(self, points, selector):
        """
        Invoke registered selection callbacks with the selected points and
        the box/lasso selector state, after syncing `selectedpoints`.
        """
        if "selectedpoints" in self:
            # Update the selectedpoints property, which will notify all views
            # of the selection change. This is a special case because no
            # restyle event is emitted by plotly.js on selection events
            # even though these events update the selectedpoints property.
            self.selectedpoints = points.point_inds
        for callback in self._select_callbacks:
            callback(self, points, selector)
# deselect
# --------
def on_deselect(self, callback, append=False):
"""
Register function to be called when the user deselects points
in this trace using doubleclick.
Note: Callbacks will only be triggered when the trace belongs to a
instance of plotly.graph_objs.FigureWidget and it is displayed in an
ipywidget context. Callbacks will not be triggered on figures
that are displayed using plot/iplot.
Parameters
----------
callback
Callable function that accepts 3 arguments
- this trace
- plotly.callbacks.Points object
append : bool
If False (the default), this callback replaces any previously
defined on_deselect callbacks for this trace. If True,
this callback is appended to the list of any previously defined
callbacks.
Returns
-------
None
Examples
--------
>>> import plotly.graph_objects as go
>>> from plotly.callbacks import Points
>>> points = Points()
>>> def deselect_fn(trace, points):
... inds = points.point_inds
... # Do something
>>> trace = go.Scatter(x=[1, 2], y=[3, 0])
>>> trace.on_deselect(deselect_fn)
Note: The creation of the `points` object is optional,
it's simply a convenience to help the text editor perform completion
on the `points` arguments inside `selection_fn`
"""
if not append:
del self._deselect_callbacks[:]
if callback:
self._deselect_callbacks.append(callback)
    def _dispatch_on_deselect(self, points):
        """
        Invoke registered deselection callbacks, after clearing the
        `selectedpoints` property.
        """
        if "selectedpoints" in self:
            # Update the selectedpoints property, which will notify all views
            # of the selection change. This is a special case because no
            # restyle event is emitted by plotly.js on selection events
            # even though these events update the selectedpoints property.
            self.selectedpoints = None
        for callback in self._deselect_callbacks:
            callback(self, points)
class BaseFrameHierarchyType(BasePlotlyType):
    """
    Base class for all types in the frame hierarchy
    """
    def __init__(self, plotly_name, **kwargs):
        super(BaseFrameHierarchyType, self).__init__(plotly_name, **kwargs)
    def _send_prop_set(self, prop_path_str, val):
        # Note: Frames are not supported by FigureWidget, and updates are not
        # propagated to parents
        pass
    def _restyle_child(self, child, key_path_str, val):
        # Note: Frames are not supported by FigureWidget, and updates are not
        # propagated to parents
        pass
    def on_change(self, callback, *args):
        # Frames never emit change events, so registering a callback is an error.
        raise NotImplementedError("Change callbacks are not supported on Frames")
    def _get_child_props(self, child):
        """
        Return the properties dict for a child trace or child layout
        Note: this method must match the name/signature of one on
        BasePlotlyType
        Parameters
        ----------
        child : BaseTraceType | BaseLayoutType
        Returns
        -------
        dict
        """
        # Try to find index of child as a trace
        # -------------------------------------
        try:
            # _index_is raises ValueError when `child` is not one of our traces
            trace_index = BaseFigure._index_is(self.data, child)
        except ValueError:
            trace_index = None
        # Child is a trace
        # ----------------
        if trace_index is not None:
            if "data" in self._props:
                return self._props["data"][trace_index]
            else:
                return None
        # Child is the layout
        # -------------------
        elif child is self.layout:
            return self._props.get("layout", None)
        # Unknown child
        # -------------
        else:
            raise ValueError("Unrecognized child: %s" % child)
| 35.463453 | 101 | 0.560317 |
7954dec9730d49ebd5d8f8c135f57f0c8a984d3f | 769 | py | Python | neat/__init__.py | hugoaboud/neat-python | 73618fdfb393df8b6816213547b2ade419151afc | [
"BSD-3-Clause"
] | 3 | 2022-01-30T19:31:26.000Z | 2022-03-14T01:32:28.000Z | neat/__init__.py | hugoaboud/neat-python | 73618fdfb393df8b6816213547b2ade419151afc | [
"BSD-3-Clause"
] | null | null | null | neat/__init__.py | hugoaboud/neat-python | 73618fdfb393df8b6816213547b2ade419151afc | [
"BSD-3-Clause"
] | null | null | null | """A NEAT (NeuroEvolution of Augmenting Topologies) implementation"""
import neat.nn as nn
import neat.ctrnn as ctrnn
import neat.iznn as iznn
import neat.nsga2 as nsga2
import neat.distributed as distributed
from neat.config import Config
from neat.population import Population, CompleteExtinctionException
from neat.genome import DefaultGenome
from neat.reproduction import DefaultReproduction
from neat.stagnation import DefaultStagnation
from neat.reporting import StdOutReporter
from neat.species import DefaultSpeciesSet
from neat.statistics import StatisticsReporter
from neat.parallel import ParallelEvaluator
from neat.distributed import DistributedEvaluator, host_is_local
from neat.threaded import ThreadedEvaluator
from neat.checkpoint import Checkpointer
| 38.45 | 69 | 0.863459 |
7954df0dc39e2e805a2b19212fb5304428672822 | 3,240 | py | Python | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/whilevt.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/whilevt.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/whilevt.py | tchamabe1979/exareme | 462983e4feec7808e1fd447d02901502588a8879 | [
"MIT"
] | null | null | null | """
.. function:: whilevt([from:0,[to:10, step:1]], query)
Returns a range of integer numbers while a query's result is true.
:Returned table schema:
- *value* int
Number in range.
:from:
Number to begin from. Default is 0
:to:
Number to reach. Default is 10. The *to* number is not returned
:step:
Step to augment the returned numbers. Default is 1
Examples::
>>> sql("select * from range()")
C1
--
0
1
2
3
4
5
6
7
8
9
>>> sql("select * from range('from:1','to:11')")
C1
--
1
2
3
4
5
6
7
8
9
10
>>> sql("select * from range('from:2','to:15','step:3')")
C1
--
2
5
8
11
14
>>> sql("select * from range(1,10,2)")
C1
--
1
3
5
7
9
>>> sql("select * from range(5)")
C1
--
1
2
3
4
5
"""
import functions
import vtbase
# Module flags read by the madis operator loader — presumably marking this
# vtable as available and as one that executes its own queries against the
# host database; TODO confirm against the vtbase/functions machinery.
registered = True
external_query = True
class WhileVT(vtbase.VT):
    # Virtual table that yields consecutive integers for as long as the
    # supplied SQL query keeps returning 1 (see module docstring).
    def VTiter(self, *parsedArgs, **envars):
        """Generator protocol for madis vtables: first yields the output
        schema [('C1', 'int')], then one single-column row per iteration
        while `query` evaluates to 1.
        NOTE(review): `stepv` is parsed from the 'step' argument but never
        used below — the counter always advances by 1; confirm whether the
        docstring's 'step' behavior was intended.
        """
        largs, dictargs = self.full_parse(parsedArgs)
        # Defaults matching the module docstring.
        fromv = 0
        tov = None
        stepv = 1
        checkfirst = True
        query = 'select 1'
        con = None
        if 'from' in dictargs:
            fromv = int(dictargs['from'])
        if 'to' in dictargs:
            tov = int(dictargs['to'])
        if 'step' in dictargs:
            stepv = int(dictargs['step'])
        if 'checkfirst' in dictargs and dictargs['checkfirst'] in ('f', 'F', '0'):
            checkfirst = False
        # Positional arguments override the named ones.
        if len(largs) >= 1:
            fromv = int(largs[0])
        if len(largs) >= 2:
            tov = int(largs[1])
        if len(largs) >= 3:
            stepv = int(largs[2])
        # A single positional argument N means the inclusive range 1..N.
        if len(largs) == 1:
            fromv = 1
            tov = int(largs[0]) + 1
        # Connect to the executing database (or an in-memory one) so the
        # guard query can be evaluated.
        if functions.variables.execdb is None:
            con = functions.Connection('')
        else:
            con = functions.Connection(functions.variables.execdb)
        functions.register(con)
        if 'query' not in dictargs:
            raise functions.OperatorError(__name__.rsplit('.')[-1], "Needs a query")
        else:
            query = dictargs['query']
        # First yield is the schema, per the vtbase generator contract.
        yield [('C1', 'int')]
        if checkfirst:
            cur = con.cursor()
            res = list(cur.execute(query))
            cur.close()
            # Stop before emitting anything unless the guard returns 1.
            if len(res) == 0 or len(res[0]) == 0 or res[0][0] != 1:
                return
        yield (fromv,)
        while True:
            cur = con.cursor()
            res = list(cur.execute(query))
            cur.close()
            if len(res) == 0 or len(res[0]) == 0 or res[0][0] != 1:
                return
            fromv += 1
            # The upper bound `tov` is exclusive.
            if tov is not None and fromv >= tov:
                return
            yield (fromv,)
def Source():
    """Factory hook used by the madis vtable machinery to obtain this operator."""
    vt_generator = vtbase.VTGenerator(WhileVT)
    return vt_generator
# Self-test hook: when imported standalone (no dotted module name), run the
# shared madis test harness; as __main__, run the docstring doctests.
if not ('.' in __name__):
    """
    This is needed to be able to test the function, put it at the end of every
    new function you create
    """
    import sys
    from functions import *
    testfunction()
if __name__ == "__main__":
    # NOTE(review): reload() as a builtin and sys.setdefaultencoding() exist
    # only on Python 2 — this block would fail on Python 3.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    import doctest
    doctest.testmod()
| 19.756098 | 84 | 0.493519 |
7954df11ecda4051159df278e22d495a860d0422 | 93 | py | Python | logindemo/apps.py | KONE-XAD/django_project | 4d1a3a1eb59ab8dc43a5554bffc4d172104bf1cf | [
"Apache-2.0"
] | 1 | 2020-12-30T12:01:05.000Z | 2020-12-30T12:01:05.000Z | logindemo/apps.py | KONE-XAD/django_project | 4d1a3a1eb59ab8dc43a5554bffc4d172104bf1cf | [
"Apache-2.0"
] | null | null | null | logindemo/apps.py | KONE-XAD/django_project | 4d1a3a1eb59ab8dc43a5554bffc4d172104bf1cf | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class LogindemoConfig(AppConfig):
    """Django application configuration for the ``logindemo`` app."""
    # Must match the app's package name so Django can locate it.
    name = 'logindemo'
| 15.5 | 33 | 0.763441 |
7954df651d958eafb150fa055879f8594a99af0e | 290 | py | Python | scrapbook/items.py | ayushnagar123/scrapbook | b081fcac9a7826ff485ae0a580fb33ba8fa48967 | [
"MIT"
] | 1 | 2020-12-19T19:04:41.000Z | 2020-12-19T19:04:41.000Z | scrapbook/items.py | ayushnagar123/scrapbook | b081fcac9a7826ff485ae0a580fb33ba8fa48967 | [
"MIT"
] | null | null | null | scrapbook/items.py | ayushnagar123/scrapbook | b081fcac9a7826ff485ae0a580fb33ba8fa48967 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapbookItem(scrapy.Item):
    """Placeholder Scrapy item; no fields have been declared yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| 19.333333 | 53 | 0.689655 |
7954e078efe64e089051d22b81bd2027d8aabdd3 | 105 | py | Python | dataduct/__init__.py | recurly/dataduct | 29aec3526e170e5ad3b59a135780e72b69209f0b | [
"Apache-2.0"
] | 3 | 2017-12-29T11:26:15.000Z | 2022-02-11T16:44:28.000Z | dataduct/__init__.py | EverFi/dataduct | 9833ac57fece80509c68cb29d14895874caa9552 | [
"Apache-2.0"
] | 7 | 2017-09-21T23:25:24.000Z | 2021-03-29T21:46:45.000Z | dataduct/__init__.py | recurly/dataduct | 29aec3526e170e5ad3b59a135780e72b69209f0b | [
"Apache-2.0"
] | null | null | null | """Welcome to DataDuct
"""
# Package version string.
__version__ = '0.5.0'
# Declare this package as a pkg_resources-style namespace package so other
# distributions can contribute modules under the same top-level name.
__import__('pkg_resources').declare_namespace(__name__)
| 21 | 55 | 0.752381 |
7954e0f8cc879bb2d2caeac53b8f707f4fc7a24b | 831 | py | Python | premade_modules/2.80/e5e885d0ecb9430a73e0a904cdb6035a2ef77e98/bpy/ops/workspace.py | echantry/fake-bpy-module | 004cdf198841e639b7d9a4c4db95ca1c0d3aa2c7 | [
"MIT"
] | null | null | null | premade_modules/2.80/e5e885d0ecb9430a73e0a904cdb6035a2ef77e98/bpy/ops/workspace.py | echantry/fake-bpy-module | 004cdf198841e639b7d9a4c4db95ca1c0d3aa2c7 | [
"MIT"
] | null | null | null | premade_modules/2.80/e5e885d0ecb9430a73e0a904cdb6035a2ef77e98/bpy/ops/workspace.py | echantry/fake-bpy-module | 004cdf198841e639b7d9a4c4db95ca1c0d3aa2c7 | [
"MIT"
] | null | null | null | def add():
'''Add a new workspace by duplicating the current one or appending one from the user configuration
'''
pass
def append_activate(idname="", filepath=""):
    '''Append a workspace and make it the active one in the current window
    :param idname: Identifier, Name of the workspace to append and activate
    :type idname: string, (optional, never None)
    :param filepath: Filepath, Path to the library
    :type filepath: string, (optional, never None)
    '''
    # Auto-generated stub; the actual operator is implemented inside Blender.
    pass
def delete():
    '''Delete the active workspace
    '''
    # Auto-generated stub; the actual operator is implemented inside Blender.
    pass
def duplicate():
    '''Add a new workspace
    '''
    # Auto-generated stub; the actual operator is implemented inside Blender.
    pass
def reorder_to_back():
    '''Reorder workspace to be last in the list
    '''
    # Auto-generated stub; docstring corrected ("last", not "first") to match
    # Blender's bpy.ops.workspace.reorder_to_back documentation.
    pass
def reorder_to_front():
    '''Reorder workspace to be first in the list
    '''
    # Auto-generated stub; the actual operator is implemented inside Blender.
    pass
| 16.294118 | 103 | 0.632972 |
7954e366cf859910a653d2a8531948cc692c9fd3 | 4,079 | py | Python | data/p3BR/R2/benchmark/startCirq308.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startCirq308.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startCirq308.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=62
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the auto-generated 3-qubit benchmark circuit (62 numbered ops).
    The `# number=N` tags are generator bookkeeping; gate order, not the
    numbering, defines the circuit. A measurement of all qubits under key
    'result' is appended at the end. `n` is unused here — the qubit list
    itself determines the width.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[2])) # number=38
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=39
    c.append(cirq.H.on(input_qubit[2])) # number=40
    c.append(cirq.H.on(input_qubit[2])) # number=59
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=60
    c.append(cirq.H.on(input_qubit[2])) # number=61
    c.append(cirq.H.on(input_qubit[2])) # number=42
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=43
    c.append(cirq.H.on(input_qubit[2])) # number=44
    c.append(cirq.H.on(input_qubit[2])) # number=48
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=49
    c.append(cirq.H.on(input_qubit[2])) # number=50
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=54
    c.append(cirq.X.on(input_qubit[2])) # number=55
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=56
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=47
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=37
    c.append(cirq.H.on(input_qubit[2])) # number=51
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=52
    c.append(cirq.H.on(input_qubit[2])) # number=53
    c.append(cirq.H.on(input_qubit[2])) # number=25
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26
    c.append(cirq.H.on(input_qubit[2])) # number=27
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=8
    c.append(cirq.rx(0.17592918860102857).on(input_qubit[2])) # number=34
    c.append(cirq.rx(-0.3989822670059037).on(input_qubit[1])) # number=30
    c.append(cirq.H.on(input_qubit[1])) # number=9
    c.append(cirq.H.on(input_qubit[1])) # number=18
    c.append(cirq.rx(2.3310617489636263).on(input_qubit[2])) # number=58
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=19
    c.append(cirq.H.on(input_qubit[1])) # number=20
    c.append(cirq.Y.on(input_qubit[1])) # number=14
    c.append(cirq.H.on(input_qubit[1])) # number=22
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=23
    c.append(cirq.rx(-0.9173450548482197).on(input_qubit[1])) # number=57
    c.append(cirq.H.on(input_qubit[1])) # number=24
    c.append(cirq.Z.on(input_qubit[2])) # number=3
    c.append(cirq.Z.on(input_qubit[1])) # number=41
    c.append(cirq.X.on(input_qubit[1])) # number=17
    c.append(cirq.Y.on(input_qubit[2])) # number=5
    c.append(cirq.X.on(input_qubit[2])) # number=21
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=15
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16
    c.append(cirq.X.on(input_qubit[2])) # number=28
    c.append(cirq.X.on(input_qubit[2])) # number=29
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit values as a string of their int digits."""
    return "".join(map(str, (int(bit) for bit in bits)))
if __name__ == '__main__':
    # Build the benchmark circuit on 4 grid qubits, compile it for the
    # Sycamore gate set, sample it, and dump the measurement histogram.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Fold raw measurement bits into '0'/'1' strings for the histogram keys.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq308.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
7954e3dcd0a02351b155eb963626da7cc34ef7bd | 11,288 | py | Python | Bayesian/MKM_corrected.py | Shyamdeokr/Bayesian_CO_oxidation | 3d58c1ad967522266609259eea667bff55cfef0f | [
"MIT"
] | null | null | null | Bayesian/MKM_corrected.py | Shyamdeokr/Bayesian_CO_oxidation | 3d58c1ad967522266609259eea667bff55cfef0f | [
"MIT"
] | null | null | null | Bayesian/MKM_corrected.py | Shyamdeokr/Bayesian_CO_oxidation | 3d58c1ad967522266609259eea667bff55cfef0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Microkinetic Model for CO Oxidation (Pd), (PdO) and (PdO2) + O2 ads - corrected for DFT energetics and barriers
# Reactions - 1 - 8
#
# 
# Reactions 6' - 11
# 
# Reactions 12-14
# 
# Reactions 15-16 for O2 adsorption on Pd
#
# 
# First, we import the necessary numpy and scipy modules
# In[1]:
import numpy as np
import math
from scipy.integrate import odeint
from sklearn.linear_model import LinearRegression
model = LinearRegression()
from MKM import *
# We also need to define a set of reaction conditions
# # Microkinetic Model
# In[2]:
# T = 480 # K
# PCO = 0.02 # bar PCO is 2%
# PO2 = 0.20 # bar PO2 is 20%
PCO2 = 0 # bar
R = 8.314 # gas constant, J/(mol K)
mAr = 39.848 # molar masses (g/mol), used in the adsorbate entropy correlations
mCO = 28.0101
mO2 = 32
mCO2 = 44
SCOn = 197.660 # standard gas-phase entropies, J/(mol K)
SO2n = 205.152
SCO2n = 213.79
# ... and a few physical constants and conversion factors
# In[3]:
J2eV = 6.24150974E18 # eV/J
Na = 6.0221415E23 # mol-1
h = 6.626068E-34 * J2eV # in eV*s
kb = 1.3806503E-23 * J2eV # in eV/K
# kbT = kb * T # in eV
# ## Rate constants corrected
# In[4]:
def get_rate_constants_corrected(T,delE,dEa):
    """Build the 19-step corrected rate constants for CO oxidation on Pd/PdOx.
    Parameters
    ----------
    T : float
        Temperature in K.
    delE : sequence of 16 floats
        DFT reaction energies (eV) for the independent steps; two oxidation
        energies (dE[1], dE[9]) are back-filled to close the -6.49 eV cycle.
    dEa : sequence indexed by step
        DFT activation barriers (eV) for the activated steps.
    Returns
    -------
    (kf, kr, Ea) : forward/reverse rate constants (1/s) and barriers (eV),
    each of length 19, thermodynamically consistent via K = kf/kr.
    NOTE(review): STS[0]/STS[3] are scaled by R while STS[2] is scaled by kb;
    the adsorption branch divides by R and the non-adsorption branch by kb,
    so the units cancel consistently per-branch as written — confirm this
    was intentional before refactoring.
    """
    kbT = kb * T # in eV
    # Gas phase entropies converted to eV/K
    SCOg = 197.66 * J2eV / Na # eV/K
    SO2g = 205.0 * J2eV / Na
    SCO2g = 213.74 * J2eV / Na
    # Surface entropies as per charlie campbell's paper
    # SCO2v = Sv = SO2v = SO = SCOO = 0
    SCO = (0.70*SCOn - 3.3*R)*J2eV/Na
    SCO2 = (0.70*SCO2n - 3.3*R)*J2eV/Na
    SO2 = (0.70*SO2n - 3.3*R)*J2eV/Na
    # Reaction energies
    dE = np.zeros(19) # array initialization
    dE[0] = delE[0] # CO adsorption (-1.09)
    dE[2] = delE[1] # 1st CO2 desorption (0.52)
    dE[3] = delE[2] # O2 adsorption in a vacancy(-2.07)
    dE[4] = delE[3] # O2 dissociation in the vacancy(-1.29)
    dE[5] = delE[4] # 2nd CO adsorption (-0.65)
    dE[6] = delE[5] # 2nd CO oxidation(-1.46)
    dE[7] = delE[6] # 2nd CO2 desorption(-0.0057)
    dE[1] = -6.49-(dE[0]+dE[2]+dE[3]+dE[4]+dE[5]+dE[6]+dE[7]) # 1st CO oxidation (-0.22)
    dE[8] = delE[4] # rxn 6'- 2nd CO adsorption using lattice O
    dE[10] = delE[7] # rxn 8' - 2nd CO2 desorption leading to (V+O)*
    dE[11] = delE[8] # rxn 9 - O2 adsorption at (v+O)*
    dE[12] = delE[9] # rxn 10 -O2 dissociation at (v+O)*
    dE[13] = delE[10] # rxn 11 - O migration from (V+O)* --> *
    dE[14] = delE[11] # rxn 12 - CO adsorption at 2O*
    dE[15] = delE[12] # rxn 13 - CO oxidation at 2O*
    dE[16] = delE[13] # rxn 14 - CO2 desorption from O*
    dE[9] = -6.49-(dE[8]+dE[10]+dE[11]+dE[12]+dE[13]+dE[14]+dE[15]+dE[16]) # rxn 7' - 2nd CO oxidation using lattice O
    dE[17] = delE[14] # rxn 15 - O2 adsorption on *
    dE[18] = delE[15] # rxn 16 - O2 dissociation on * to give 2O*
    # Entropy changes (Ignoring dependence on T)
    # dSS = 0.001
    dS = np.zeros(19) # array initialization
    dS[0] = SCO - SCOg # CO adsorption
    dS[1] = 0 # 1st CO oxidation
    dS[2] = SCO2g - SCO2 # 1st CO2 desorption
    dS[3] = SO2 - SO2g # O2 adsorption in a vacancy
    dS[4] = 0 # O2 dissociation in the vacancy
    dS[5] = SCO - SCOg # 2nd CO adsorption
    dS[6] = 0 # 2nd CO oxidation
    dS[7] = SCO2g - SCO2 # 2nd CO2 desorption
    dS[8] = SCO - SCOg # rxn 6'- 2nd CO adsorption using lattice O
    dS[9] = 0 # rxn 7' - 2nd CO oxidation using lattice O
    dS[10] = SCO2g - SCO2 # rxn 8' - 2nd CO2 desorption leading to (V+O)*
    dS[11] = SO2 - SO2g # rxn 9 - O2 adsorption at (v+O)*
    dS[12] = 0 # rxn 10 -O2 dissociation at (v+O)*
    dS[13] = 0 # rxn 11 - O migration from (V+O)* --> *
    dS[14] = SCO - SCOg # rxn 12 - CO adsorption at 2O*
    dS[15] = 0 # rxn 13 - CO oxidation at 2O*
    dS[16] = SCO2g - SCO2 # rxn 14 - CO2 desorption from O*
    dS[17] = SO2 - SO2g # rxn 15 - O2 adsorption on *
    dS[18] = 0 # rxn 16 - O2 dissociation on * to give 2O*
    # Activation energy barriers
    Ea = np.zeros(19) # array initialization
    Ea[1] = dEa[1] # 1st CO Oxidation barrier = 0.49
    Ea[4] = dEa[4] # O2 dissociation barrier at (v+O)*
    Ea[6] = dEa[6] # 2nd CO Oxidation barrier (assumed)
    Ea[9] = dEa[9] # 2nd CO Oxidation barrier using lattice O
    Ea[12] = dEa[12] # O2 dissociation barrier at (v+O)*
    Ea[13] = dEa[13] # O migration barrier from (v+O)* ---> *
    Ea[15] = dEa[15] # rxn 13 - CO oxidation at 2O*
    Ea[18] = dEa[18] # rxn 16 - O2 dissociation on * to give 2O*
    # Entropy changes to the transition state
    STS = np.zeros(19) # array initialization
    STS[0] = (0.30*SCOn/R + 3.3-1/3*(18.6+math.log((mCO/mAr)**1.5*(T/298)**2.5)))*R # 1st CO adsorption entropy - ignoring
    # SCOn dependence on T
    STS[2] = (0.30*SCO2n/R + 3.3-1/3*(18.6+math.log((mCO2/mAr)**1.5*(T/298)**2.5)))*kb # 1st CO2 adsorption entropy
    STS[3] = (0.30*SO2n/R + 3.3-1/3*(18.6+math.log((mO2/mAr)**1.5*(T/298)**2.5)))*R # O2 adsorption entropy
    STS[5] = STS[14]= STS[8] = STS[0] # CO adsorption entropiesSTS[7] = STS[2]
    STS[10]= STS[16] = STS[2] # CO2 adsorption entropies
    STS[17]= STS[11] = STS[3]
    # Calculate equilibrium and rate constants
    K = [0]*19 # equilibrium constants
    kf = [0]*19 # forward rate constants
    kr = [0]*19 # reverse rate constants
    for i in range(19):
        dG = dE[i] - T*dS[i]
        K[i] = np.exp(-dG/kbT)
        # Enforce Ea > 0, and Ea > dE
        if i not in [0,3,5,8,11,14,17]: #(steps 0, 3 and 5 are adsorption steps)
            Ea[i] = max([0,dE[i],Ea[i]])
            kf[i] = kbT/h * np.exp(STS[i]/kb) * np.exp(-Ea[i]/kbT)
            kr[i] = kf[i]/K[i] # enforce thermodynamic consistency
        else:
            Ea[i] =-dE[i] # Ea[i] = Eads
            kr[i] = kbT/h * np.exp(STS[i]/R) * np.exp(-Ea[i]/kbT) # STS = TS-ads for adsorption 0,3 and 5
            kf[i] = K[i]*kr[i]
    return (kf,kr,Ea)
# In[5]:
# theta0 = (0.0, 0., 0., 0 , 0 , 0 , 0.0) # initial coverage of CO*, CO2+v*, vac*, O2v*, O*, COO*, CO2*, COO(L)*,CO2(L)*, (v+O)*,(O2+v+O)*, 2O*, (CO+2O)*, (CO2+O)*, O2* respectively
# Start from a clean surface: all 15 surface-species coverages are zero.
theta0 =np.zeros(15)
# ## Corrected rates and intermediate coverage
# And we call the function with our output values, i.e., the last row of the result matrix $\theta$.
# In[6]:
def print_output_corrected(theta0, T, P, delE, dEa):
    """Solve the corrected microkinetic model at one condition.
    Returns a tuple ``(total CO2 formation rate, CO* coverage)``, where the
    total rate sums the four CO-oxidation channels (steps 1, 6, 9 and 15).
    """
    kf, kr, Ea = get_rate_constants_corrected(T, delE, dEa)
    coverages = solve_ode(kf, kr, theta0, P)
    step_rates = get_rates(coverages, kf, kr, P)
    total_co2_rate = step_rates[1] + step_rates[6] + step_rates[9] + step_rates[15]
    return (total_co2_rate, coverages[0])
# ## Corrected Reaction orders ($P_{CO}$ & $P_{O2}$) and apparent barrier
# In[7]:
def rxn_order_CO_corrected(T, P, delE, dEa):
    """Apparent CO reaction order: slope of ln(rate) vs ln(P_CO) over a
    three-point window [P[0], P[0] + 0.05] bar at fixed P_O2 = P[1]."""
    n_grid = 3
    p_co_grid = np.linspace(P[0], P[0] + 0.05, n_grid)
    rates = np.zeros(n_grid)
    for idx, p_co in enumerate(p_co_grid):
        rates[idx] = print_output_corrected(theta0, T, [p_co, P[1]], delE, dEa)[0]
        # Floor at the smallest positive double so np.log never sees zero.
        if rates[idx] < 10**-323:
            rates[idx] = 10**-323
    fit = model.fit(np.log(p_co_grid.reshape(-1, 1)), np.log(rates))
    return fit.coef_[0]
# In[8]:
def rxn_order_O2_corrected(T, P, delE, dEa):
    """Apparent O2 reaction order: slope of ln(rate) vs ln(P_O2) over a
    three-point window [P[1], P[1] + 0.1] bar at fixed P_CO = P[0]."""
    n_grid = 3
    p_o2_grid = np.linspace(P[1], P[1] + 0.1, n_grid)
    rates = np.zeros(n_grid)
    for idx, p_o2 in enumerate(p_o2_grid):
        rates[idx] = print_output_corrected(theta0, T, [P[0], p_o2], delE, dEa)[0]
        # Floor at the smallest positive double so np.log never sees zero.
        if rates[idx] < 10**-323:
            rates[idx] = 10**-323
    fit = model.fit(np.log(p_o2_grid.reshape(-1, 1)), np.log(rates))
    return fit.coef_[0]
# ## Corrected Apparent barrier
# In[9]:
def apparent_barrier_corrected(T, P, delE, dEa):
    """Apparent activation barrier (eV) from an Arrhenius fit of ln(rate)
    against 1/T over the three temperatures {T-1, T, T+1} K."""
    n_grid = 3
    temperatures = np.linspace(T - 1, T + 1, n_grid)
    rates = np.zeros(n_grid)
    for idx, temp in enumerate(temperatures):
        rates[idx] = print_output_corrected(theta0, temp, P, delE, dEa)[0]
        # Floor at the smallest positive double so np.log never sees zero.
        if rates[idx] < 10**-323:
            rates[idx] = 10**-323
    # Arrhenius: ln(rate) = ln(A) - (Ea/kb) * (1/T), so Ea = -slope * kb (eV).
    fit = model.fit(1 / temperatures.reshape(-1, 1), np.log(rates))
    apparent_barrier = -fit.coef_ * kb
    return apparent_barrier[0]
# ## Corrected Degree of rate control
# In[10]:
def degree_of_rate_control_corrected(theta0,T,P,delE,dEa):
    """Campbell degree of rate control X_RC for each of the 19 steps:
    perturb kf and kr of one step by -1%/+1% (keeping its equilibrium
    constant fixed) and take the log-log sensitivity of the total CO2 rate
    (sum of oxidation channels 1, 6, 9 and 15)."""
    diffk_0=0.99
    diffk_1=1.01
    XRC=np.zeros(19)
    for i in range(19):
        # Down-perturbed rate constants for step i.
        (kf0,kr0,Ea) = get_rate_constants_corrected(T,delE,dEa)
        kf0[i]=kf0[i]*diffk_0
        kr0[i]=kr0[i]*diffk_0
        theta = solve_ode(kf0,kr0,theta0,P)
        rates0 = get_rates(theta,kf0,kr0,P)[1]+get_rates(theta,kf0,kr0,P)[6]+get_rates(theta,kf0,kr0,P)[9]+get_rates(theta,kf0,kr0,P)[15]
        # Up-perturbed rate constants for step i.
        (kf1,kr1,Ea) = get_rate_constants_corrected(T,delE,dEa)
        kf1[i]=kf1[i]*diffk_1
        kr1[i]=kr1[i]*diffk_1
        theta = solve_ode(kf1,kr1,theta0,P)
        # rates1 = get_rates(theta,kf1,kr1,P)
        rates1 = get_rates(theta,kf1,kr1,P)[1]+get_rates(theta,kf1,kr1,P)[6]+get_rates(theta,kf1,kr1,P)[9]+get_rates(theta,kf1,kr1,P)[15]
        # print(rates0,rates1)
        # Finite-difference estimate of d(ln rate)/d(ln k_i).
        XRC[i] = (math.log(rates1/rates0))/(math.log(kf1[i]/kf0[i]))
        # print("step", i+1, " ",rates0," ", rates1," ",np.round(XRC[i],3))
    return (XRC)
# In[ ]:
| 35.721519 | 181 | 0.536588 |
7954e4238d1958fc0a3c74febf2243695b29ad56 | 2,563 | py | Python | python/BehaviorPolicy.py | dquail/GVFMinecraft | 5eae9ea9974ec604194b32cdb235765ea3fe7fb3 | [
"MIT"
] | null | null | null | python/BehaviorPolicy.py | dquail/GVFMinecraft | 5eae9ea9974ec604194b32cdb235765ea3fe7fb3 | [
"MIT"
] | null | null | null | python/BehaviorPolicy.py | dquail/GVFMinecraft | 5eae9ea9974ec604194b32cdb235765ea3fe7fb3 | [
"MIT"
] | null | null | null | from random import randint
import numpy as np
import random
class BehaviorPolicy:
    """Hand-crafted behavior policies for the Minecraft (Malmo) agent.
    Each policy maps an observation ``state`` to a Malmo command string
    drawn from ``self.ACTIONS``. ``self.i`` is a shared step counter that
    the scripted policies use to interleave actions.
    """
    def __init__(self):
        self.lastAction = 0  # kept for compatibility; not read by the policies below
        self.i = 0  # shared step counter for the scripted policies
        # Malmo command strings the agent can emit.
        self.ACTIONS = {
            'forward': "move 1",
            'turn_left': "turn -1",
            'turn_right': "turn 1",
            'extend_hand': "move 0"
        }
    def policy(self, state):
        """Turn left when facing a wall, otherwise move forward.
        The last element of the feature vector flags "facing wall".
        """
        self.i = self.i + 1
        isFacingWall = state[len(state) - 1] == 1
        if isFacingWall:
            # Bug fix: the original looked up ACTIONS['look_left'], which is
            # not a key of ACTIONS and raised KeyError; use 'turn_left'.
            return self.ACTIONS['turn_left']
        else:
            return self.ACTIONS['forward']
    def randomTurnPolicy(self, state):
        """Turn left or right with equal probability."""
        moves = [self.ACTIONS['turn_left'], self.ACTIONS['turn_right']]
        return moves[randint(0, 1)]
    def forwardThenLeftPolicy(self, state):
        """Move forward, turning left every 20th step."""
        self.i += 1
        if self.i % 20 == 0:
            return self.turnLeftPolicy(state)
        else:
            return self.moveForwardPolicy(state)
    def mostlyForwardPolicy(self, state):
        """Move forward, taking a random action every 21st step."""
        if self.i % 21 == 0:
            return self.randomPolicy(state)
        else:
            return self.moveForwardPolicy(state)
    def mostlyForwardAndTouchPolicy(self, state):
        """Scripted exploration: mostly forward with hand extension,
        sprinkled with turns and random actions on a step-count schedule.
        """
        self.i += 1
        # Three right turns in a row every 50 steps (roughly turn around).
        if self.i % 50 == 0:
            return self.turnRightPolicy(state)
        elif (self.i - 1) % 50 == 0:
            return self.turnRightPolicy(state)
        elif ((self.i - 2) % 50) == 0:
            return self.turnRightPolicy(state)
        elif self.i % 7 == 0:
            return self.randomTurnPolicy(state)
        elif self.i % 8 == 0:
            return self.randomTurnPolicy(state)
        elif self.i % 19 == 0:
            return self.randomPolicy(state)
        elif self.i % 21 == 0:
            return self.mostlyForwardPolicy(state)
        elif self.i % 23 == 0:
            return self.mostlyForwardPolicy(state)
        else:
            # Early in training (< 30000 steps) touch on every even step.
            if self.i % 2 == 0 and self.i < 30000:
                return self.ACTIONS['extend_hand']
            elif (self.i - 1) % 4 == 0:
                return self.randomPolicy(state)
            else:
                return self.mostlyForwardPolicy(state)
    def extendHandPolicy(self, state):
        """Always extend the hand (no movement)."""
        return self.ACTIONS['extend_hand']
    def randomPolicy(self, state):
        """Pick any action uniformly at random."""
        return self.ACTIONS[random.choice(list(self.ACTIONS.keys()))]
    def moveForwardPolicy(self, state):
        """Always move forward."""
        return self.ACTIONS['forward']
    def turnLeftPolicy(self, state):
        """Always turn left."""
        return self.ACTIONS['turn_left']
    def turnRightPolicy(self, state):
        """Always turn right."""
        return self.ACTIONS['turn_right']
    def epsilonGreedyPolicy(self, state):
        # TODO: not implemented yet; currently just prints a placeholder
        # and implicitly returns None.
        print("Do something here")
7954e4b7437835e9b134957a4e097830a9031d2a | 2,134 | py | Python | metrics/metrics.py | ksboy/ccks3 | c500af33b6b879751ea04ce5fab456b01db9868c | [
"Apache-2.0"
] | 1 | 2021-07-14T06:30:20.000Z | 2021-07-14T06:30:20.000Z | metrics/metrics.py | ksboy/ccks3 | c500af33b6b879751ea04ce5fab456b01db9868c | [
"Apache-2.0"
] | null | null | null | metrics/metrics.py | ksboy/ccks3 | c500af33b6b879751ea04ce5fab456b01db9868c | [
"Apache-2.0"
] | null | null | null | def precision_score(batch_labels, batch_preds):
assert len(batch_labels) == len(batch_preds)
nb_correct, nb_pred = 0, 0
for labels, preds in zip(batch_labels, batch_preds):
for label in labels:
if label in preds:
nb_correct += 1
nb_pred += len(preds)
p = nb_correct / nb_pred if nb_pred > 0 else 0
return p
def recall_score(batch_labels, batch_preds):
    """Micro-averaged recall over a batch: matched gold labels divided by
    the total number of gold labels."""
    assert len(batch_labels) == len(batch_preds)
    nb_correct = sum(
        1
        for labels, preds in zip(batch_labels, batch_preds)
        for label in labels
        if label in preds
    )
    nb_true = sum(len(labels) for labels in batch_labels)
    return nb_correct / nb_true if nb_true > 0 else 0
def f1_score(batch_labels, batch_preds):
    """Micro-averaged F1 over a batch: harmonic mean of micro precision
    and micro recall."""
    assert len(batch_labels) == len(batch_preds)
    nb_correct = sum(
        1
        for labels, preds in zip(batch_labels, batch_preds)
        for label in labels
        if label in preds
    )
    nb_true = sum(len(labels) for labels in batch_labels)
    nb_pred = sum(len(preds) for preds in batch_preds)
    p = nb_correct / nb_pred if nb_pred > 0 else 0
    r = nb_correct / nb_true if nb_true > 0 else 0
    return 2 * p * r / (p + r) if p + r > 0 else 0
# 一维
def _precision_score(labels, preds):
nb_correct = 0
for label in labels:
if label in preds:
nb_correct += 1
# continue
nb_pred = len(preds)
p = nb_correct / nb_pred if nb_pred > 0 else 0
return p
# 一维
def _recall_score(labels, preds):
nb_correct = 0
for label in labels:
if label in preds:
nb_correct += 1
# continue
nb_true = len(labels)
r = nb_correct / nb_true if nb_true > 0 else 0
return r
# 一维
def _f1_score(labels, preds):
nb_correct = 0
for label in labels:
if label in preds:
nb_correct += 1
# continue
nb_pred = len(labels)
nb_true = len(preds)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
f1 = 2 * p * r / (p + r) if p + r > 0 else 0
return f1
| 29.638889 | 56 | 0.592315 |
7954e54b7f90e1996e0825e843eeb686206c43af | 1,640 | py | Python | src/atexits.py | necessary129/IRC-Lib | a291d236a6fdcc85779a65b205efe2f5661230bd | [
"BSD-2-Clause"
] | null | null | null | src/atexits.py | necessary129/IRC-Lib | a291d236a6fdcc85779a65b205efe2f5661230bd | [
"BSD-2-Clause"
] | null | null | null | src/atexits.py | necessary129/IRC-Lib | a291d236a6fdcc85779a65b205efe2f5661230bd | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2015 noteness
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import atexit
def atexist():
    """atexit hook: write a closing "***End logging on <date>***" marker."""
    # Imported lazily so module import order at startup is not disturbed.
    from src import printer
    import datetime
    # NOTE: removed an unused ``import time`` from the original body.
    date = str(datetime.date.today())
    printer("***End logging on {0}***".format(date))
def saveconf():
    """atexit hook: dump every public attribute of ``config`` to config.json.

    Sets are converted to lists first, since JSON has no set type.
    """
    import json
    import config
    dc = {}
    for name in dir(config):
        if name.startswith('__'):
            continue
        value = vars(config)[name]
        if isinstance(value, set):
            value = list(value)
        dc[name] = value
    # ``with`` guarantees the file is closed even if json.dump raises
    # (the original leaked the handle on error).
    with open('config.json', 'w') as cnfg:
        json.dump(dc, cnfg, indent=4)
atexit.register(atexist)
atexit.register(saveconf) | 40 | 79 | 0.716463 |
7954e5ff05a05f799796cff9f3bb5849f82851d0 | 14,216 | py | Python | python_modules/dagster/dagster/core/definitions/sensor.py | soyelherein/dagster | f969a3b24e66e61f91e3a7beaedbd2a87f86f85d | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/definitions/sensor.py | soyelherein/dagster | f969a3b24e66e61f91e3a7beaedbd2a87f86f85d | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/definitions/sensor.py | soyelherein/dagster | f969a3b24e66e61f91e3a7beaedbd2a87f86f85d | [
"Apache-2.0"
] | null | null | null | import inspect
import warnings
from contextlib import ExitStack
from typing import Any, Callable, Generator, List, NamedTuple, Optional, Union, cast
from dagster import check
from dagster.core.errors import DagsterInvalidInvocationError, DagsterInvariantViolationError
from dagster.core.instance import DagsterInstance
from dagster.core.instance.ref import InstanceRef
from dagster.serdes import whitelist_for_serdes
from dagster.utils import ensure_gen
from dagster.utils.backcompat import (
ExperimentalWarning,
experimental_arg_warning,
experimental_fn_warning,
)
from ..decorator_utils import get_function_params
from .graph import GraphDefinition
from .mode import DEFAULT_MODE_NAME
from .run_request import JobType, PipelineRunReaction, RunRequest, SkipReason
from .target import DirectTarget, RepoRelativeTarget
from .utils import check_valid_name
DEFAULT_SENSOR_DAEMON_INTERVAL = 30
class SensorExecutionContext:
    """Sensor execution context.
    An instance of this class is made available as the first argument to the evaluation function
    on SensorDefinition.
    Attributes:
        instance_ref (Optional[InstanceRef]): The serialized instance configured to run the schedule
        cursor (Optional[str]): The cursor, passed back from the last sensor evaluation via
            the cursor attribute of SkipReason and RunRequest
        last_completion_time (float): DEPRECATED The last time that the sensor was evaluated (UTC).
        last_run_key (str): DEPRECATED The run key of the RunRequest most recently created by this
            sensor. Use the preferred `cursor` attribute instead.
    """
    def __init__(
        self,
        instance_ref: Optional[InstanceRef],
        last_completion_time: Optional[float],
        last_run_key: Optional[str],
        cursor: Optional[str],
    ):
        # The exit stack owns the lazily-created DagsterInstance (see the
        # ``instance`` property) so it can be torn down in __exit__.
        self._exit_stack = ExitStack()
        self._instance = None
        self._instance_ref = check.opt_inst_param(instance_ref, "instance_ref", InstanceRef)
        self._last_completion_time = check.opt_float_param(
            last_completion_time, "last_completion_time"
        )
        self._last_run_key = check.opt_str_param(last_run_key, "last_run_key")
        self._cursor = check.opt_str_param(cursor, "cursor")
        self._instance = None
    def __enter__(self):
        return self
    def __exit__(self, _exception_type, _exception_value, _traceback):
        # Closes any DagsterInstance opened via the ``instance`` property.
        self._exit_stack.close()
    @property
    def instance(self) -> DagsterInstance:
        # self._instance_ref should only ever be None when this SensorExecutionContext was
        # constructed under test.
        if not self._instance_ref:
            raise DagsterInvariantViolationError(
                "Attempted to initialize dagster instance, but no instance reference was provided."
            )
        if not self._instance:
            # Lazily materialize the instance from its ref and register it on
            # the exit stack for cleanup when the context exits.
            self._instance = self._exit_stack.enter_context(
                DagsterInstance.from_ref(self._instance_ref)
            )
        return cast(DagsterInstance, self._instance)
    @property
    def last_completion_time(self) -> Optional[float]:
        return self._last_completion_time
    @property
    def last_run_key(self) -> Optional[str]:
        return self._last_run_key
    @property
    def cursor(self) -> Optional[str]:
        """The cursor value for this sensor, which was set in an earlier sensor evaluation."""
        return self._cursor
    def update_cursor(self, cursor: Optional[str]) -> None:
        """Updates the cursor value for this sensor, which will be provided on the context for the
        next sensor evaluation.
        This can be used to keep track of progress and avoid duplicate work across sensor
        evaluations.
        Args:
            cursor (Optional[str]):
        """
        self._cursor = check.opt_str_param(cursor, "cursor")
class SensorDefinition:
    """Define a sensor that initiates a set of runs based on some external state
    Args:
        name (str): The name of the sensor to create.
        evaluation_fn (Callable[[SensorExecutionContext]]): The core evaluation function for the
            sensor, which is run at an interval to determine whether a run should be launched or
            not. Takes a :py:class:`~dagster.SensorExecutionContext`.
            This function must return a generator, which must yield either a single SkipReason
            or one or more RunRequest objects.
        pipeline_name (Optional[str]): The name of the pipeline to execute when the sensor fires.
        solid_selection (Optional[List[str]]): A list of solid subselection (including single
            solid names) to execute when the sensor runs. e.g. ``['*some_solid+', 'other_solid']``
        mode (Optional[str]): The mode to apply when executing runs triggered by this sensor.
            (default: 'default')
        minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse
            between sensor evaluations.
        description (Optional[str]): A human-readable description of the sensor.
        job (Optional[PipelineDefinition]): Experimental
    """
    def __init__(
        self,
        name: str,
        evaluation_fn: Callable[
            ["SensorExecutionContext"],
            Union[Generator[Union[RunRequest, SkipReason], None, None], RunRequest, SkipReason],
        ],
        pipeline_name: Optional[str] = None,
        solid_selection: Optional[List[Any]] = None,
        mode: Optional[str] = None,
        minimum_interval_seconds: Optional[int] = None,
        description: Optional[str] = None,
        job: Optional[GraphDefinition] = None,
        decorated_fn: Optional[
            Callable[
                ["SensorExecutionContext"],
                Union[Generator[Union[RunRequest, SkipReason], None, None], RunRequest, SkipReason],
            ]
        ] = None,
    ):
        self._name = check_valid_name(name)
        # The sensor's target is one of: nothing (warn), a direct job object,
        # or a repo-relative (pipeline_name, mode, solid_selection) triple.
        if pipeline_name is None and job is None:
            warnings.warn(
                f'Neither pipeline_name or job is provided. Sensor "{name}" will not target a pipeline.',
                ExperimentalWarning,
            )
            self._target: Optional[Union[DirectTarget, RepoRelativeTarget]] = None
        elif job is not None:
            experimental_arg_warning("target", "SensorDefinition.__init__")
            self._target = DirectTarget(job)
        else:
            self._target = RepoRelativeTarget(
                pipeline_name=check.str_param(pipeline_name, "pipeline_name"),
                mode=check.opt_str_param(mode, "mode") or DEFAULT_MODE_NAME,
                solid_selection=check.opt_nullable_list_param(
                    solid_selection, "solid_selection", of_type=str
                ),
            )
        self._description = check.opt_str_param(description, "description")
        self._evaluation_fn = check.callable_param(evaluation_fn, "evaluation_fn")
        # Set only when created via the @sensor decorator; required for
        # direct invocation through __call__.
        self._decorated_fn = check.opt_callable_param(decorated_fn, "decorated_fn")
        self._min_interval = check.opt_int_param(
            minimum_interval_seconds, "minimum_interval_seconds", DEFAULT_SENSOR_DAEMON_INTERVAL
        )
    def __call__(self, *args, **kwargs):
        # Direct invocation path (for tests): accepts exactly one context
        # argument, positional or by the decorated function's parameter name.
        if not self._decorated_fn:
            raise DagsterInvalidInvocationError(
                "Sensor invocation is only supported for sensors created via the `@sensor` "
                "decorator."
            )
        if len(args) == 0 and len(kwargs) == 0:
            raise DagsterInvalidInvocationError(
                "Sensor decorated function has context argument, but no context argument was "
                "provided when invoking."
            )
        if len(args) + len(kwargs) > 1:
            raise DagsterInvalidInvocationError(
                "Sensor invocation received multiple arguments. Only a first "
                "positional context parameter should be provided when invoking."
            )
        context_param_name = get_function_params(self._decorated_fn)[0].name
        if args:
            context = check.opt_inst_param(args[0], context_param_name, SensorExecutionContext)
        else:
            if context_param_name not in kwargs:
                raise DagsterInvalidInvocationError(
                    f"Sensor invocation expected argument '{context_param_name}'."
                )
            context = check.opt_inst_param(
                kwargs[context_param_name], context_param_name, SensorExecutionContext
            )
        # Fall back to a default (test) context when None was passed.
        context = context if context else build_sensor_context()
        return self._decorated_fn(context)
    @property
    def name(self) -> str:
        return self._name
    @property
    def pipeline_name(self) -> Optional[str]:
        return self._target.pipeline_name if self._target else None
    @property
    def job_type(self) -> JobType:
        return JobType.SENSOR
    @property
    def solid_selection(self) -> Optional[List[Any]]:
        return self._target.solid_selection if self._target else None
    @property
    def mode(self) -> Optional[str]:
        return self._target.mode if self._target else None
    @property
    def description(self) -> Optional[str]:
        return self._description
    def evaluate_tick(self, context: "SensorExecutionContext") -> "SensorExecutionData":
        """Evaluate sensor using the provided context.
        Args:
            context (SensorExecutionContext): The context with which to evaluate this sensor.
        Returns:
            SensorExecutionData: Contains list of run requests, or skip message if present.
        """
        check.inst_param(context, "context", SensorExecutionContext)
        result = list(ensure_gen(self._evaluation_fn(context)))
        # Normalize the evaluation output: nothing, a single item, a list of
        # RunRequests, or a list of PipelineRunReactions.
        if not result or result == [None]:
            run_requests = []
            pipeline_run_reactions = []
            skip_message = None
        elif len(result) == 1:
            item = result[0]
            check.inst(item, (SkipReason, RunRequest, PipelineRunReaction))
            run_requests = [item] if isinstance(item, RunRequest) else []
            pipeline_run_reactions = [item] if isinstance(item, PipelineRunReaction) else []
            skip_message = item.skip_message if isinstance(item, SkipReason) else None
        elif isinstance(result[0], RunRequest):
            check.is_list(result, of_type=RunRequest)
            run_requests = result
            pipeline_run_reactions = []
            skip_message = None
        else:
            run_requests = []
            check.is_list(result, of_type=PipelineRunReaction)
            pipeline_run_reactions = result
            skip_message = None
        return SensorExecutionData(
            run_requests, skip_message, context.cursor, pipeline_run_reactions
        )
    @property
    def minimum_interval_seconds(self) -> Optional[int]:
        return self._min_interval
    def has_loadable_target(self):
        # True only when the sensor directly holds its target job object.
        return isinstance(self._target, DirectTarget)
    def load_target(self):
        if isinstance(self._target, DirectTarget):
            return self._target.load()
        check.failed("Target is not loadable")
@whitelist_for_serdes
class SensorExecutionData(
    NamedTuple(
        "_SensorExecutionData",
        [
            ("run_requests", Optional[List[RunRequest]]),
            ("skip_message", Optional[str]),
            ("cursor", Optional[str]),
            ("pipeline_run_reactions", Optional[List[PipelineRunReaction]]),
        ],
    )
):
    # Serializable result of one sensor evaluation tick. Exactly one of
    # run_requests / skip_message carries the outcome (enforced below).
    def __new__(
        cls,
        run_requests: Optional[List[RunRequest]] = None,
        skip_message: Optional[str] = None,
        cursor: Optional[str] = None,
        pipeline_run_reactions: Optional[List[PipelineRunReaction]] = None,
    ):
        check.opt_list_param(run_requests, "run_requests", RunRequest)
        check.opt_str_param(skip_message, "skip_message")
        check.opt_str_param(cursor, "cursor")
        check.opt_list_param(pipeline_run_reactions, "pipeline_run_reactions", PipelineRunReaction)
        # A tick cannot both request runs and skip.
        check.invariant(
            not (run_requests and skip_message), "Found both skip data and run request data"
        )
        return super(SensorExecutionData, cls).__new__(
            cls,
            run_requests=run_requests,
            skip_message=skip_message,
            cursor=cursor,
            pipeline_run_reactions=pipeline_run_reactions,
        )
def wrap_sensor_evaluation(
    sensor_name: str,
    result: Union[Generator[Union[SkipReason, RunRequest], None, None], SkipReason, RunRequest],
) -> Generator[Union[SkipReason, RunRequest], None, None]:
    """Normalize a sensor evaluation result into a generator of events.

    A generator is re-yielded item by item, a single result object is
    yielded once, ``None`` yields nothing, and any other value raises.
    """
    if inspect.isgenerator(result):
        yield from result
        return
    if isinstance(result, (SkipReason, RunRequest, PipelineRunReaction)):
        yield result
        return
    if result is not None:
        raise DagsterInvariantViolationError(
            f"Error in sensor {sensor_name}: Sensor unexpectedly returned output "
            f"{result} of type {type(result)}. Should only return SkipReason, PipelineRunReaction, or "
            "RunRequest objects."
        )
def build_sensor_context(
    instance: Optional[DagsterInstance] = None, cursor: Optional[str] = None
) -> SensorExecutionContext:
    """Construct a SensorExecutionContext for directly invoking a sensor.
    If an instance is supplied it must be persistent; DagsterInstance.ephemeral()
    will result in an error.
    Args:
        instance (Optional[DagsterInstance]): The dagster instance configured to run the sensor.
        cursor (Optional[str]): A cursor value to provide to the evaluation of the sensor.
    Examples:
        .. code-block:: python
            context = build_sensor_context()
            my_sensor(context)
    """
    experimental_fn_warning("build_sensor_context")
    check.opt_inst_param(instance, "instance", DagsterInstance)
    check.opt_str_param(cursor, "cursor")
    instance_ref = instance.get_ref() if instance else None
    return SensorExecutionContext(
        instance_ref=instance_ref,
        last_completion_time=None,
        last_run_key=None,
        cursor=cursor,
    )
| 38.010695 | 105 | 0.662704 |
7954e6066e5a19cf56e5e8b8a59be50725a60bdc | 5,454 | py | Python | spambayes/chi2.py | nascheme/spambayes-core | aa2f6983c936af86c70953c93b276831e5d19429 | [
"PSF-2.0"
] | null | null | null | spambayes/chi2.py | nascheme/spambayes-core | aa2f6983c936af86c70953c93b276831e5d19429 | [
"PSF-2.0"
] | null | null | null | spambayes/chi2.py | nascheme/spambayes-core | aa2f6983c936af86c70953c93b276831e5d19429 | [
"PSF-2.0"
] | null | null | null | from __future__ import print_function
import math as _math
import random
def chi2Q(x2, v, exp=_math.exp, min=min):
    """Survival function of the chi-squared distribution.

    Returns prob(chisq >= x2) for v degrees of freedom; v must be even.
    Uses the closed-form series exp(-m) * sum(m**k / k!) with m = x2/2.
    """
    assert v & 1 == 0
    half = x2 / 2.0
    # XXX exp(-half) underflows to 0.0 for very large x2, freezing the
    # series at 0 (which is also the correct limiting answer).
    term = exp(-half)
    total = term
    for k in range(1, v // 2):
        term *= half / k
        total += term
    # Accumulated roundoff plus platform exp() error can push the sum a
    # few ULP above 1.0 for small x2 and large v, so clamp at 1.0.
    return min(total, 1.0)
def normZ(z, sqrt2pi=_math.sqrt(2.0 * _math.pi), exp=_math.exp):
    """Return the standard normal (unit Gaussian) density at ``z``."""
    half_z_sq = z * z / 2.0
    return exp(-half_z_sq) / sqrt2pi
def normP(z):
    """Return area under the unit Gaussian from -inf to z.

    This is the probability that a zscore is <= z.  Very accurate in a
    fixed-point sense; for z of large magnitude (<= -8.3) it returns 0.0,
    since P(-z) is then indistinguishable from 1.0 at machine precision.
    """
    a = abs(float(z))
    if a >= 8.3:
        tail = 0.5
    else:
        # Power series for the area from 0 to a; iterate until the running
        # sum stops changing in floating point.
        z2 = a * a
        term = a * normZ(a)
        total = term
        prev = 0.0
        k = 1.0
        while prev != total:
            prev = total
            k += 2.0
            term *= z2 / k
            total += term
        tail = prev
    if z >= 0:
        return 0.5 + tail
    return 0.5 - tail
def normIQ(p, sqrt=_math.sqrt, ln=_math.log):
    """Return z such that the area under the unit Gaussian from z to +inf is p.
    Must have 0.0 <= p <= 1.0.

    Low-accuracy rational approximation from Abramowitz & Stegun;
    absolute error bounded by about 3e-3.
    """
    assert 0.0 <= p <= 1.0
    # Work in the lower tail and negate at the end for p > 0.5.
    flipped = p > 0.5
    if flipped:
        p = 1.0 - p
    if p == 0.0:
        result = 8.3
    else:
        t = sqrt(-2.0 * ln(p))
        numer = 2.30753 + 0.27061 * t
        denom = 1.0 + 0.99229 * t + 0.04481 * t ** 2
        result = t - numer / denom
    return -result if flipped else result
def normIP(p):
    """Return z such that the area under the unit Gaussian from -inf to z is p.
    Must have 0.0 <= p <= 1.0.
    """
    # Seed with the rational approximation (area from z to +inf is 1-p)...
    guess = normIQ(1.0 - p)
    # ...then apply one Newton step, which should double the # of good digits.
    correction = (p - normP(guess)) / normZ(guess)
    return guess + correction
def main():
    """Monte-Carlo demo: histogram S, H and (S-H+1)/2 for random prob vectors.

    Optional argv: [warp [bias]] -- append ``warp`` copies of ``bias`` to
    each 50-probability vector before judging it.
    """
    from spambayes.Histogram import Hist
    import sys
    class WrappedRandom:
        # There's no way W-H is equidistributed in 50 dimensions, so use
        # Marsaglia-wrapping to shuffle it more.
        def __init__(self, baserandom=random.random, tabsize=513):
            self.baserandom = baserandom
            self.n = tabsize
            self.tab = [baserandom() for _i in range(tabsize)]
            self.next = baserandom()
        def random(self):
            # BUGFIX: the 2to3 conversion turned ``self.next`` into
            # ``self.__next__`` here, which is never set and raised
            # AttributeError; read the ``next`` attribute set in __init__.
            result = self.next
            i = int(result * self.n)
            self.next = self.tab[i]
            self.tab[i] = self.baserandom()
            return result
    random = WrappedRandom().random
    # from uni import uni as random
    # print random
    def judge(ps, ln=_math.log, ln2=_math.log(2), frexp=_math.frexp):
        # Fisher combining: product of (1-p) for S, of p for H, with frexp
        # rescaling to avoid float underflow on long products.
        H = S = 1.0
        Hexp = Sexp = 0
        for p in ps:
            S *= 1.0 - p
            H *= p
            if S < 1e-200:
                S, e = frexp(S)
                Sexp += e
            if H < 1e-200:
                H, e = frexp(H)
                Hexp += e
        S = ln(S) + Sexp * ln2
        H = ln(H) + Hexp * ln2
        n = len(ps)
        S = 1.0 - chi2Q(-2.0 * S, 2 * n)
        H = 1.0 - chi2Q(-2.0 * H, 2 * n)
        return S, H, (S - H + 1.0) / 2.0
    warp = 0
    bias = 0.99
    if len(sys.argv) > 1:
        warp = int(sys.argv[1])
    if len(sys.argv) > 2:
        bias = float(sys.argv[2])
    h = Hist(20, lo=0.0, hi=1.0)
    s = Hist(20, lo=0.0, hi=1.0)
    score = Hist(20, lo=0.0, hi=1.0)
    for _i in range(5000):
        ps = [random() for _j in range(50)]
        s1, h1, score1 = judge(ps + [bias] * warp)
        s.add(s1)
        h.add(h1)
        score.add(score1)
    print("Result for random vectors of 50 probs, +", warp, "forced to", bias)
    # Should be uniformly distributed on all-random data.
    print()
    print('H', end=' ')
    h.display()
    # Should be uniformly distributed on all-random data.
    print()
    print('S', end=' ')
    s.display()
    # Distribution doesn't really matter.
    print()
    print('(S-H+1)/2', end=' ')
    score.display()
def showscore(ps, ln=_math.log, ln2=_math.log(2), frexp=_math.frexp):
    """Print Fisher chi-squared spam/ham diagnostics for probability list ``ps``."""
    spam_prod = ham_prod = 1.0
    spam_exp = ham_exp = 0
    for p in ps:
        spam_prod *= 1.0 - p
        ham_prod *= p
        # Peel powers of two off via frexp so long products don't underflow.
        if spam_prod < 1e-200:
            spam_prod, e = frexp(spam_prod)
            spam_exp += e
        if ham_prod < 1e-200:
            ham_prod, e = frexp(ham_prod)
            ham_exp += e
    log_s = ln(spam_prod) + spam_exp * ln2
    log_h = ln(ham_prod) + ham_exp * ln2
    n = len(ps)
    probS = chi2Q(-2 * log_s, 2 * n)
    probH = chi2Q(-2 * log_h, 2 * n)
    print("P(chisq >= %10g | v=%3d) = %10g" % (-2 * log_s, 2 * n, probS))
    print("P(chisq >= %10g | v=%3d) = %10g" % (-2 * log_h, 2 * n, probH))
    spam_score = 1.0 - probS
    ham_score = 1.0 - probH
    combined = (spam_score - ham_score + 1.0) / 2.0
    print("spam prob", spam_score)
    print(" ham prob", ham_score)
    print("(S-H+1)/2", combined)
if __name__ == '__main__':
main()
| 25.726415 | 79 | 0.506051 |
7954e6d79e7f2c969e9da72997e8aedbf6ef83fa | 2,031 | py | Python | akshare/air/air_hebei.py | dindom999/akshare | 95b38d3430c71637c3ee9ba799618c20afe4a010 | [
"MIT"
] | 1 | 2020-05-14T13:20:48.000Z | 2020-05-14T13:20:48.000Z | akshare/air/air_hebei.py | dindom999/akshare | 95b38d3430c71637c3ee9ba799618c20afe4a010 | [
"MIT"
] | 1 | 2020-09-07T11:18:55.000Z | 2020-09-07T11:18:55.000Z | akshare/air/air_hebei.py | dindom999/akshare | 95b38d3430c71637c3ee9ba799618c20afe4a010 | [
"MIT"
] | 2 | 2020-09-23T08:50:14.000Z | 2020-09-28T09:57:07.000Z | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/4/29 12:33
Desc: 河北省空气质量预报信息发布系统
http://110.249.223.67/publish/
每日 17 时发布
等级划分
1. 空气污染指数为0-50,空气质量级别为一级,空气质量状况属于优。此时,空气质量令人满意,基本无空气污染,各类人群可正常活动。
2. 空气污染指数为51-100,空气质量级别为二级,空气质量状况属于良。此时空气质量可接受,但某些污染物可能对极少数异常敏感人群健康有较弱影响,建议极少数异常敏感人群应减少户外活动。
3. 空气污染指数为101-150,空气质量级别为三级,空气质量状况属于轻度污染。此时,易感人群症状有轻度加剧,健康人群出现刺激症状。建议儿童、老年人及心脏病、呼吸系统疾病患者应减少长时间、高强度的户外锻炼。
4. 空气污染指数为151-200,空气质量级别为四级,空气质量状况属于中度污染。此时,进一步加剧易感人群症状,可能对健康人群心脏、呼吸系统有影响,建议疾病患者避免长时间、高强度的户外锻练,一般人群适量减少户外运动。
5. 空气污染指数为201-300,空气质量级别为五级,空气质量状况属于重度污染。此时,心脏病和肺病患者症状显著加剧,运动耐受力降低,健康人群普遍出现症状,建议儿童、老年人和心脏病、肺病患者应停留在室内,停止户外运动,一般人群减少户外运动。
6. 空气污染指数大于300,空气质量级别为六级,空气质量状况属于严重污染。此时,健康人群运动耐受力降低,有明显强烈症状,提前出现某些疾病,建议儿童、老年人和病人应当留在室内,避免体力消耗,一般人群应避免户外活动。
发布单位:河北省环境应急与重污染天气预警中心 技术支持:中国科学院大气物理研究所 中科三清科技有限公司
"""
from datetime import datetime
import pandas as pd
import requests
def air_quality_hebei(city: str = "唐山市") -> pd.DataFrame:
    """Fetch the 6-day air-quality forecast from the Hebei publishing system.

    Source: http://110.249.223.67/publish/ (published daily around 17:00).

    :param city: one of ['石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市',
        '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '辛集市', '定州市'];
        an empty string returns data for every city
    :type city: str
    :return: forecast rows indexed by city name (6 rows per city, one per day)
    :rtype: pandas.DataFrame
    """
    url = "http://110.249.223.67/publishNewServer/api/CityPublishInfo/GetProvinceAndCityPublishData"
    params = {
        "publishDate": f"{datetime.today().strftime('%Y-%m-%d')} 16:00:00"
    }
    r = requests.get(url, params=params)
    json_data = r.json()
    city_list = pd.DataFrame.from_dict(json_data["cityPublishDatas"], orient="columns")["CityName"].tolist()
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # build the per-day frames first and concatenate once instead.
    day_frames = [
        pd.DataFrame([item[f"Date{i}"] for item in json_data["cityPublishDatas"]], index=city_list)
        for i in range(1, 7)
    ]
    outer_df = pd.concat(day_frames)
    if city == "":
        return outer_df
    return outer_df[outer_df.index == city]
if __name__ == "__main__":
air_quality_hebei_df = air_quality_hebei(city="石家庄市")
print(air_quality_hebei_df)
| 39.057692 | 120 | 0.716396 |
7954e82f3d1d2912222e367392c4e5fc6f9ca09f | 1,835 | py | Python | learned_optimization/tasks/parametric/image_mlp_ae_test.py | Sohl-Dickstein/learned_optimization | cd929359a51d09444665021387c058aac11b63ba | [
"Apache-2.0"
] | 70 | 2021-12-16T07:12:11.000Z | 2022-03-31T19:13:36.000Z | learned_optimization/tasks/parametric/image_mlp_ae_test.py | Sohl-Dickstein/learned_optimization | cd929359a51d09444665021387c058aac11b63ba | [
"Apache-2.0"
] | 10 | 2021-12-29T10:03:37.000Z | 2022-03-22T15:59:55.000Z | learned_optimization/tasks/parametric/image_mlp_ae_test.py | Sohl-Dickstein/learned_optimization | cd929359a51d09444665021387c058aac11b63ba | [
"Apache-2.0"
] | 5 | 2021-12-16T04:52:35.000Z | 2022-03-22T03:45:31.000Z | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_mlp_ae."""
from absl.testing import absltest
import jax
from learned_optimization.tasks import test_utils
from learned_optimization.tasks.datasets import image
from learned_optimization.tasks.parametric import cfgobject
from learned_optimization.tasks.parametric import image_mlp_ae
class ImageMlpAETest(absltest.TestCase):
    def test_ParametricImageMLPAE(self):
        # Smoke-test the task family end to end on tiny 8x8 MNIST batches.
        datasets = image.mnist_datasets(8, image_size=(8, 8))
        task_family = image_mlp_ae.ParametricImageMLPAE(datasets, (16, 5))
        test_utils.smoketest_task_family(task_family)
    def test_sample_image_mlp_ae(self):
        # Sampling must be deterministic in the PRNG key: the same key gives
        # the same config, a different key gives a different one.
        key = jax.random.PRNGKey(0)
        cfg1 = image_mlp_ae.sample_image_mlp_ae(key)
        cfg2 = image_mlp_ae.sample_image_mlp_ae(key)
        key = jax.random.PRNGKey(1)
        cfg3 = image_mlp_ae.sample_image_mlp_ae(key)
        self.assertEqual(cfg1, cfg2)
        self.assertNotEqual(cfg1, cfg3)
        # A sampled config must round-trip into a real task-family object.
        obj = cfgobject.object_from_config(cfg1)
        self.assertIsInstance(obj, image_mlp_ae.ParametricImageMLPAE)
    def test_timed_sample_image_mlp_ae(self):
        key = jax.random.PRNGKey(0)
        sampled_task = image_mlp_ae.timed_sample_image_mlp_ae(key)
        self.assertIsInstance(sampled_task, cfgobject.CFGObject)
if __name__ == '__main__':
absltest.main()
| 34.622642 | 74 | 0.777112 |
7954e96bfda7f3778511b6705774b7981004f354 | 8,061 | py | Python | docs/shared_bindings_matrix.py | Neradoc/circuitpython | 932131b4ff4b95066a872b5b299a84e80b7c45d3 | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 3,010 | 2017-01-07T23:43:33.000Z | 2022-03-31T06:02:59.000Z | docs/shared_bindings_matrix.py | Neradoc/circuitpython | 932131b4ff4b95066a872b5b299a84e80b7c45d3 | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 4,478 | 2017-01-06T01:35:02.000Z | 2022-03-31T23:03:27.000Z | docs/shared_bindings_matrix.py | Neradoc/circuitpython | 932131b4ff4b95066a872b5b299a84e80b7c45d3 | [
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 1,149 | 2017-01-09T00:35:23.000Z | 2022-03-31T21:24:29.000Z | # The MIT License (MIT)
#
# SPDX-FileCopyrightText: Copyright (c) 2019 Michael Schroeder
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import json
import os
import pathlib
import re
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor
# Ports whose boards are scanned when building the support matrix.
SUPPORTED_PORTS = ['atmel-samd', 'cxd56', 'espressif', 'litex', 'mimxrt10xx', 'nrf', 'raspberrypi', 'stm']
# Board directories that also ship under other names: key is the canonical
# board dir, value the list of alias board ids.
aliases_by_board = {
    "circuitplayground_express": [
        "circuitplayground_express_4h",
        "circuitplayground_express_digikey_pycon2019",
    ],
    "pybadge": ["edgebadge"],
    "pyportal": ["pyportal_pynt"],
    "gemma_m0": ["gemma_m0_pycon2018"],
    "pewpew10": ["pewpew13"],
}
# Human-readable brand names for the alias ids above.
aliases_brand_names = {
    "circuitplayground_express_4h":
        "Adafruit Circuit Playground Express 4-H",
    "circuitplayground_express_digikey_pycon2019":
        "Circuit Playground Express Digi-Key PyCon 2019",
    "edgebadge":
        "Adafruit EdgeBadge",
    "pyportal_pynt":
        "Adafruit PyPortal Pynt",
    "gemma_m0_pycon2018":
        "Adafruit Gemma M0 PyCon 2018",
    "pewpew13":
        "PewPew 13",
}
# Modules whose enabling make flag differs from CIRCUITPY_<MODULENAME>;
# the value is the make variable that actually controls them.
additional_modules = {
    "fontio": "CIRCUITPY_DISPLAYIO",
    "terminalio": "CIRCUITPY_DISPLAYIO",
    "adafruit_bus_device": "CIRCUITPY_BUSDEVICE",
    "adafruit_pixelbuf": "CIRCUITPY_PIXELBUF"
}
def get_circuitpython_root_dir():
    """ The path to the root './circuitpython' directory
    """
    this_file = pathlib.Path(__file__).resolve()
    return this_file.parent.parent
def get_shared_bindings():
    """ Get a list of modules in shared-bindings based on folder names
    """
    extra_modules = ["binascii", "errno", "json", "re", "ulab"]
    bindings_path = get_circuitpython_root_dir() / "shared-bindings"
    return [entry.name for entry in bindings_path.iterdir()] + extra_modules
def read_mpconfig():
    """ Open 'circuitpy_mpconfig.mk' and return the contents.
    """
    mpconfig_path = get_circuitpython_root_dir() / "py" / "circuitpy_mpconfig.mk"
    with open(mpconfig_path) as mpconfig:
        return mpconfig.read()
def build_module_map():
    """ Establish the base of the JSON file, based on the contents from
        `configs`. Base will contain module names, if they're part of
        the `FULL_BUILD`, or their default value (0, 1, or a list of
        modules that determine default [see audiocore, audiomixer, etc.]).
    """
    base = dict()
    modules = get_shared_bindings()
    configs = read_mpconfig()
    full_build = False
    for module in modules:
        full_name = module
        # Some modules are controlled by a differently-named flag (see
        # additional_modules); the rest map to CIRCUITPY_<MODULE>.
        if module in additional_modules:
            search_identifier = additional_modules[module]
        else:
            search_identifier = 'CIRCUITPY_'+module.lstrip("_").upper()
        # Match both "VAR = value" and "VAR ?= value" assignments.
        re_pattern = f"{re.escape(search_identifier)}\s*\??=\s*(.+)"
        find_config = re.findall(re_pattern, configs)
        if not find_config:
            continue
        # Join multiple assignments, stripping make's $( ) wrappers.
        find_config = ", ".join([x.strip("$()") for x in find_config])
        full_build = int("CIRCUITPY_FULL_BUILD" in find_config)
        if not full_build:
            default_val = find_config
        else:
            default_val = "None"
        base[module] = {
            "name": full_name,
            "full_build": str(full_build),
            "default_value": default_val,
            "excluded": {},
            "key": search_identifier,
        }
    return base
def get_settings_from_makefile(port_dir, board_name):
    """Run ``make -qp`` for a board and parse the printed variable database.

    Letting make itself evaluate the Makefiles means every directive is
    honoured without re-encoding that logic here, something that has
    proved error-prone in the past.
    """
    proc = subprocess.run(
        ["make", "-C", port_dir, f"BOARD={board_name}", "-qp", "print-CC"],
        encoding="utf-8",
        errors="replace",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    # Make signals errors with exit status 2; 0 and 1 are "non-error" statuses
    if proc.returncode not in (0, 1):
        raise RuntimeError(
            f"Invoking '{' '.join(proc.args)}' exited with "
            f"{proc.returncode}: {proc.stderr}"
        )
    # Handle both = and := definitions.
    assignment = re.compile(r'^([A-Z][A-Z0-9_]*) :?= (.*)$')
    settings = {}
    for line in proc.stdout.split('\n'):
        match = assignment.match(line)
        if match:
            settings[match.group(1)] = match.group(2)
    return settings
def lookup_setting(settings, key, default=''):
    """Resolve ``key`` in ``settings``, following $(VAR)-style indirections.

    A value of the form ``$(OTHER)`` is treated as a reference and looked
    up again until a literal value (or the default) is reached.
    """
    value = settings.get(key, default)
    while value.startswith('$'):
        value = settings.get(value[2:-1], default)
    return value
def all_ports_all_boards(ports=SUPPORTED_PORTS):
    """Yield (port_name, board_dir) pairs for every board of every port."""
    for port in ports:
        boards_dir = get_circuitpython_root_dir() / "ports" / port / "boards"
        for entry in boards_dir.iterdir():
            if entry.is_dir():
                yield (port, entry)
def support_matrix_by_board(use_branded_name=True):
    """ Compiles a list of the available core modules available for each
    board.
    """
    base = build_module_map()
    def support_matrix(arg):
        # Worker: compute [(board_name, [module, ...]), ...] for one board
        # (plus its aliases).
        port, entry = arg
        port_dir = get_circuitpython_root_dir() / "ports" / port
        settings = get_settings_from_makefile(str(port_dir), entry.name)
        if use_branded_name:
            with open(entry / "mpconfigboard.h") as get_name:
                board_contents = get_name.read()
            board_name_re = re.search(r"(?<=MICROPY_HW_BOARD_NAME)\s+(.+)",
                                      board_contents)
            if board_name_re:
                board_name = board_name_re.group(1).strip('"')
            # NOTE(review): if the regex fails to match, board_name appears
            # to be left unbound here -- latent NameError; confirm against
            # the un-flattened upstream indentation.
        else:
            board_name = entry.name
        board_modules = []
        for module in base:
            key = base[module]['key']
            # A module is present when its make flag evaluates to nonzero.
            if int(lookup_setting(settings, key, '0')):
                board_modules.append(base[module]['name'])
        board_modules.sort()
        # generate alias boards too
        board_matrix = [(board_name, board_modules)]
        if entry.name in aliases_by_board:
            for alias in aliases_by_board[entry.name]:
                if use_branded_name:
                    if alias in aliases_brand_names:
                        alias = aliases_brand_names[alias]
                    else:
                        alias = alias.replace("_"," ").title()
                board_matrix.append( (alias, board_modules) )
        return board_matrix # this is now a list of (board,modules)
    # Each board runs "make", so parallelize across a thread pool.
    executor = ThreadPoolExecutor(max_workers=os.cpu_count())
    mapped_exec = executor.map(support_matrix, all_ports_all_boards())
    # flatmap with comprehensions
    boards = dict(sorted([board for matrix in mapped_exec for board in matrix]))
    # print(json.dumps(boards, indent=2))
    return boards
return boards
if __name__ == '__main__':
print(json.dumps(support_matrix_by_board(), indent=2))
| 34.596567 | 110 | 0.639747 |
7954e9fb36a62654dd195f865961ef9645f9f960 | 6,622 | py | Python | livesettings/functions.py | craigds/django-livesettings | 134bc0644ada8e657f724fc65b4161066e749230 | [
"BSD-3-Clause"
] | 2 | 2015-12-18T06:17:45.000Z | 2016-10-18T15:59:24.000Z | livesettings/functions.py | craigds/django-livesettings | 134bc0644ada8e657f724fc65b4161066e749230 | [
"BSD-3-Clause"
] | null | null | null | livesettings/functions.py | craigds/django-livesettings | 134bc0644ada8e657f724fc65b4161066e749230 | [
"BSD-3-Clause"
] | null | null | null | from django.utils.translation import ugettext
from livesettings import values
from livesettings.exceptions import SettingNotSet
from livesettings.utils import is_string_like
import logging
import warnings
log = logging.getLogger('configuration')
_NOTSET = object()
class ConfigurationSettings(object):
    """Registry of all livesettings groups/values (Python 2 code).

    Acts as a singleton: every instantiation returns the same object.
    """
    _instance = None
    def __new__(cls, *args, **kwargs):
        # for backwards compatibility, make this a singleton.
        if ConfigurationSettings._instance is None:
            instance = ConfigurationSettings._instance = super(ConfigurationSettings, cls).__new__(cls, *args, **kwargs)
            instance.settings = values.SortedDotDict()
            instance.prereg = {}
        else:
            warnings.warn("The ConfigurationSettings singleton is deprecated. Use livesettings.configuration_settings instead", DeprecationWarning)
        return ConfigurationSettings._instance
    def __getitem__(self, key):
        """Get an element either by ConfigurationGroup object or by its key"""
        key = self._resolve_key(key)
        return self.settings.get(key)
    def __getattr__(self, key):
        """Get an element either by ConfigurationGroup object or by its key"""
        try:
            return self[key]
        except KeyError:
            # Python 2 raise syntax; equivalent to raise AttributeError(key).
            raise AttributeError, key
    def __iter__(self):
        for v in self.groups():
            yield v
    def __len__(self):
        return len(self.settings)
    def __contains__(self, key):
        key = self._resolve_key(key)
        return key in self.settings
    def _resolve_key(self, raw):
        # Accepts a key string, a ConfigurationGroup, or a positional index.
        if is_string_like(raw):
            key = raw
        elif isinstance(raw, values.ConfigurationGroup):
            key = raw.key
        else:
            group = self.groups()[raw]
            key = group.key
        return key
    def get_config(self, group, key):
        # Raises SettingNotSet when either the group or the key is missing.
        try:
            if isinstance(group, values.ConfigurationGroup):
                group = group.key
            cg = self.settings.get(group, None)
            if not cg:
                raise SettingNotSet('%s config group does not exist' % group)
            else:
                return cg[key]
        except KeyError:
            raise SettingNotSet('%s.%s' % (group, key))
    def groups(self):
        """Return ordered list"""
        # NB: the local ``values`` shadows the imported ``values`` module here.
        values = self.settings.values()
        values.sort()
        return values
    def has_config(self, group, key):
        if isinstance(group, values.ConfigurationGroup):
            group = group.key
        cfg = self.settings.get(group, None)
        if cfg and key in cfg:
            return True
        else:
            return False
    def preregister_choice(self, group, key, choice):
        """Setup a choice for a group/key which hasn't been instantiated yet."""
        k = (group, key)
        if self.prereg.has_key(k):
            self.prereg[k].append(choice)
        else:
            self.prereg[k] = [choice]
    def register(self, value):
        # Registers a value, applying any choices preregistered for its key.
        g = value.group
        if not isinstance(g, values.ConfigurationGroup):
            raise ValueError('value.group should be an instance of ConfigurationGroup')
        groupkey = g.key
        valuekey = value.key
        k = (groupkey, valuekey)
        if self.prereg.has_key(k):
            for choice in self.prereg[k]:
                value.add_choice(choice)
        if not groupkey in self.settings:
            self.settings[groupkey] = g
        self.settings[groupkey][valuekey] = value
        return value
    def __unicode__(self):
        return u"ConfigurationSettings: " + unicode(self.groups())
configuration_settings = ConfigurationSettings()
def config_exists(group, key):
    """Test to see if a setting has been registered.

    Returns True if the global registry has a value at group.key.
    """
    return configuration_settings.has_config(group, key)
def config_get(group, key):
    """Get a configuration setting object from the global registry.

    Raises SettingNotSet (logged at debug level) when group.key is not
    registered.
    """
    try:
        return configuration_settings.get_config(group, key)
    except SettingNotSet:
        log.debug('SettingNotSet: %s.%s', group, key)
        raise
def config_get_group(group):
    """Return the registered group for *group* (a key string or group object)."""
    return configuration_settings[group]
def config_collect_values(group, groupkey, key, unique=True, skip_missing=True):
    """Perform a two-stage configuration lookup.

    Stage 1 reads (group, groupkey), whose value is expected to be a list of
    group names. Stage 2 looks up *key* in each of those groups and collects
    the results, for example:

        config_collect_values(PAYMENT, MODULES, CREDITCHOICES)
        -> config_value('PAYMENT_GOOGLE', 'CREDITCHOICES')
           + config_value('PAYMENT_AUTHORIZENET', 'CREDITCHOICES') ...

    When *skip_missing* is False a missing stage-2 setting raises
    SettingNotSet; otherwise it is silently skipped. When *unique* is True,
    duplicates are removed while preserving first-seen order.
    """
    subgroups = config_value(group, groupkey)
    collected = []
    for subgroup in subgroups:
        try:
            collected.append(config_value(subgroup, key))
        except KeyError:
            if not skip_missing:
                raise SettingNotSet('No config %s.%s' % (subgroup, key))
    if unique:
        deduped = []
        for entry in collected:
            if entry not in deduped:
                deduped.append(entry)
        collected = deduped
    return collected
def config_register(value):
    """Register a single configuration value with the global registry.

    Returns the registered value (as returned by
    ConfigurationSettings.register).
    """
    return configuration_settings.register(value)
def config_register_list(*args):
    """Register each of the given configuration values in turn."""
    for value in args:
        config_register(value)
def config_value(group, key, default=_NOTSET):
    """Get a value from the configuration system.

    If *default* is given, it is returned instead of raising SettingNotSet
    when group.key is not registered.
    """
    try:
        return config_get(group, key).value
    except SettingNotSet:
        # Compare against the sentinel by identity: '!=' would invoke
        # __ne__ on arbitrary caller-supplied defaults, which can be
        # expensive or raise (e.g. array-like objects).
        if default is not _NOTSET:
            return default
        raise
def config_value_safe(group, key, default_value):
    """Get a config value with a default fallback, safe for use during SyncDB.

    Falls back to *default_value* when the setting is not registered, and
    also tolerates ImportError (which can occur while the database is being
    initialized).
    """
    raw = default_value
    try:
        raw = config_value(group, key)
    except SettingNotSet:
        pass
    except ImportError:
        # logging.Logger.warn is a deprecated alias of warning.
        log.warning("Error getting %s.%s, OK if you are in SyncDB.", group, key)
    return raw
def config_choice_values(group, key, skip_missing=True, translate=False):
    """Get pairs of key, label from the setting.

    When the setting is missing, returns [] if skip_missing else raises
    SettingNotSet. When translate is True, labels are passed through
    ugettext.
    """
    try:
        cfg = config_get(group, key)
        choices = cfg.choice_values
    except SettingNotSet:
        if skip_missing:
            return []
        else:
            raise SettingNotSet('%s.%s' % (group, key))
    if translate:
        choices = [(k, ugettext(v)) for k, v in choices]
    return choices
def config_add_choice(group, key, choice):
    """Add a choice to a setting.

    If the setting is not yet registered, the choice is stored and applied
    when the setting is eventually registered.
    """
    if not config_exists(group, key):
        configuration_settings.preregister_choice(group, key, choice)
    else:
        config_get(group, key).add_choice(choice)
| 28.420601 | 147 | 0.62232 |
7954ea78837f88af5afcabfd5963c5fc9ffc510b | 4,624 | py | Python | nuitka/codegen/LoaderCodes.py | dmikushin/Nuitka | 1e7e05257b9b39ec74be807bfdb0dec11a68e1ed | [
"Apache-2.0"
] | null | null | null | nuitka/codegen/LoaderCodes.py | dmikushin/Nuitka | 1e7e05257b9b39ec74be807bfdb0dec11a68e1ed | [
"Apache-2.0"
] | null | null | null | nuitka/codegen/LoaderCodes.py | dmikushin/Nuitka | 1e7e05257b9b39ec74be807bfdb0dec11a68e1ed | [
"Apache-2.0"
] | null | null | null | # Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Code to generate and interact with module loaders.
This is for generating the look-up table for the modules included in a binary
or distribution folder.
"""
from nuitka.ModuleRegistry import getUncompiledNonTechnicalModules
from . import ConstantCodes
from .Indentation import indented
from .templates.CodeTemplatesLoader import (
template_metapath_loader_body,
template_metapath_loader_bytecode_module_entry,
template_metapath_loader_compiled_module_entry,
template_metapath_loader_shlib_module_entry,
)
def getModuleMetapathLoaderEntryCode(module):
    """Return the C table entry (as source text) for one module.

    Dispatches on the module kind: bytecode modules embed their bytecode
    stream offset and size, shared-library modules only record their name,
    and compiled modules record their code identifier.
    """
    module_name = module.getFullName()
    if module.isUncompiledPythonModule():
        code_data = module.getByteCode()
        is_package = module.isUncompiledPythonPackage()
        flags = ["NUITKA_BYTECODE_FLAG"]
        if is_package:
            flags.append("NUITKA_PACKAGE_FLAG")
        return template_metapath_loader_bytecode_module_entry % {
            "module_name": module.getFullName(),
            # Offset into the shared constants stream where the bytecode lives.
            "bytecode": stream_data.getStreamDataOffset(code_data),
            "size": len(code_data),
            "flags": " | ".join(flags),
        }
    elif module.isPythonShlibModule():
        # The main module can never be an extension module.
        assert module_name != "__main__"
        return template_metapath_loader_shlib_module_entry % {
            "module_name": module_name
        }
    else:
        flags = []
        if module.isCompiledPythonPackage():
            flags.append("NUITKA_PACKAGE_FLAG")
        return template_metapath_loader_compiled_module_entry % {
            "module_name": module_name,
            "module_identifier": module.getCodeName(),
            "flags": " | ".join(flags),
        }
stream_data = ConstantCodes.stream_data
def getMetapathLoaderBodyCode(other_modules):
    """Build the loader body source with the full module lookup table.

    Emits one table entry per module (compiled, bytecode, or shared
    library) plus forward declarations for compiled module init functions,
    then appends entries for uncompiled non-technical modules.
    """
    metapath_loader_inittab = []
    metapath_module_decls = []

    for other_module in other_modules:
        # getModuleMetapathLoaderEntryCode already handles uncompiled
        # (bytecode) modules identically to the previously inlined template
        # expansion, so delegate unconditionally instead of duplicating it.
        metapath_loader_inittab.append(
            getModuleMetapathLoaderEntryCode(module=other_module)
        )

        if other_module.isCompiledPythonModule():
            metapath_module_decls.append(
                "%s(%s);"
                % (
                    "MOD_INIT_DECL" if other_module.isTopModule() else "MOD_ENTRY_DECL",
                    other_module.getCodeName(),
                )
            )

    # These are always bytecode modules, for which the entry helper emits
    # the same bytecode table entry as before.
    for uncompiled_module in getUncompiledNonTechnicalModules():
        metapath_loader_inittab.append(
            getModuleMetapathLoaderEntryCode(module=uncompiled_module)
        )

    return template_metapath_loader_body % {
        "metapath_module_decls": indented(metapath_module_decls, 0),
        "metapath_loader_inittab": indented(metapath_loader_inittab),
    }
7954ec7885d6a6baddf325f9507fee8e996b0c7e | 956 | py | Python | dynabuffers-python/dynabuffers/ast/datatype/ArrayType.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 2 | 2019-10-28T12:28:01.000Z | 2020-07-07T12:25:40.000Z | dynabuffers-python/dynabuffers/ast/datatype/ArrayType.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 1 | 2021-12-21T07:35:22.000Z | 2021-12-21T07:35:22.000Z | dynabuffers-python/dynabuffers/ast/datatype/ArrayType.py | leftshiftone/dynabuffers | c3e94c56989be3df87b50b8d9e17d1ea86199ede | [
"Apache-2.0"
] | 1 | 2020-03-19T09:19:43.000Z | 2020-03-19T09:19:43.000Z | from dynabuffers.api.ISerializable import ISerializable
from dynabuffers.ast.datatype.ByteType import ByteType
class ArrayTypeOptions:
    """Options holder carrying the element datatype for an ArrayType."""
    def __init__(self, datatype:ISerializable):
        # Serializer used for each element of the array.
        self.datatype = datatype
class ArrayType(ISerializable):
    """Length-prefixed array of values of a single element datatype.

    Wire format: a 4-byte element count followed by each element encoded
    with the element datatype from the options.
    """
    def __init__(self, options):
        self.options = options
    def size(self, value, registry):
        # 4 bytes for the length prefix plus the size of every element.
        element_type = self.options.datatype
        total = 4
        for item in value:
            total += element_type.size(item, registry)
        return total
    def serialize(self, value, buffer, registry):
        buffer.putInt(len(value))
        element_type = self.options.datatype
        for item in value:
            element_type.serialize(item, buffer, registry)
    def deserialize(self, buffer, registry):
        element_type = self.options.datatype
        count = buffer.getInt()
        items = [element_type.deserialize(buffer, registry) for _ in range(count)]
        # Byte arrays are materialized as an immutable bytes object.
        if isinstance(element_type, ByteType):
            return bytes(items)
        return items
7954edb21b1eb909fa766e429f6a0cf59945a6e5 | 44,248 | py | Python | tools/idl_parser/idl_parser.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/idl_parser/idl_parser.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/idl_parser/idl_parser.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-03-07T14:20:02.000Z | 2021-03-07T14:20:02.000Z | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parser for Web IDL."""
#
# IDL Parser
#
# The parser uses the PLY yacc library to build a set of parsing rules based
# on Web IDL.
#
# Web IDL, and Web IDL grammar can be found at:
# http://heycam.github.io/webidl/
# PLY can be found at:
# http://www.dabeaz.com/ply/
#
# The parser generates a tree by recursively matching sets of items against
# defined patterns. When a match is made, that set of items is reduced
# to a new item. The new item can provide a match for parent patterns.
# In this way an AST is built (reduced) depth first.
#
#
# Disable check for line length and Member as Function due to how grammar rules
# are defined with PLY
#
# pylint: disable=R0201
# pylint: disable=C0301
from __future__ import print_function
import os.path
import sys
import time
# Can't use relative imports if we don't have a parent package.
if __package__:
from .idl_lexer import IDLLexer
from .idl_node import IDLAttribute, IDLNode
else:
from idl_lexer import IDLLexer
from idl_node import IDLAttribute, IDLNode
SRC_DIR = os.path.abspath(os.path.dirname(__file__))
# Preserve sys.path[0] as is.
# https://docs.python.org/3/library/sys.html?highlight=path[0]#sys.path
sys.path.insert(1, os.path.join(SRC_DIR, os.pardir, os.pardir, 'third_party'))
from ply import lex
from ply import yacc
#
# ERROR_REMAP
#
# Maps the standard error formula into a more friendly error message.
#
ERROR_REMAP = {
'Unexpected ")" after "(".' : 'Empty argument list.',
'Unexpected ")" after ",".' : 'Missing argument.',
'Unexpected "}" after ",".' : 'Trailing comma in block.',
'Unexpected "}" after "{".' : 'Unexpected empty block.',
'Unexpected comment after "}".' : 'Unexpected trailing comment.',
'Unexpected "{" after keyword "enum".' : 'Enum missing name.',
'Unexpected "{" after keyword "struct".' : 'Struct missing name.',
'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}
_EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = [
'Clamp', 'EnforceRange', 'StringContext', 'TreatNullAs']
def Boolean(val):
  """Convert to strict boolean type (the True/False singletons).

  Equivalent to the built-in bool(); kept as a named helper for the
  parser's attribute-building call sites.
  """
  return bool(val)
def ListFromConcat(*items):
  """Flatten the arguments into one list.

  None arguments are skipped; list arguments are spliced in element by
  element; everything else is appended as a single element. Note the exact
  type test: list subclasses are appended whole, matching historic behavior.
  """
  result = []
  for entry in items:
    if entry is None:
      continue
    if type(entry) is list:
      result.extend(entry)
    else:
      result.append(entry)
  return result
def ExpandProduction(p):
  """Render a production element (list, node, attribute, or string) for debugging."""
  cls = type(p)
  if cls == list:
    return '[%s]' % ', '.join(ExpandProduction(item) for item in p)
  if cls == IDLNode:
    return 'Node:' + str(p)
  if cls == IDLAttribute:
    return 'Attr:' + str(p)
  if cls == str:
    return 'str:' + p
  return '%s:%s' % (cls.__name__, str(p))
# TokenTypeName
#
# Generate a string which has the type and value of the token.
#
def TokenTypeName(t):
  """Return a human-readable phrase describing a lexer token's kind and value."""
  ttype = t.type
  tval = t.value
  if ttype == 'SYMBOL':
    return 'symbol %s' % tval
  if ttype in ('HEX', 'INT', 'OCT', 'FLOAT'):
    return 'value %s' % tval
  if ttype == 'string':
    return 'string "%s"' % tval
  if ttype == 'SPECIAL_COMMENT':
    return 'comment'
  # Punctuation tokens have their literal text as the type.
  if ttype == tval:
    return '"%s"' % tval
  if ttype == ',':
    return 'Comma'
  if ttype == 'identifier':
    return 'identifier "%s"' % tval
  return 'keyword "%s"' % tval
# TODO(bashi): Consider moving this out of idl_parser.
def ExtractSpecialComment(comment):
  """Strip /** ... */ markers and per-line '*' prefixes from a special comment.

  Raises ValueError if the text does not carry the expected markers. Lines
  without a '*' become empty; on other lines, everything up to and including
  the first '*' is removed and trailing whitespace is stripped.
  """
  if not comment.startswith('/**'):
    raise ValueError('Special comment must start with /**')
  if not comment.endswith('*/'):
    raise ValueError('Special comment must end with */')
  stripped = []
  for raw_line in comment[2:-2].split('\n'):
    star = raw_line.find('*')
    if star < 0:
      stripped.append('')
    else:
      stripped.append(raw_line[star + 1:].rstrip())
  return '\n'.join(stripped)
# There are two groups of ExtendedAttributes.
# One group can apply to types (It is said "applicable to types"),
# but the other cannot apply to types.
# This function is intended to divide ExtendedAttributes into those 2 groups.
# For more details at
# https://heycam.github.io/webidl/#extended-attributes-applicable-to-types
def DivideExtAttrsIntoApplicableAndNonApplicable(extended_attribute_list):
  """Split extended attributes into [applicable-to-types, everything-else].

  Per https://heycam.github.io/webidl/#extended-attributes-applicable-to-types
  only a fixed set of extended attributes may annotate types; the rest must
  stay attached to the member. Returns two lists in that order.
  """
  if not extended_attribute_list:
    return [[], []]
  applicable = []
  non_applicable = []
  for attribute in extended_attribute_list.GetChildren():
    if attribute.GetName() in _EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES:
      applicable.append(attribute)
    else:
      non_applicable.append(attribute)
  return [applicable, non_applicable]
#
# IDL Parser
#
# The Parser inherits the from the Lexer to provide PLY with the tokenizing
# definitions. Parsing patterns are encoded as functions where p_<name> is
# is called any time a patern matching the function documentation is found.
# Paterns are expressed in the form of:
# """ <new item> : <item> ....
# | <item> ...."""
#
# Where new item is the result of a match against one or more sets of items
# separated by the "|".
#
# The function is called with an object 'p' where p[0] is the output object
# and p[n] is the set of inputs for positive values of 'n'. Len(p) can be
# used to distinguish between multiple item sets in the pattern.
#
# The rules can look cryptic at first, but there are a few standard
# transforms from the CST to AST. With these in mind, the actions should
# be reasonably legible.
#
# * Ignore production
# Discard this branch. Primarily used when one alternative is empty.
#
# Sample code:
# if len(p) > 1:
# p[0] = ...
# # Note no assignment if len(p) == 1
#
# * Eliminate singleton production
# Discard this node in the CST, pass the next level down up the tree.
# Used to ignore productions only necessary for parsing, but not needed
# in the AST.
#
# Sample code:
# p[0] = p[1]
#
# * Build node
# The key type of rule. In this parser, produces object of class IDLNode.
# There are several helper functions:
# * BuildProduction: actually builds an IDLNode, based on a production.
# * BuildAttribute: builds an IDLAttribute, which is a temporary
# object to hold a name-value pair, which is then
# set as a Property of the IDLNode when the IDLNode
# is built.
# * BuildNamed: Same as BuildProduction, and sets the 'NAME' property.
# * BuildTrue: BuildAttribute with value True, for flags.
#
# Sample code:
# # Build node of type NodeType, with value p[1], and children.
# p[0] = self.BuildProduction('NodeType', p, 1, children)
#
# # Build named node of type NodeType, with name and value p[1].
# # (children optional)
# p[0] = self.BuildNamed('NodeType', p, 1)
#
# # Make a list
# # Used if one node has several children.
# children = ListFromConcat(p[2], p[3])
# p[0] = self.BuildProduction('NodeType', p, 1, children)
#
# # Also used to collapse the right-associative tree
# # produced by parsing a list back into a single list.
# """Foos : Foo Foos
# |"""
# if len(p) > 1:
# p[0] = ListFromConcat(p[1], p[2])
#
# # Add children.
# # Primarily used to add attributes, produced via BuildTrue.
# # p_StaticAttribute
# """StaticAttribute : STATIC Attribute"""
# p[2].AddChildren(self.BuildTrue('STATIC'))
# p[0] = p[2]
#
# For more details on parsing refer to the PLY documentation at
# http://www.dabeaz.com/ply/
#
# The parser is based on the Web IDL standard. See:
# http://heycam.github.io/webidl/#idl-grammar
#
# Productions with a fractional component in the comment denote additions to
# the Web IDL spec, such as allowing string list in extended attributes.
class IDLParser(object):
def p_Definitions(self, p):
"""Definitions : SpecialComments ExtendedAttributeList Definition Definitions
| ExtendedAttributeList Definition Definitions
| """
if len(p) > 4:
special_comments_and_attribs = ListFromConcat(p[1], p[2])
p[3].AddChildren(special_comments_and_attribs)
p[0] = ListFromConcat(p[3], p[4])
elif len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
def p_Definition(self, p):
"""Definition : CallbackOrInterfaceOrMixin
| Namespace
| Partial
| Dictionary
| Enum
| Typedef
| IncludesStatement"""
p[0] = p[1]
# Error recovery for definition
def p_DefinitionError(self, p):
"""Definition : error ';'"""
p[0] = self.BuildError(p, 'Definition')
def p_ArgumentNameKeyword(self, p):
"""ArgumentNameKeyword : ASYNC
| ATTRIBUTE
| CALLBACK
| CONST
| CONSTRUCTOR
| DELETER
| DICTIONARY
| ENUM
| GETTER
| INCLUDES
| INHERIT
| INTERFACE
| ITERABLE
| MAPLIKE
| NAMESPACE
| PARTIAL
| REQUIRED
| SETLIKE
| SETTER
| STATIC
| STRINGIFIER
| TYPEDEF
| UNRESTRICTED"""
p[0] = p[1]
def p_CallbackOrInterfaceOrMixin(self, p):
"""CallbackOrInterfaceOrMixin : CALLBACK CallbackRestOrInterface
| INTERFACE InterfaceOrMixin"""
p[0] = p[2]
def p_InterfaceOrMixin(self, p):
"""InterfaceOrMixin : InterfaceRest
| MixinRest"""
p[0] = p[1]
def p_InterfaceRest(self, p):
"""InterfaceRest : identifier Inheritance '{' InterfaceMembers '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 1, ListFromConcat(p[2], p[4]))
# Error recovery for interface.
def p_InterfaceRestError(self, p):
"""InterfaceRest : identifier Inheritance '{' error"""
p[0] = self.BuildError(p, 'Interface')
def p_Partial(self, p):
"""Partial : PARTIAL PartialDefinition"""
p[2].AddChildren(self.BuildTrue('PARTIAL'))
p[0] = p[2]
# Error recovery for Partial
def p_PartialError(self, p):
"""Partial : PARTIAL error"""
p[0] = self.BuildError(p, 'Partial')
def p_PartialDefinition(self, p):
"""PartialDefinition : INTERFACE PartialInterfaceOrPartialMixin
| PartialDictionary
| Namespace"""
if len(p) > 2:
p[0] = p[2]
else:
p[0] = p[1]
def p_PartialInterfaceOrPartialMixin(self, p):
"""PartialInterfaceOrPartialMixin : PartialInterfaceRest
| MixinRest"""
p[0] = p[1]
def p_PartialInterfaceRest(self, p):
"""PartialInterfaceRest : identifier '{' PartialInterfaceMembers '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 1, p[3])
def p_InterfaceMembers(self, p):
"""InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
|"""
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# Error recovery for InterfaceMembers
def p_InterfaceMembersError(self, p):
"""InterfaceMembers : error"""
p[0] = self.BuildError(p, 'InterfaceMembers')
def p_InterfaceMember(self, p):
"""InterfaceMember : PartialInterfaceMember
| Constructor"""
p[0] = p[1]
def p_PartialInterfaceMembers(self, p):
"""PartialInterfaceMembers : ExtendedAttributeList PartialInterfaceMember PartialInterfaceMembers
|"""
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# Error recovery for InterfaceMembers
def p_PartialInterfaceMembersError(self, p):
"""PartialInterfaceMembers : error"""
p[0] = self.BuildError(p, 'PartialInterfaceMembers')
def p_PartialInterfaceMember(self, p):
"""PartialInterfaceMember : Const
| Operation
| Stringifier
| StaticMember
| Iterable
| AsyncIterable
| ReadonlyMember
| ReadWriteAttribute
| ReadWriteMaplike
| ReadWriteSetlike"""
p[0] = p[1]
def p_Inheritance(self, p):
"""Inheritance : ':' identifier
|"""
if len(p) > 1:
p[0] = self.BuildNamed('Inherit', p, 2)
def p_MixinRest(self, p):
"""MixinRest : MIXIN identifier '{' MixinMembers '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 2, p[4])
p[0].AddChildren(self.BuildTrue('MIXIN'))
def p_MixinMembers(self, p):
"""MixinMembers : ExtendedAttributeList MixinMember MixinMembers
|"""
if len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
# Error recovery for InterfaceMembers
def p_MixinMembersError(self, p):
"""MixinMembers : error"""
p[0] = self.BuildError(p, 'MixinMembers')
def p_MixinMember(self, p):
"""MixinMember : Const
| Operation
| Stringifier
| ReadOnly AttributeRest"""
if len(p) == 2:
p[0] = p[1]
else:
p[2].AddChildren(p[1])
p[0] = p[2]
def p_IncludesStatement(self, p):
"""IncludesStatement : identifier INCLUDES identifier ';'"""
name = self.BuildAttribute('REFERENCE', p[3])
p[0] = self.BuildNamed('Includes', p, 1, name)
def p_CallbackRestOrInterface(self, p):
"""CallbackRestOrInterface : CallbackRest
| INTERFACE InterfaceRest"""
if len(p) < 3:
p[0] = p[1]
else:
p[2].AddChildren(self.BuildTrue('CALLBACK'))
p[0] = p[2]
def p_Const(self, p):
"""Const : CONST ConstType identifier '=' ConstValue ';'"""
value = self.BuildProduction('Value', p, 5, p[5])
p[0] = self.BuildNamed('Const', p, 3, ListFromConcat(p[2], value))
def p_ConstValue(self, p):
"""ConstValue : BooleanLiteral
| FloatLiteral
| integer"""
if type(p[1]) == str:
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'integer'),
self.BuildAttribute('VALUE', p[1]))
else:
p[0] = p[1]
def p_BooleanLiteral(self, p):
"""BooleanLiteral : TRUE
| FALSE"""
value = self.BuildAttribute('VALUE', Boolean(p[1] == 'true'))
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'boolean'), value)
def p_FloatLiteral(self, p):
"""FloatLiteral : float
| '-' INFINITY
| INFINITY
| NAN """
if len(p) > 2:
val = '-Infinity'
else:
val = p[1]
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'float'),
self.BuildAttribute('VALUE', val))
def p_ConstType(self, p):
"""ConstType : PrimitiveType Null
| identifier Null"""
if type(p[1]) == str:
p[0] = self.BuildNamed('Typeref', p, 1, p[2])
else:
p[1].AddChildren(p[2])
p[0] = p[1]
def p_ReadonlyMember(self, p):
"""ReadonlyMember : READONLY ReadonlyMemberRest"""
p[2].AddChildren(self.BuildTrue('READONLY'))
p[0] = p[2]
def p_ReadonlyMemberRest(self, p):
"""ReadonlyMemberRest : AttributeRest
| MaplikeRest
| SetlikeRest"""
p[0] = p[1]
def p_ReadWriteAttribute(self, p):
"""ReadWriteAttribute : INHERIT ReadOnly AttributeRest
| AttributeRest"""
if len(p) > 2:
inherit = self.BuildTrue('INHERIT')
p[3].AddChildren(ListFromConcat(inherit, p[2]))
p[0] = p[3]
else:
p[0] = p[1]
def p_AttributeRest(self, p):
"""AttributeRest : ATTRIBUTE TypeWithExtendedAttributes AttributeName ';'"""
p[0] = self.BuildNamed('Attribute', p, 3, p[2])
def p_AttributeName(self, p):
"""AttributeName : AttributeNameKeyword
| identifier"""
p[0] = p[1]
def p_AttributeNameKeyword(self, p):
"""AttributeNameKeyword : ASYNC
| REQUIRED"""
p[0] = p[1]
def p_ReadOnly(self, p):
"""ReadOnly : READONLY
|"""
if len(p) > 1:
p[0] = self.BuildTrue('READONLY')
def p_DefaultValue(self, p):
"""DefaultValue : ConstValue
| string
| '[' ']'
| '{' '}'
| null"""
if len(p) == 3:
if p[1] == '[':
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'sequence'),
self.BuildAttribute('VALUE', '[]'))
else:
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'dictionary'),
self.BuildAttribute('VALUE', '{}'))
elif type(p[1]) == str:
p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'DOMString'),
self.BuildAttribute('VALUE', p[1]))
else:
p[0] = p[1]
def p_Operation(self, p):
"""Operation : RegularOperation
| SpecialOperation"""
p[0] = p[1]
def p_RegularOperation(self, p):
"""RegularOperation : ReturnType OperationRest"""
p[2].AddChildren(p[1])
p[0] = p[2]
def p_SpecialOperation(self, p):
"""SpecialOperation : Special RegularOperation"""
p[2].AddChildren(p[1])
p[0] = p[2]
def p_Special(self, p):
"""Special : GETTER
| SETTER
| DELETER"""
p[0] = self.BuildTrue(p[1].upper())
def p_OperationRest(self, p):
"""OperationRest : OptionalOperationName '(' ArgumentList ')' ';'"""
arguments = self.BuildProduction('Arguments', p, 2, p[3])
p[0] = self.BuildNamed('Operation', p, 1, arguments)
def p_OptionalOperationName(self, p):
"""OptionalOperationName : OperationName
|"""
if len(p) > 1:
p[0] = p[1]
else:
p[0] = ''
def p_OperationName(self, p):
"""OperationName : OperationNameKeyword
| identifier"""
p[0] = p[1]
def p_OperationNameKeyword(self, p):
"""OperationNameKeyword : INCLUDES"""
p[0] = p[1]
def p_ArgumentList(self, p):
"""ArgumentList : Argument Arguments
|"""
if len(p) > 1:
p[0] = ListFromConcat(p[1], p[2])
# ArgumentList error recovery
def p_ArgumentListError(self, p):
"""ArgumentList : error """
p[0] = self.BuildError(p, 'ArgumentList')
def p_Arguments(self, p):
"""Arguments : ',' Argument Arguments
|"""
if len(p) > 1:
p[0] = ListFromConcat(p[2], p[3])
# Arguments error recovery
def p_ArgumentsError(self, p):
"""Arguments : ',' error"""
p[0] = self.BuildError(p, 'Arguments')
def p_Argument(self, p):
"""Argument : ExtendedAttributeList OPTIONAL TypeWithExtendedAttributes ArgumentName Default
| ExtendedAttributeList Type Ellipsis ArgumentName"""
if len(p) > 5:
p[0] = self.BuildNamed('Argument', p, 4, ListFromConcat(p[3], p[5]))
p[0].AddChildren(self.BuildTrue('OPTIONAL'))
p[0].AddChildren(p[1])
else:
applicable_to_types, non_applicable_to_types = \
DivideExtAttrsIntoApplicableAndNonApplicable(p[1])
if applicable_to_types:
attributes = self.BuildProduction('ExtAttributes', p, 1,
applicable_to_types)
p[2].AddChildren(attributes)
p[0] = self.BuildNamed('Argument', p, 4, ListFromConcat(p[2], p[3]))
if non_applicable_to_types:
attributes = self.BuildProduction('ExtAttributes', p, 1,
non_applicable_to_types)
p[0].AddChildren(attributes)
def p_ArgumentName(self, p):
"""ArgumentName : ArgumentNameKeyword
| identifier"""
p[0] = p[1]
def p_Ellipsis(self, p):
"""Ellipsis : ELLIPSIS
|"""
if len(p) > 1:
p[0] = self.BuildNamed('Argument', p, 1)
p[0].AddChildren(self.BuildTrue('ELLIPSIS'))
def p_ReturnType(self, p):
"""ReturnType : Type
| VOID"""
if p[1] == 'void':
p[0] = self.BuildProduction('Type', p, 1)
p[0].AddChildren(self.BuildNamed('PrimitiveType', p, 1))
else:
p[0] = p[1]
def p_Constructor(self, p):
"""Constructor : CONSTRUCTOR '(' ArgumentList ')' ';'"""
arguments = self.BuildProduction('Arguments', p, 1, p[3])
p[0] = self.BuildProduction('Constructor', p, 1, arguments)
def p_Stringifier(self, p):
"""Stringifier : STRINGIFIER StringifierRest"""
p[0] = self.BuildProduction('Stringifier', p, 1, p[2])
def p_StringifierRest(self, p):
"""StringifierRest : ReadOnly AttributeRest
| ReturnType OperationRest
| ';'"""
if len(p) == 3:
p[2].AddChildren(p[1])
p[0] = p[2]
def p_StaticMember(self, p):
"""StaticMember : STATIC StaticMemberRest"""
p[2].AddChildren(self.BuildTrue('STATIC'))
p[0] = p[2]
def p_StaticMemberRest(self, p):
"""StaticMemberRest : ReadOnly AttributeRest
| ReturnType OperationRest"""
if len(p) == 2:
p[0] = p[1]
else:
p[2].AddChildren(p[1])
p[0] = p[2]
def p_Iterable(self, p):
"""Iterable : ITERABLE '<' TypeWithExtendedAttributes OptionalType '>' ';'"""
childlist = ListFromConcat(p[3], p[4])
p[0] = self.BuildProduction('Iterable', p, 2, childlist)
def p_OptionalType(self, p):
"""OptionalType : ',' TypeWithExtendedAttributes
|"""
if len(p) > 1:
p[0] = p[2]
def p_AsyncIterable(self, p):
"""AsyncIterable : ASYNC ITERABLE '<' TypeWithExtendedAttributes ',' TypeWithExtendedAttributes '>' ';'"""
childlist = ListFromConcat(p[4], p[6])
p[0] = self.BuildProduction('AsyncIterable', p, 2, childlist)
def p_ReadWriteMaplike(self, p):
"""ReadWriteMaplike : MaplikeRest"""
p[0] = p[1]
def p_MaplikeRest(self, p):
"""MaplikeRest : MAPLIKE '<' TypeWithExtendedAttributes ',' TypeWithExtendedAttributes '>' ';'"""
childlist = ListFromConcat(p[3], p[5])
p[0] = self.BuildProduction('Maplike', p, 2, childlist)
def p_ReadWriteSetlike(self, p):
"""ReadWriteSetlike : SetlikeRest"""
p[0] = p[1]
def p_SetlikeRest(self, p):
"""SetlikeRest : SETLIKE '<' TypeWithExtendedAttributes '>' ';'"""
p[0] = self.BuildProduction('Setlike', p, 2, p[3])
def p_Namespace(self, p):
"""Namespace : NAMESPACE identifier '{' NamespaceMembers '}' ';'"""
p[0] = self.BuildNamed('Namespace', p, 2, p[4])
# Error recovery for namespace.
def p_NamespaceError(self, p):
"""Namespace : NAMESPACE identifier '{' error"""
p[0] = self.BuildError(p, 'Namespace')
def p_NamespaceMembers(self, p):
"""NamespaceMembers : NamespaceMember NamespaceMembers
| """
if len(p) > 1:
p[0] = ListFromConcat(p[1], p[2])
# Error recovery for NamespaceMembers
def p_NamespaceMembersError(self, p):
"""NamespaceMembers : ExtendedAttributeList error"""
p[0] = self.BuildError(p, 'NamespaceMembers')
def p_NamespaceMember(self, p):
"""NamespaceMember : ExtendedAttributeList ReturnType OperationRest
| ExtendedAttributeList READONLY AttributeRest"""
if p[2] != 'readonly':
applicable_to_types, non_applicable_to_types = \
DivideExtAttrsIntoApplicableAndNonApplicable(p[1])
if applicable_to_types:
attributes = self.BuildProduction('ExtAttributes', p, 1,
applicable_to_types)
p[2].AddChildren(attributes)
p[3].AddChildren(p[2])
if non_applicable_to_types:
attributes = self.BuildProduction('ExtAttributes', p, 1,
non_applicable_to_types)
p[3].AddChildren(attributes)
else:
p[3].AddChildren(self.BuildTrue('READONLY'))
p[3].AddChildren(p[1])
p[0] = p[3]
def p_Dictionary(self, p):
  """Dictionary : DICTIONARY identifier Inheritance '{' DictionaryMembers '}' ';'"""
  p[0] = self.BuildNamed('Dictionary', p, 2, ListFromConcat(p[3], p[5]))

# Error recovery for regular Dictionary
def p_DictionaryError(self, p):
  """Dictionary : DICTIONARY error ';'"""
  p[0] = self.BuildError(p, 'Dictionary')

# Error recovery for regular Dictionary
# (for errors inside dictionary definition)
def p_DictionaryError2(self, p):
  """Dictionary : DICTIONARY identifier Inheritance '{' error"""
  p[0] = self.BuildError(p, 'Dictionary')

# Zero or more dictionary members, gathered into a flat list.
def p_DictionaryMembers(self, p):
  """DictionaryMembers : DictionaryMember DictionaryMembers
                       |"""
  # The empty production leaves p[0] as None.
  if len(p) > 1:
    p[0] = ListFromConcat(p[1], p[2])

# Error recovery for DictionaryMembers
def p_DictionaryMembersError(self, p):
  """DictionaryMembers : ExtendedAttributeList error"""
  p[0] = self.BuildError(p, 'DictionaryMembers')

# A dictionary member ('Key' node): either "required" with a type that
# may carry extended attributes, or optional with a plain type.
def p_DictionaryMember(self, p):
  """DictionaryMember : ExtendedAttributeList REQUIRED TypeWithExtendedAttributes identifier Default ';'
                      | ExtendedAttributeList Type identifier Default ';'"""
  if len(p) > 6:
    # Required member: replace the REQUIRED token with a boolean marker.
    p[2] = self.BuildTrue('REQUIRED')
    p[0] = self.BuildNamed('Key', p, 4, ListFromConcat(p[2], p[3], p[5]))
    p[0].AddChildren(p[1])
  else:
    # Optional member: split the extended attributes between those that
    # apply to the type and those that apply to the member itself.
    applicable_to_types, non_applicable_to_types = \
        DivideExtAttrsIntoApplicableAndNonApplicable(p[1])
    if applicable_to_types:
      attributes = self.BuildProduction('ExtAttributes', p, 1,
                                        applicable_to_types)
      p[2].AddChildren(attributes)
    p[0] = self.BuildNamed('Key', p, 3, ListFromConcat(p[2], p[4]))
    if non_applicable_to_types:
      attributes = self.BuildProduction('ExtAttributes', p, 1,
                                        non_applicable_to_types)
      p[0].AddChildren(attributes)

def p_PartialDictionary(self, p):
  """PartialDictionary : DICTIONARY identifier '{' DictionaryMembers '}' ';'"""
  p[0] = self.BuildNamed('Dictionary', p, 2, p[4])

# Error recovery for Partial Dictionary
def p_PartialDictionaryError(self, p):
  """PartialDictionary : DICTIONARY error ';'"""
  p[0] = self.BuildError(p, 'PartialDictionary')

# Optional default value; when absent, p[0] stays None.
def p_Default(self, p):
  """Default : '=' DefaultValue
             |"""
  if len(p) > 1:
    p[0] = self.BuildProduction('Default', p, 2, p[2])
def p_Enum(self, p):
  """Enum : ENUM identifier '{' EnumValueList '}' ';'"""
  p[0] = self.BuildNamed('Enum', p, 2, p[4])

# Error recovery for Enums
def p_EnumError(self, p):
  """Enum : ENUM error ';'"""
  p[0] = self.BuildError(p, 'Enum')

# One or more string enum values, built as a list of EnumItem nodes.
def p_EnumValueList(self, p):
  """EnumValueList : string EnumValueListComma"""
  enum = self.BuildNamed('EnumItem', p, 1)
  p[0] = ListFromConcat(enum, p[2])

# Continuation of the enum value list after a comma.
def p_EnumValueListComma(self, p):
  """EnumValueListComma : ',' EnumValueListString
                        |"""
  if len(p) > 1:
    p[0] = p[2]

# A value after a comma; the empty production allows a trailing comma.
def p_EnumValueListString(self, p):
  """EnumValueListString : string EnumValueListComma
                         |"""
  if len(p) > 1:
    enum = self.BuildNamed('EnumItem', p, 1)
    p[0] = ListFromConcat(enum, p[2])

# Callback definition: name, return type and argument list.
def p_CallbackRest(self, p):
  """CallbackRest : identifier '=' ReturnType '(' ArgumentList ')' ';'"""
  arguments = self.BuildProduction('Arguments', p, 4, p[5])
  p[0] = self.BuildNamed('Callback', p, 1, ListFromConcat(p[3], arguments))

def p_Typedef(self, p):
  """Typedef : TYPEDEF TypeWithExtendedAttributes identifier ';'"""
  p[0] = self.BuildNamed('Typedef', p, 3, p[2])

# Error recovery for Typedefs
def p_TypedefError(self, p):
  """Typedef : TYPEDEF error ';'"""
  p[0] = self.BuildError(p, 'Typedef')
# A type is either a single (non-union) type, or a union type with an
# optional trailing '?' (Null).
def p_Type(self, p):
  """Type : SingleType
          | UnionType Null"""
  if len(p) == 2:
    p[0] = self.BuildProduction('Type', p, 1, p[1])
  else:
    p[0] = self.BuildProduction('Type', p, 1, ListFromConcat(p[1], p[2]))

# Same as Type, but with a leading extended attribute list which gets
# attached to the resulting Type node.
def p_TypeWithExtendedAttributes(self, p):
  """ TypeWithExtendedAttributes : ExtendedAttributeList SingleType
                                 | ExtendedAttributeList UnionType Null"""
  if len(p) < 4:
    p[0] = self.BuildProduction('Type', p, 2, p[2])
  else:
    p[0] = self.BuildProduction('Type', p, 2, ListFromConcat(p[2], p[3]))
  p[0].AddChildren(p[1])

def p_SingleType(self, p):
  """SingleType : DistinguishableType
                | ANY
                | PromiseType"""
  # ANY matches as the literal string 'any'; the other alternatives are
  # already AST nodes.
  if p[1] != 'any':
    p[0] = p[1]
  else:
    p[0] = self.BuildProduction('Any', p, 1)

# A union must contain at least two member types.
def p_UnionType(self, p):
  """UnionType : '(' UnionMemberType OR UnionMemberType UnionMemberTypes ')'"""
  members = ListFromConcat(p[2], p[4], p[5])
  p[0] = self.BuildProduction('UnionType', p, 1, members)

def p_UnionMemberType(self, p):
  """UnionMemberType : ExtendedAttributeList DistinguishableType
                     | UnionType Null"""
  # p[1] is None for an empty ExtendedAttributeList, an ExtAttributes
  # node for a non-empty one, and a UnionType node for the second form.
  if p[1] is None:
    p[0] = self.BuildProduction('Type', p, 1, p[2])
  elif p[1].GetClass() == 'ExtAttributes':
    p[0] = self.BuildProduction('Type', p, 1, ListFromConcat(p[2], p[1]))
  else:
    p[0] = self.BuildProduction('Type', p, 1, ListFromConcat(p[1], p[2]))

# Additional union members beyond the first two.
def p_UnionMemberTypes(self, p):
  """UnionMemberTypes : OR UnionMemberType UnionMemberTypes
                      |"""
  if len(p) > 2:
    p[0] = ListFromConcat(p[2], p[3])
# Moved BYTESTRING, DOMSTRING, OBJECT to PrimitiveType
# Moving all built-in types into PrimitiveType makes it easier to
# differentiate between them and 'identifier', since p[1] would be a string in
# both cases.
def p_DistinguishableType(self, p):
  """DistinguishableType : PrimitiveType Null
                         | identifier Null
                         | SEQUENCE '<' TypeWithExtendedAttributes '>' Null
                         | FROZENARRAY '<' TypeWithExtendedAttributes '>' Null
                         | RecordType Null"""
  if len(p) == 3:
    # Primitive / identifier / record form: a bare string here means an
    # identifier, which becomes a Typeref node.
    if type(p[1]) == str:
      typeref = self.BuildNamed('Typeref', p, 1)
    else:
      typeref = p[1]
    p[0] = ListFromConcat(typeref, p[2])
  if len(p) == 6:
    # sequence<...> or FrozenArray<...> form.
    cls = 'Sequence' if p[1] == 'sequence' else 'FrozenArray'
    p[0] = self.BuildProduction(cls, p, 1, p[3])
    p[0] = ListFromConcat(p[0], p[5])

# Added StringType, OBJECT
def p_PrimitiveType(self, p):
  """PrimitiveType : UnsignedIntegerType
                   | UnrestrictedFloatType
                   | StringType
                   | BOOLEAN
                   | BYTE
                   | OCTET
                   | OBJECT"""
  # Keyword tokens arrive as plain strings and get wrapped here; the
  # other alternatives are already nodes.
  if type(p[1]) == str:
    p[0] = self.BuildNamed('PrimitiveType', p, 1)
  else:
    p[0] = p[1]

def p_UnrestrictedFloatType(self, p):
  """UnrestrictedFloatType : UNRESTRICTED FloatType
                           | FloatType"""
  # len(p) == 2 means the bare FloatType alternative matched.
  if len(p) == 2:
    typeref = self.BuildNamed('PrimitiveType', p, 1)
  else:
    typeref = self.BuildNamed('PrimitiveType', p, 2)
    typeref.AddChildren(self.BuildTrue('UNRESTRICTED'))
  p[0] = typeref

def p_FloatType(self, p):
  """FloatType : FLOAT
               | DOUBLE"""
  p[0] = p[1]

def p_UnsignedIntegerType(self, p):
  """UnsignedIntegerType : UNSIGNED IntegerType
                         | IntegerType"""
  # Produces a plain string such as 'unsigned long long'.
  if len(p) == 2:
    p[0] = p[1]
  else:
    p[0] = 'unsigned ' + p[2]

def p_IntegerType(self, p):
  """IntegerType : SHORT
                 | LONG OptionalLong"""
  if len(p) == 2:
    p[0] = p[1]
  else:
    p[0] = p[1] + p[2]

def p_OptionalLong(self, p):
  """OptionalLong : LONG
                  | """
  # ' long' (leading space) to build 'long long', otherwise ''.
  if len(p) > 1:
    p[0] = ' ' + p[1]
  else:
    p[0] = ''

def p_StringType(self, p):
  """StringType : BYTESTRING
                | DOMSTRING
                | USVSTRING"""
  p[0] = self.BuildNamed('StringType', p, 1)

def p_PromiseType(self, p):
  """PromiseType : PROMISE '<' ReturnType '>'"""
  p[0] = self.BuildNamed('Promise', p, 1, p[3])

def p_RecordType(self, p):
  """RecordType : RECORD '<' StringType ',' TypeWithExtendedAttributes '>'"""
  p[0] = self.BuildProduction('Record', p, 2, ListFromConcat(p[3], p[5]))

# Error recovery for RecordType.
def p_RecordTypeError(self, p):
  """RecordType : RECORD '<' error ',' Type '>'"""
  p[0] = self.BuildError(p, 'RecordType')

# Optional '?' suffix marking a nullable type.
def p_Null(self, p):
  """Null : '?'
          |"""
  if len(p) > 1:
    p[0] = self.BuildTrue('NULLABLE')
# This rule has custom additions (i.e. SpecialComments).
def p_ExtendedAttributeList(self, p):
  """ExtendedAttributeList : '[' ExtendedAttribute ExtendedAttributes ']'
                           | """
  # An empty list leaves p[0] as None.
  if len(p) > 4:
    items = ListFromConcat(p[2], p[3])
    p[0] = self.BuildProduction('ExtAttributes', p, 1, items)

# Error recovery for ExtendedAttributeList
def p_ExtendedAttributeListError(self, p):
  """ExtendedAttributeList : '[' ExtendedAttribute ',' error"""
  p[0] = self.BuildError(p, 'ExtendedAttributeList')

# Additional comma-separated extended attributes.
def p_ExtendedAttributes(self, p):
  """ExtendedAttributes : ',' ExtendedAttribute ExtendedAttributes
                        |"""
  if len(p) > 1:
    p[0] = ListFromConcat(p[2], p[3])

# https://heycam.github.io/webidl/#idl-extended-attributes
# The ExtendedAttribute symbol in Web IDL grammar is very flexible but we
# only support following patterns:
#    [ identifier ]
#    [ identifier ( ArgumentList ) ]
#    [ identifier = identifier ]
#    [ identifier = ( IdentifierList ) ]
#    [ identifier = identifier ( ArgumentList ) ]
#    [ identifier = ( StringList ) ]
# The first five patterns are specified in the Web IDL spec and the last
# pattern is Blink's custom extension to support [ReflectOnly].
def p_ExtendedAttribute(self, p):
  """ExtendedAttribute : ExtendedAttributeNoArgs
                       | ExtendedAttributeArgList
                       | ExtendedAttributeIdent
                       | ExtendedAttributeIdentList
                       | ExtendedAttributeNamedArgList
                       | ExtendedAttributeStringLiteral
                       | ExtendedAttributeStringLiteralList"""
  p[0] = p[1]

# Add definition for NULL
def p_null(self, p):
  """null : NULL"""
  p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'NULL'),
                        self.BuildAttribute('VALUE', 'NULL'))

def p_IdentifierList(self, p):
  """IdentifierList : identifier Identifiers"""
  p[0] = ListFromConcat(p[1], p[2])

# Additional comma-separated identifiers.
def p_Identifiers(self, p):
  """Identifiers : ',' identifier Identifiers
                 |"""
  if len(p) > 1:
    p[0] = ListFromConcat(p[2], p[3])
# [ identifier ]
def p_ExtendedAttributeNoArgs(self, p):
  """ExtendedAttributeNoArgs : identifier"""
  p[0] = self.BuildNamed('ExtAttribute', p, 1)

# [ identifier ( ArgumentList ) ]
def p_ExtendedAttributeArgList(self, p):
  """ExtendedAttributeArgList : identifier '(' ArgumentList ')'"""
  arguments = self.BuildProduction('Arguments', p, 2, p[3])
  p[0] = self.BuildNamed('ExtAttribute', p, 1, arguments)

# [ identifier = identifier ]
def p_ExtendedAttributeIdent(self, p):
  """ExtendedAttributeIdent : identifier '=' identifier"""
  value = self.BuildAttribute('VALUE', p[3])
  p[0] = self.BuildNamed('ExtAttribute', p, 1, value)

# [ identifier = ( IdentifierList ) ]
def p_ExtendedAttributeIdentList(self, p):
  """ExtendedAttributeIdentList : identifier '=' '(' IdentifierList ')'"""
  value = self.BuildAttribute('VALUE', p[4])
  p[0] = self.BuildNamed('ExtAttribute', p, 1, value)

# [ identifier = identifier ( ArgumentList ) ]
def p_ExtendedAttributeNamedArgList(self, p):
  """ExtendedAttributeNamedArgList : identifier '=' identifier '(' ArgumentList ')'"""
  args = self.BuildProduction('Arguments', p, 4, p[5])
  value = self.BuildNamed('Call', p, 3, args)
  p[0] = self.BuildNamed('ExtAttribute', p, 1, value)

# Blink extension: Add support for string literal Extended Attribute values
def p_ExtendedAttributeStringLiteral(self, p):
  """ExtendedAttributeStringLiteral : identifier '=' StringLiteral"""
  def UnwrapString(ls):
    """Reach in and grab the string literal's "NAME"."""
    return ls[1].value
  value = self.BuildAttribute('VALUE', UnwrapString(p[3]))
  p[0] = self.BuildNamed('ExtAttribute', p, 1, value)

# Blink extension: Add support for compound Extended Attribute values over
# string literals ("A","B")
def p_ExtendedAttributeStringLiteralList(self, p):
  """ExtendedAttributeStringLiteralList : identifier '=' '(' StringLiteralList ')'"""
  value = self.BuildAttribute('VALUE', p[4])
  p[0] = self.BuildNamed('ExtAttribute', p, 1, value)

# Blink extension: One or more string literals. The values aren't propagated
# as literals, but by their value only.
def p_StringLiteralList(self, p):
  """StringLiteralList : StringLiteral ',' StringLiteralList
                       | StringLiteral"""
  def UnwrapString(ls):
    """Reach in and grab the string literal's "NAME"."""
    return ls[1].value
  if len(p) > 3:
    p[0] = ListFromConcat(UnwrapString(p[1]), p[3])
  else:
    p[0] = ListFromConcat(UnwrapString(p[1]))

# Blink extension: Wrap string literal.
def p_StringLiteral(self, p):
  """StringLiteral : string"""
  p[0] = ListFromConcat(self.BuildAttribute('TYPE', 'DOMString'),
                        self.BuildAttribute('NAME', p[1]))

# Blink extension: Treat special comments (/** ... */) as AST nodes to
# annotate other nodes. Currently they are used for testing.
def p_SpecialComments(self, p):
  """SpecialComments : SPECIAL_COMMENT SpecialComments
                     | """
  if len(p) > 1:
    p[0] = ListFromConcat(self.BuildSpecialComment(p, 1), p[2])
#
# Parser Errors
#
# p_error is called whenever the parser can not find a pattern match for
# a set of items from the current state. The p_error function defined here
# is triggered logging an error, and parsing recovery happens as the
# p_<type>_error functions defined above are called. This allows the parser
# to continue so as to capture more than one error per file.
#
def p_error(self, t):
  # t is the offending token, or None at end of input.
  if t:
    lineno = t.lineno
    pos = t.lexpos
    prev = self.yaccobj.symstack[-1]
    if type(prev) == lex.LexToken:
      msg = "Unexpected %s after %s." % (
          TokenTypeName(t), TokenTypeName(prev))
    else:
      msg = "Unexpected %s." % (t.value)
  else:
    last = self.LastToken()
    lineno = last.lineno
    pos = last.lexpos
    msg = "Unexpected end of file after %s." % TokenTypeName(last)
    # Reset the parser state so a subsequent parse can succeed.
    self.yaccobj.restart()
  # Attempt to remap the error to a friendlier form
  if msg in ERROR_REMAP:
    msg = ERROR_REMAP[msg]
  # Stash the details; the p_*Error recovery rules turn them into
  # Error nodes via BuildError.
  self._last_error_msg = msg
  self._last_error_lineno = lineno
  self._last_error_pos = pos
def Warn(self, node, msg):
  """Write a warning for |node| to stdout and bump the warning counter."""
  sys.stdout.write(node.GetLogLine(msg))
  # Was "self.parse_warnings += 1": that attribute is never initialized
  # anywhere (__init__ and ParseText both use self._parse_warnings), so
  # the first call would have raised AttributeError.
  self._parse_warnings += 1
def LastToken(self):
  # Last token seen by the lexer; used for end-of-file error reporting.
  return self.lexer.last
def __init__(self, lexer, verbose=False, debug=False, mute_error=False):
  # lexer: an IDL lexer instance supplying tokens; its known tokens
  # define the terminals of the grammar.
  self.lexer = lexer
  self.tokens = lexer.KnownTokens()
  # Build the parser tables in memory; no table cache file is written.
  self.yaccobj = yacc.yacc(module=self, tabmodule=None, debug=debug,
                           optimize=0, write_tables=0)
  # TODO: Make our code compatible with defaulted_states. Currently disabled
  # for compatibility.
  self.yaccobj.defaulted_states = {}
  self.parse_debug = debug
  self.verbose = verbose
  # When True, BuildError does not emit error output on the node.
  self.mute_error = mute_error
  # Per-parse error bookkeeping, reset again in ParseText.
  self._parse_errors = 0
  self._parse_warnings = 0
  self._last_error_msg = None
  self._last_error_lineno = 0
  self._last_error_pos = 0
#
# BuildProduction
#
# Production is the set of items sent to a grammar rule resulting in a new
# item being returned.
#
# cls - The type of item being producted
# p - Is the Yacc production object containing the stack of items
# index - Index into the production of the name for the item being produced.
# childlist - The children of the new item
def BuildProduction(self, cls, p, index, childlist=None):
  try:
    if not childlist:
      childlist = []
    # Source position is taken from the production item at |index|.
    filename = self.lexer.Lexer().filename
    lineno = p.lineno(index)
    pos = p.lexpos(index)
    out = IDLNode(cls, filename, lineno, pos, childlist)
    return out
  except:
    # Dump the production stack to ease debugging, then re-raise.
    print('Exception while parsing:')
    for num, item in enumerate(p):
      print(' [%d] %s' % (num, ExpandProduction(item)))
    if self.LastToken():
      print('Last token: %s' % str(self.LastToken()))
    raise
def BuildNamed(self, cls, p, index, childlist=None):
  # Like BuildProduction, but also attaches a NAME attribute taken from
  # the production item at |index|.
  childlist = ListFromConcat(childlist)
  childlist.append(self.BuildAttribute('NAME', p[index]))
  return self.BuildProduction(cls, p, index, childlist)

def BuildSpecialComment(self, p, index):
  # Wrap a /** ... */ comment in a SpecialComment node named by its text.
  name = ExtractSpecialComment(p[index])
  childlist = [self.BuildAttribute('NAME', name)]
  return self.BuildProduction('SpecialComment', p, index, childlist)
#
# BuildError
#
# Build an Error node as part of the recovery process, using the details
# stashed by p_error.
#
def BuildError(self, p, prod):
  self._parse_errors += 1
  name = self.BuildAttribute('NAME', self._last_error_msg)
  line = self.BuildAttribute('LINENO', self._last_error_lineno)
  pos = self.BuildAttribute('POSITION', self._last_error_pos)
  prod = self.BuildAttribute('PROD', prod)
  node = self.BuildProduction('Error', p, 1,
                              ListFromConcat(name, line, pos, prod))
  if not self.mute_error:
    node.Error(self._last_error_msg)
  return node
#
# BuildAttribute
#
# An ExtendedAttribute is a special production that results in a property
# which is applied to the adjacent item. Attributes have no children and
# instead represent key/value pairs.
#
def BuildAttribute(self, key, val):
  return IDLAttribute(key, val)

def BuildFalse(self, key):
  # Attribute holding a boolean False value.
  return IDLAttribute(key, Boolean(False))

def BuildTrue(self, key):
  # Attribute holding a boolean True value.
  return IDLAttribute(key, Boolean(True))

def GetErrors(self):
  # Total error count from both parser and lexer.
  # Access lexer errors, despite being private
  # pylint: disable=W0212
  return self._parse_errors + self.lexer._lex_errors
#
# ParseData
#
# Attempts to parse the current data loaded in the lexer.
# Returns a 'File' IDLNode on success, or None if the lexer raised an
# error while tokenizing.
#
def ParseText(self, filename, data):
  # Reset per-parse error state.
  self._parse_errors = 0
  self._parse_warnings = 0
  self._last_error_msg = None
  self._last_error_lineno = 0
  self._last_error_pos = 0
  try:
    self.lexer.Tokenize(data, filename)
    nodes = self.yaccobj.parse(lexer=self.lexer) or []
    name = self.BuildAttribute('NAME', filename)
    return IDLNode('File', filename, 0, 0, nodes + [name])
  except lex.LexError as lexError:
    sys.stderr.write('Error in token: %s\n' % str(lexError))
  return None
def ParseFile(parser, filename):
  """Parse a file and return a File type of node."""
  with open(filename) as fileobject:
    try:
      out = parser.ParseText(filename, fileobject.read())
      out.SetProperty('DATETIME', time.ctime(os.path.getmtime(filename)))
      out.SetProperty('ERRORS', parser.GetErrors())
      return out
    except Exception as e:
      # NOTE(review): if ParseText returned None (lexer error), the
      # SetProperty call raises AttributeError, which lands here and is
      # reported as an internal error; the function then returns None.
      last = parser.LastToken()
      sys.stderr.write('%s(%d) : Internal parsing error\n\t%s.\n' % (
          filename, last.lineno, str(e)))
def main(argv):
  # Parse each file named on the command line, print the combined AST,
  # and return the total error count (0 means success).
  nodes = []
  parser = IDLParser(IDLLexer())
  errors = 0
  for filename in argv:
    filenode = ParseFile(parser, filename)
    if (filenode):
      errors += filenode.GetProperty('ERRORS')
      nodes.append(filenode)
  ast = IDLNode('AST', '__AST__', 0, 0, nodes)
  print('\n'.join(ast.Tree()))
  if errors:
    print('\nFound %d errors.\n' % errors)
  return errors

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| 33.244177 | 110 | 0.605406 |
7954edb31ecff2dab11975302a5c1f61e65dedba | 57,420 | py | Python | src/trunk/apps/fdsnws/fdsnws/gnupg.py | damb/seiscomp3 | 560a8f7ae43737ae7826fb1ffca76a9f601cf9dc | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 94 | 2015-02-04T13:57:34.000Z | 2021-11-01T15:10:06.000Z | src/trunk/apps/fdsnws/fdsnws/gnupg.py | damb/seiscomp3 | 560a8f7ae43737ae7826fb1ffca76a9f601cf9dc | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 233 | 2015-01-28T15:16:46.000Z | 2021-08-23T11:31:37.000Z | src/trunk/apps/fdsnws/fdsnws/gnupg.py | damb/seiscomp3 | 560a8f7ae43737ae7826fb1ffca76a9f601cf9dc | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 95 | 2015-02-13T15:53:30.000Z | 2021-11-02T14:54:54.000Z | """ A wrapper for the 'gpg' command::
Portions of this module are derived from A.M. Kuchling's well-designed
GPG.py, using Richard Jones' updated version 1.3, which can be found
in the pycrypto CVS repository on Sourceforge:
http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
This module is *not* forward-compatible with amk's; some of the
old interface has changed. For instance, since I've added decrypt
functionality, I elected to initialize with a 'gnupghome' argument
instead of 'keyring', so that gpg can find both the public and secret
keyrings. I've also altered some of the returned objects in order for
the caller to not have to know as much about the internals of the
result classes.
While the rest of ISconf is released under the GPL, I am releasing
this single file under the same terms that A.M. Kuchling used for
pycrypto.
Steve Traugott, stevegt@terraluna.org
Thu Jun 23 21:27:20 PDT 2005
This version of the module has been modified from Steve Traugott's version
(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
the previous versions.
Modifications Copyright (C) 2008-2018 Vinay Sajip. All rights reserved.
A unittest harness (test_gnupg.py) has also been added.
"""
__version__ = "0.4.3"
__author__ = "Vinay Sajip"
__date__ = "$13-Jun-2018 12:11:43$"
try:
from io import StringIO
except ImportError: # pragma: no cover
from cStringIO import StringIO
import codecs
import locale
import logging
import os
import re
import socket
from subprocess import Popen
from subprocess import PIPE
import sys
import threading
STARTUPINFO = None
if os.name == 'nt': # pragma: no cover
try:
from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE
except ImportError:
STARTUPINFO = None
try:
    # Was "import logging.NullHandler as NullHandler", which always raises
    # ImportError because NullHandler is a class, not a submodule - so the
    # stdlib handler (available since Python 2.7/3.1) was never used.
    from logging import NullHandler
except ImportError:  # pragma: no cover
    # Fallback for very old Pythons without logging.NullHandler.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass
try:
    # Python 2: the 'unicode' builtin exists.
    unicode
    _py3k = False
    string_types = basestring
    text_type = unicode
except NameError:
    # Python 3: 'str' covers both roles.
    _py3k = True
    string_types = str
    text_type = str

logger = logging.getLogger(__name__)
if not logger.handlers:
    # Avoid "no handlers could be found" warnings in applications that
    # do not configure logging themselves.
    logger.addHandler(NullHandler())
# We use the test below because it works for Jython as well as CPython
if os.path.__name__ == 'ntpath':  # pragma: no cover
    def shell_quote(s):
        # On Windows, we don't need shell quoting, other than worrying about
        # paths with spaces in them.
        return '"%s"' % s
else:
    # Section copied from sarge
    # This regex determines which shell input needs quoting
    # because it may be unsafe
    UNSAFE = re.compile(r'[^\w%+,./:=@-]')

    def shell_quote(s):
        """
        Quote text so that it is safe for Posix command shells.

        For example, "*.py" would be converted to "'*.py'". If the text is
        considered safe it is returned unquoted.

        :param s: The value to quote
        :type s: str (or unicode on 2.x)
        :return: A safe version of the input, from the point of view of Posix
                 command shells
        :rtype: The passed-in type
        """
        if not isinstance(s, string_types):  # pragma: no cover
            raise TypeError('Expected string type, got %s' % type(s))
        if not s:
            return "''"
        if not UNSAFE.search(s):
            return s
        # Close the quote, insert an escaped quote, reopen the quote.
        return "'%s'" % s.replace("'", r"'\''")
    # end of sarge code
# Now that we use shell=False, we shouldn't need to quote arguments.
# Use no_quote instead of shell_quote to remind us of where quoting
# was needed. However, note that we still need, on 2.x, to encode any
# Unicode argument with the file system encoding - see Issue #41 and
# Python issue #1759845 ("subprocess.call fails with unicode strings in
# command line").

# Allows the encoding used to be overridden in special cases by setting
# this module attribute appropriately.
fsencoding = sys.getfilesystemencoding()


def no_quote(s):
    # Encode Unicode arguments with the file system encoding on 2.x;
    # on 3.x (or for byte strings) the value is returned untouched.
    if _py3k or not isinstance(s, text_type):
        return s
    return s.encode(fsencoding)
def _copy_data(instream, outstream):
    """Copy everything from instream to outstream in 1KB chunks, then close
    outstream. Used (via a thread) to feed data to gpg's stdin; the number
    of bytes sent is logged at debug level.
    """
    sent = 0
    # sys.stdin may lack an encoding attribute (e.g. when redirected).
    if hasattr(sys.stdin, 'encoding'):
        enc = sys.stdin.encoding
    else:  # pragma: no cover
        enc = 'ascii'
    while True:
        # See issue #39: read can fail when e.g. a text stream is provided
        # for what is actually a binary file
        try:
            data = instream.read(1024)
        except UnicodeError:
            logger.warning('Exception occurred while reading', exc_info=1)
            break
        if not data:
            break
        sent += len(data)
        # logger.debug("sending chunk (%d): %r", sent, data[:256])
        try:
            outstream.write(data)
        except UnicodeError:  # pragma: no cover
            # Text data headed for a byte stream: encode it and retry.
            outstream.write(data.encode(enc))
        except Exception:
            # Was a bare "except:", which would also swallow SystemExit and
            # KeyboardInterrupt. Exception keeps the intended best-effort
            # behaviour - 'broken pipe' errors can occur even when all the
            # data has been sent - without masking interpreter shutdown.
            logger.exception('Error sending data')
            break
    try:
        outstream.close()
    except IOError:  # pragma: no cover
        logger.warning('Exception occurred while closing: ignored', exc_info=1)
    logger.debug("closed output, %d bytes sent", sent)
def _threaded_copy_data(instream, outstream):
    """Start a daemon thread copying instream to outstream and return it."""
    wr = threading.Thread(target=_copy_data, args=(instream, outstream))
    # Thread.setDaemon() is deprecated (since Python 3.10); assign the
    # attribute directly instead.
    wr.daemon = True
    logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
    wr.start()
    return wr
def _write_passphrase(stream, passphrase, encoding):
    # gpg reads the passphrase terminated by a newline; encode to bytes
    # with the requested encoding before writing.
    data = ('%s\n' % passphrase).encode(encoding)
    stream.write(data)
    logger.debug('Wrote passphrase')
def _is_sequence(instance):
return isinstance(instance, (list, tuple, set, frozenset))
def _make_memory_stream(s):
try:
from io import BytesIO
rv = BytesIO(s)
except ImportError: # pragma: no cover
rv = StringIO(s)
return rv
def _make_binary_stream(s, encoding):
    """Encode *s* if it is a text string, then wrap it in a binary stream."""
    if _py3k:
        needs_encoding = isinstance(s, str)
    else:
        needs_encoding = type(s) is not str
    if needs_encoding:
        s = s.encode(encoding)
    return _make_memory_stream(s)
class Verify(object):
    "Handle status messages for --verify"

    # Symbolic trust levels, in increasing order of trust.
    TRUST_UNDEFINED = 0
    TRUST_NEVER = 1
    TRUST_MARGINAL = 2
    TRUST_FULLY = 3
    TRUST_ULTIMATE = 4

    # Maps TRUST_* status keywords emitted by gpg to the levels above.
    TRUST_LEVELS = {
        "TRUST_UNDEFINED": TRUST_UNDEFINED,
        "TRUST_NEVER": TRUST_NEVER,
        "TRUST_MARGINAL": TRUST_MARGINAL,
        "TRUST_FULLY": TRUST_FULLY,
        "TRUST_ULTIMATE": TRUST_ULTIMATE,
    }

    # for now, just the most common error codes. This can be expanded as and
    # when reports come in of other errors.
    GPG_SYSTEM_ERROR_CODES = {
        1: 'permission denied',
        35: 'file exists',
        81: 'file not found',
        97: 'not a directory',
    }

    GPG_ERROR_CODES = {
        11: 'incorrect passphrase',
    }

    def __init__(self, gpg):
        # All result fields start out empty; handle_status fills them in
        # as status lines arrive from gpg.
        self.gpg = gpg
        self.valid = False
        self.fingerprint = self.creation_date = self.timestamp = None
        self.signature_id = self.key_id = None
        self.username = None
        self.key_id = None
        self.key_status = None
        self.status = None
        self.pubkey_fingerprint = None
        self.expire_timestamp = None
        self.sig_timestamp = None
        self.trust_text = None
        self.trust_level = None

    def __nonzero__(self):
        # Truthiness of the result reflects whether the signature verified.
        return self.valid
    __bool__ = __nonzero__  # Python 3 spelling.

    def handle_status(self, key, value):
        # Dispatch on the status keyword from gpg's --status-fd stream;
        # |value| is the remainder of the status line.
        if key in self.TRUST_LEVELS:
            self.trust_text = key
            self.trust_level = self.TRUST_LEVELS[key]
        elif key in ("WARNING", "ERROR"):
            logger.warning('potential problem: %s: %s', key, value)
        elif key == "BADSIG":  # pragma: no cover
            self.valid = False
            self.status = 'signature bad'
            self.key_id, self.username = value.split(None, 1)
        elif key == "ERRSIG":  # pragma: no cover
            self.valid = False
            (self.key_id,
             algo, hash_algo,
             cls,
             self.timestamp) = value.split()[:5]
            self.status = 'signature error'
        elif key == "EXPSIG":  # pragma: no cover
            self.valid = False
            self.status = 'signature expired'
            self.key_id, self.username = value.split(None, 1)
        elif key == "GOODSIG":
            self.valid = True
            self.status = 'signature good'
            self.key_id, self.username = value.split(None, 1)
        elif key == "VALIDSIG":
            (self.fingerprint,
             self.creation_date,
             self.sig_timestamp,
             self.expire_timestamp) = value.split()[:4]
            # may be different if signature is made with a subkey
            self.pubkey_fingerprint = value.split()[-1]
            self.status = 'signature valid'
        elif key == "SIG_ID":
            (self.signature_id,
             self.creation_date, self.timestamp) = value.split()
        elif key == "DECRYPTION_FAILED":  # pragma: no cover
            self.valid = False
            self.key_id = value
            self.status = 'decryption failed'
        elif key == "NO_PUBKEY":  # pragma: no cover
            self.valid = False
            self.key_id = value
            self.status = 'no public key'
        elif key in ("EXPKEYSIG", "REVKEYSIG"):  # pragma: no cover
            # signed with expired or revoked key
            self.valid = False
            self.key_id = value.split()[0]
            if key == "EXPKEYSIG":
                self.key_status = 'signing key has expired'
            else:
                self.key_status = 'signing key was revoked'
            self.status = self.key_status
        elif key in ("UNEXPECTED", "FAILURE"):  # pragma: no cover
            self.valid = False
            self.key_id = value
            if key == "UNEXPECTED":
                self.status = 'unexpected data'
            else:
                # N.B. there might be other reasons. For example, if an output
                # file can't be created - /dev/null/foo will lead to a
                # "not a directory" error, but which is not sent as a status
                # message with the [GNUPG:] prefix. Similarly if you try to
                # write to "/etc/foo" as a non-root user, a "permission denied"
                # error will be sent as a non-status message.
                message = 'error - %s' % value
                parts = value.split()
                if parts[-1].isdigit():
                    # Trailing word is a gpg-error code: the high bit flags
                    # a system (errno-style) error, the rest is the code.
                    code = int(parts[-1])
                    system_error = bool(code & 0x8000)
                    code = code & 0x7FFF
                    if system_error:
                        mapping = self.GPG_SYSTEM_ERROR_CODES
                    else:
                        mapping = self.GPG_ERROR_CODES
                    if code in mapping:
                        message = mapping[code]
                if not self.status:
                    self.status = message
        elif key in ("DECRYPTION_INFO", "PLAINTEXT", "PLAINTEXT_LENGTH",
                     "NO_SECKEY", "BEGIN_SIGNING"):
            # Informational messages that need no action.
            pass
        else:  # pragma: no cover
            logger.debug('message ignored: %s, %s', key, value)
class ImportResult(object):
    "Handle status messages for --import"

    # Names of the counters parsed from the IMPORT_RES status line, in
    # the order gpg reports them.
    counts = '''count no_user_id imported imported_rsa unchanged
            n_uids n_subk n_sigs n_revoc sec_read sec_imported
            sec_dups not_imported'''.split()

    def __init__(self, gpg):
        self.gpg = gpg
        self.imported = []
        self.results = []
        self.fingerprints = []
        # Each counter attribute is None until IMPORT_RES is seen.
        for result in self.counts:
            setattr(self, result, None)

    def __nonzero__(self):
        # Truthy only if at least one key was imported and none failed.
        if self.not_imported:
            return False
        if not self.fingerprints:
            return False
        return True
    __bool__ = __nonzero__  # Python 3 spelling.

    # Bit flags of the IMPORT_OK reason field.
    ok_reason = {
        '0': 'Not actually changed',
        '1': 'Entirely new key',
        '2': 'New user IDs',
        '4': 'New signatures',
        '8': 'New subkeys',
        '16': 'Contains private key',
    }

    # Reason codes of the IMPORT_PROBLEM status.
    problem_reason = {
        '0': 'No specific reason given',
        '1': 'Invalid Certificate',
        '2': 'Issuer Certificate missing',
        '3': 'Certificate Chain too long',
        '4': 'Error storing certificate',
    }

    def handle_status(self, key, value):
        # Dispatch on gpg --status-fd keywords relating to key import.
        if key in ("WARNING", "ERROR"):
            logger.warning('potential problem: %s: %s', key, value)
        elif key in ("IMPORTED", "KEY_CONSIDERED"):
            # this duplicates info we already see in import_ok & import_problem
            pass
        elif key == "NODATA":  # pragma: no cover
            self.results.append({'fingerprint': None,
                                 'problem': '0', 'text': 'No valid data found'})
        elif key == "IMPORT_OK":
            reason, fingerprint = value.split()
            reasons = []
            # The reason is a bitmask; collect the text for every set flag.
            for code, text in list(self.ok_reason.items()):
                if int(reason) | int(code) == int(reason):
                    reasons.append(text)
            reasontext = '\n'.join(reasons) + "\n"
            self.results.append({'fingerprint': fingerprint,
                                 'ok': reason, 'text': reasontext})
            self.fingerprints.append(fingerprint)
        elif key == "IMPORT_PROBLEM":  # pragma: no cover
            try:
                reason, fingerprint = value.split()
            except:
                reason = value
                fingerprint = '<unknown>'
            self.results.append({'fingerprint': fingerprint,
                                 'problem': reason, 'text': self.problem_reason[reason]})
        elif key == "IMPORT_RES":
            import_res = value.split()
            # Copy the numeric counters onto same-named attributes.
            for i, count in enumerate(self.counts):
                setattr(self, count, int(import_res[i]))
        elif key == "KEYEXPIRED":  # pragma: no cover
            self.results.append({'fingerprint': None,
                                 'problem': '0', 'text': 'Key expired'})
        elif key == "SIGEXPIRED":  # pragma: no cover
            self.results.append({'fingerprint': None,
                                 'problem': '0', 'text': 'Signature expired'})
        elif key == "FAILURE":  # pragma: no cover
            self.results.append({'fingerprint': None,
                                 'problem': '0', 'text': 'Other failure'})
        else:  # pragma: no cover
            logger.debug('message ignored: %s, %s', key, value)

    def summary(self):
        # One-line, human-readable summary of the import.
        l = []
        l.append('%d imported' % self.imported)
        if self.not_imported:  # pragma: no cover
            l.append('%d not imported' % self.not_imported)
        return ', '.join(l)
# Matches \xNN hex escapes that gpg emits in uid strings.
ESCAPE_PATTERN = re.compile(r'\\x([0-9a-f][0-9a-f])', re.I)

# Simple backslash escapes that gpg may emit in uid strings.
BASIC_ESCAPES = {
    r'\n': '\n',
    r'\r': '\r',
    r'\f': '\f',
    r'\v': '\v',
    r'\b': '\b',
    r'\0': '\0',
}
class SendResult(object):
    """Handle status messages for --send-keys; lines are only logged."""
    def __init__(self, gpg):
        self.gpg = gpg

    def handle_status(self, key, value):
        logger.debug('SendResult: %s: %s', key, value)
def _set_fields(target, fieldnames, args):
for i, var in enumerate(fieldnames):
if i < len(args):
target[var] = args[i]
else:
target[var] = 'unavailable'
class SearchKeys(list):
    ''' Handle status messages for --search-keys.

        Handle pub and uid (relating the latter to the former).

        Don't care about the rest
    '''

    # Index of the uid string within a colon-split uid record.
    UID_INDEX = 1
    # Field names for the colon-split pub records.
    FIELDS = 'type keyid algo length date expires'.split()

    def __init__(self, gpg):
        self.gpg = gpg
        self.curkey = None
        self.fingerprints = []
        self.uids = []

    def get_fields(self, args):
        # Build a key dict from a colon-split record; missing fields are
        # padded with 'unavailable'.
        result = {}
        _set_fields(result, self.FIELDS, args)
        result['uids'] = []
        result['sigs'] = []
        return result

    def pub(self, args):
        # Start a new key record and append it to this list.
        self.curkey = curkey = self.get_fields(args)
        self.append(curkey)

    def uid(self, args):
        # Decode gpg's \xNN and backslash escapes, then attach the uid to
        # the current key.
        uid = args[self.UID_INDEX]
        uid = ESCAPE_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), uid)
        for k, v in BASIC_ESCAPES.items():
            uid = uid.replace(k, v)
        self.curkey['uids'].append(uid)
        self.uids.append(uid)

    def handle_status(self, key, value):  # pragma: no cover
        pass
class ListKeys(SearchKeys):
    ''' Handle status messages for --list-keys, --list-sigs.

        Handle pub and uid (relating the latter to the former).

        Don't care about (info from src/DETAILS):

        crt = X.509 certificate
        crs = X.509 certificate and private key available
        uat = user attribute (same as user id except for field 10).
        sig = signature
        rev = revocation signature
        pkd = public key data (special field format, see below)
        grp = reserved for gpgsm
        rvk = revocation key
    '''

    UID_INDEX = 9
    FIELDS = 'type trust length algo keyid date expires dummy ownertrust uid sig cap issuer flag token hash curve compliance updated origin'.split()

    def __init__(self, gpg):
        super(ListKeys, self).__init__(gpg)
        self.in_subkey = False
        self.key_map = {}

    def key(self, args):
        # Start a new (public or secret) key record.
        self.curkey = curkey = self.get_fields(args)
        if curkey['uid']:
            curkey['uids'].append(curkey['uid'])
        del curkey['uid']
        curkey['subkeys'] = []
        self.append(curkey)
        self.in_subkey = False

    # pub and sec records are handled identically to key.
    pub = sec = key

    def fpr(self, args):
        # Record a fingerprint for the current key (or its latest subkey)
        # and index the key by it.
        fp = args[9]
        if fp in self.key_map:  # pragma: no cover
            raise ValueError('Unexpected fingerprint collision: %s' % fp)
        if not self.in_subkey:
            self.curkey['fingerprint'] = fp
            self.fingerprints.append(fp)
            self.key_map[fp] = self.curkey
        else:
            self.curkey['subkeys'][-1].append(fp)
            self.key_map[fp] = self.curkey

    def _collect_subkey_info(self, curkey, args):
        # Store the full field dict for a subkey under its keyid.
        info_map = curkey.setdefault('subkey_info', {})
        info = {}
        _set_fields(info, self.FIELDS, args)
        info_map[args[4]] = info

    def sub(self, args):
        # See issue #81. We create a dict with more information about
        # subkeys, but for backward compatibility reason, have to add it in
        # as a separate entry 'subkey_info'
        subkey = [args[4], args[11]]  # keyid, type
        self.curkey['subkeys'].append(subkey)
        self._collect_subkey_info(self.curkey, args)
        self.in_subkey = True

    def ssb(self, args):
        # Secret subkey record; the type field is not available here.
        subkey = [args[4], None]  # keyid, type
        self.curkey['subkeys'].append(subkey)
        self._collect_subkey_info(self.curkey, args)
        self.in_subkey = True

    def sig(self, args):
        # keyid, uid, sigclass
        self.curkey['sigs'].append((args[4], args[9], args[10]))
class ScanKeys(ListKeys):
    """Handle status messages for --with-fingerprint."""

    def sub(self, args):
        """Handle a 'sub' record from --with-fingerprint --with-colons output.

        That mode somehow emits fewer colon-separated fields, so the subkey
        type is taken from the last field (args[-1]) rather than args[11].
        """
        self.curkey['subkeys'].append([args[4], args[-1]])  # [keyid, type]
        self._collect_subkey_info(self.curkey, args)
        self.in_subkey = True
class TextHandler(object):
    # Mixin for result classes whose raw .data payload can be rendered as text.
    def _as_text(self):
        # Decode the raw bytes using the owning GPG instance's encoding and
        # error-handling settings.
        return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
    if _py3k:
        # Python 3: str() yields the decoded text.
        __str__ = _as_text
    else:
        # Python 2: unicode() yields the decoded text, while str() returns
        # the raw (byte-string) payload unchanged.
        __unicode__ = _as_text
        def __str__(self):
            return self.data
class Crypt(Verify, TextHandler):
    """Handle status messages for --encrypt and --decrypt."""

    def __init__(self, gpg):
        Verify.__init__(self, gpg)
        self.data = ''
        self.ok = False
        self.status = ''
        self.key_id = None

    def __nonzero__(self):
        # The operation succeeded iff a terminal OK status was seen.
        return bool(self.ok)

    __bool__ = __nonzero__

    def handle_status(self, key, value):
        """Update ok/status/key_id from a single status-fd record."""
        # Keywords whose status text is a fixed string.
        fixed_status = {
            "NODATA": "no data was provided",
            "NEED_PASSPHRASE_SYM": 'need symmetric passphrase',
            "BEGIN_DECRYPTION": 'decryption incomplete',
            "BEGIN_ENCRYPTION": 'encryption incomplete',
            "INV_RECP": 'invalid recipient',
            "KEYEXPIRED": 'key expired',
            "SIG_CREATED": 'sig created',
            "SIGEXPIRED": 'sig expired',
        }
        if key in ("WARNING", "ERROR"):
            logger.warning('potential problem: %s: %s', key, value)
        elif key in fixed_status:
            self.status = fixed_status[key]
        elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
                     "MISSING_PASSPHRASE", "DECRYPTION_FAILED",
                     "KEY_NOT_CREATED", "NEED_PASSPHRASE_PIN"):
            # Status text is derived directly from the keyword.
            self.status = key.replace("_", " ").lower()
        elif key == "DECRYPTION_OKAY":
            self.status = 'decryption ok'
            self.ok = True
        elif key == "END_ENCRYPTION":
            self.status = 'encryption ok'
            self.ok = True
        elif key == "ENC_TO":  # pragma: no cover
            # ENC_TO <long_keyid> <keytype> <keylength>
            self.key_id = value.split(' ', 1)[0]
        elif key in ("USERID_HINT", "GOODMDC",
                     "END_DECRYPTION", "CARDCTRL", "BADMDC",
                     "SC_OP_FAILURE", "SC_OP_SUCCESS",
                     "PINENTRY_LAUNCHED", "KEY_CONSIDERED"):
            pass  # informational; no state change needed
        else:
            Verify.handle_status(self, key, value)
class GenKey(object):
    """Handle status messages for --gen-key."""

    def __init__(self, gpg):
        self.gpg = gpg
        self.fingerprint = None
        self.type = None

    def __nonzero__(self):
        # Truthy once a key has actually been created.
        return bool(self.fingerprint)

    __bool__ = __nonzero__

    def __str__(self):
        return self.fingerprint or ''

    def handle_status(self, key, value):
        """Record the new key's type and fingerprint from status-fd records."""
        if key == "KEY_CREATED":
            self.type, self.fingerprint = value.split()
        elif key in ("PROGRESS", "GOOD_PASSPHRASE", "KEY_NOT_CREATED"):
            pass  # informational only
        elif key in ("WARNING", "ERROR"):  # pragma: no cover
            logger.warning('potential problem: %s: %s', key, value)
        else:  # pragma: no cover
            logger.debug('message ignored: %s, %s', key, value)
class ExportResult(GenKey):
    """Handle status messages for --export[-secret-key].

    For now this just reuses an existing class as its base - if needed,
    handle_status can be overridden further for more specific handling.
    """

    def handle_status(self, key, value):
        # Export-specific keywords carry no state we need to record.
        if key not in ("EXPORTED", "EXPORT_RES"):
            super(ExportResult, self).handle_status(key, value)
class DeleteResult(object):
    """Handle status messages for --delete-key and --delete-secret-key."""

    # Mapping of DELETE_PROBLEM reason codes to human-readable messages.
    problem_reason = {
        '1': 'No such key',
        '2': 'Must delete secret key first',
        '3': 'Ambiguous specification',
    }

    def __init__(self, gpg):
        self.gpg = gpg
        self.status = 'ok'

    def __str__(self):
        return self.status

    def handle_status(self, key, value):
        """Translate a DELETE_PROBLEM reason code into a status message."""
        if key == "DELETE_PROBLEM":  # pragma: no cover
            reason = self.problem_reason.get(value)
            if reason is None:
                reason = "Unknown error: %r" % value
            self.status = reason
        else:  # pragma: no cover
            logger.debug('message ignored: %s, %s', key, value)

    def __nonzero__(self):
        # Deletion succeeded iff no problem was reported.
        return self.status == 'ok'

    __bool__ = __nonzero__
class Sign(TextHandler):
    """Handle status messages for --sign."""

    def __init__(self, gpg):
        self.gpg = gpg
        self.type = None
        self.hash_algo = None
        self.fingerprint = None
        self.status = None
        self.key_id = None
        self.username = None

    def __nonzero__(self):
        # Signing succeeded iff SIG_CREATED reported a fingerprint.
        return self.fingerprint is not None

    __bool__ = __nonzero__

    def handle_status(self, key, value):
        """Record signing progress and outcome from a status-fd record."""
        if key == "SIG_CREATED":
            # SIG_CREATED <type> <algo> <hash_algo> <class> <timestamp> <fpr>
            (self.type, algo, self.hash_algo, cls,
             self.timestamp, self.fingerprint) = value.split()
            self.status = 'signature created'
        elif key == "USERID_HINT":  # pragma: no cover
            self.key_id, self.username = value.split(' ', 1)
        elif key == "BAD_PASSPHRASE":
            self.status = 'bad passphrase'
        elif key in ("KEYEXPIRED", "SIGEXPIRED"):  # pragma: no cover
            self.status = 'key expired'
        elif key == "KEYREVOKED":  # pragma: no cover
            self.status = 'key revoked'
        elif key in ("WARNING", "ERROR", "FAILURE"):  # pragma: no cover
            logger.warning('potential problem: %s: %s', key, value)
        elif key in ("NEED_PASSPHRASE", "GOOD_PASSPHRASE", "BEGIN_SIGNING"):
            pass  # informational only
        else:  # pragma: no cover
            logger.debug('message ignored: %s, %s', key, value)
# Matches the first line of "gpg --version" output and captures the dotted
# version number. Compiled against bytes because gpg's stdout is read as bytes.
VERSION_RE = re.compile(
    r'gpg \(GnuPG(?:/MacGPG2)?\) (\d+(\.\d+)*)'.encode('ascii'), re.I)
# A search query consisting solely of hex digits is treated as a key ID
# (and gets an '0x' prefix in search_keys).
HEX_DIGITS_RE = re.compile(r'[0-9a-f]+$', re.I)
class GPG(object):
    # Error-handling mode passed to bytes.decode for all text decoding.
    decode_errors = 'strict'

    # Maps operation names to the result classes that parse their status output.
    result_map = {
        'crypt': Crypt,
        'delete': DeleteResult,
        'generate': GenKey,
        'import': ImportResult,
        'send': SendResult,
        'list': ListKeys,
        'scan': ScanKeys,
        'search': SearchKeys,
        'sign': Sign,
        'verify': Verify,
        'export': ExportResult,
    }
    # NOTE(review): the bare string below is a no-op statement, not the class
    # docstring (it does not appear first in the class body).
    "Encapsulate access to the gpg executable"
    def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
                 use_agent=False, keyring=None, options=None,
                 secret_keyring=None):
        """Initialize a GPG process wrapper. Options are:

        gpgbinary -- full pathname for GPG binary.

        gnupghome -- full pathname to where we can find the public and
        private keyrings. Default is whatever gpg defaults to.

        keyring -- name of alternative keyring file to use, or list of such
        keyrings. If specified, the default keyring is not used.

        options -- a list of additional options to pass to the GPG binary.

        secret_keyring -- name of alternative secret keyring file to use, or
        list of such keyrings.

        Raises OSError if the gpg binary cannot be run, and ValueError if it
        exits with a non-zero return code.
        """
        self.gpgbinary = gpgbinary
        self.gnupghome = gnupghome
        if keyring:
            # Allow passing a string or another iterable. Make it uniformly
            # a list of keyring filenames
            if isinstance(keyring, string_types):
                keyring = [keyring]
        self.keyring = keyring
        if secret_keyring:
            # Allow passing a string or another iterable. Make it uniformly
            # a list of keyring filenames
            if isinstance(secret_keyring, string_types):
                secret_keyring = [secret_keyring]
        self.secret_keyring = secret_keyring
        self.verbose = verbose
        self.use_agent = use_agent
        if isinstance(options, str):  # pragma: no cover
            options = [options]
        self.options = options
        self.on_data = None  # or a callable - will be called with data chunks
        # Changed in 0.3.7 to use Latin-1 encoding rather than
        # locale.getpreferredencoding falling back to sys.stdin.encoding
        # falling back to utf-8, because gpg itself uses latin-1 as the default
        # encoding.
        self.encoding = 'latin-1'
        if gnupghome and not os.path.isdir(self.gnupghome):
            # 0x1C0 == 0o700: home directory readable by owner only.
            os.makedirs(self.gnupghome, 0x1C0)
        # Run "gpg --version" once, both as a sanity check and to determine
        # the version tuple used for feature checks (e.g. GnuPG >= 2.1).
        try:
            p = self._open_subprocess(["--version"])
        except OSError:
            msg = 'Unable to run gpg (%s) - it may not be available.' % self.gpgbinary
            logger.exception(msg)
            raise OSError(msg)
        result = self.result_map['verify'](self)  # any result will do for this
        self._collect_output(p, result, stdin=p.stdin)
        if p.returncode != 0:  # pragma: no cover
            raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
                                                             result.stderr))
        m = VERSION_RE.match(result.data)
        if not m:  # pragma: no cover
            self.version = None
        else:
            # result.data is bytes, so split on a bytes dot.
            dot = '.'.encode('ascii')
            self.version = tuple([int(s) for s in m.groups()[0].split(dot)])

    def make_args(self, args, passphrase):
        """
        Make a list of command line elements for GPG. The value of ``args``
        will be appended. The ``passphrase`` argument needs to be True if
        a passphrase will be sent to GPG, else False.
        """
        cmd = [self.gpgbinary, '--status-fd', '2', '--no-tty', '--no-verbose']
        if 'DEBUG_IPC' in os.environ:
            cmd.extend(['--debug', 'ipc'])
        if passphrase and hasattr(self, 'version'):
            if self.version >= (2, 1):
                # GnuPG >= 2.1 only accepts a passphrase on a descriptor when
                # pinentry is in loopback mode.
                cmd[1:1] = ['--pinentry-mode', 'loopback']
        cmd.extend(['--fixed-list-mode', '--batch', '--with-colons'])
        if self.gnupghome:
            cmd.extend(['--homedir', no_quote(self.gnupghome)])
        if self.keyring:
            cmd.append('--no-default-keyring')
            for fn in self.keyring:
                cmd.extend(['--keyring', no_quote(fn)])
        if self.secret_keyring:
            for fn in self.secret_keyring:
                cmd.extend(['--secret-keyring', no_quote(fn)])
        if passphrase:
            cmd.extend(['--passphrase-fd', '0'])
        if self.use_agent:  # pragma: no cover
            cmd.append('--use-agent')
        if self.options:
            cmd.extend(self.options)
        cmd.extend(args)
        return cmd

    def _open_subprocess(self, args, passphrase=False):
        # Internal method: open a pipe to a GPG subprocess and return
        # the Popen object for communicating with it.
        from subprocess import list2cmdline as debug_print
        cmd = self.make_args(args, passphrase)
        if self.verbose:  # pragma: no cover
            print(debug_print(cmd))
        if not STARTUPINFO:
            si = None
        else:  # pragma: no cover
            # On Windows, hide the console window of the spawned process.
            si = STARTUPINFO()
            si.dwFlags = STARTF_USESHOWWINDOW
            si.wShowWindow = SW_HIDE
        result = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE,
                       startupinfo=si)
        logger.debug("%s: %s", result.pid, debug_print(cmd))
        return result

    def _read_response(self, stream, result):
        # Internal method: reads all the stderr output from GPG, taking notice
        # only of lines that begin with the magic [GNUPG:] prefix.
        #
        # Calls methods on the response object for each valid token found,
        # with the arg being the remainder of the status line.
        lines = []
        while True:
            line = stream.readline()
            if len(line) == 0:
                break
            lines.append(line)
            line = line.rstrip()
            if self.verbose:  # pragma: no cover
                print(line)
            logger.debug("%s", line)
            if line[0:9] == '[GNUPG:] ':
                # Chop off the prefix
                line = line[9:]
                L = line.split(None, 1)
                keyword = L[0]
                if len(L) > 1:
                    value = L[1]
                else:
                    value = ""
                result.handle_status(keyword, value)
        # All of stderr (including non-status lines) is kept for diagnostics.
        result.stderr = ''.join(lines)

    def _read_data(self, stream, result, on_data=None):
        # Read the contents of the file from GPG's stdout in 1 KiB chunks,
        # optionally forwarding each chunk to the on_data callback.
        chunks = []
        while True:
            data = stream.read(1024)
            if len(data) == 0:
                break
            logger.debug("chunk: %r" % data[:256])
            chunks.append(data)
            if on_data:
                on_data(data)
        if _py3k:
            # Join using b'' or '', as appropriate
            result.data = type(data)().join(chunks)
        else:
            result.data = ''.join(chunks)

    def _collect_output(self, process, result, writer=None, stdin=None):
        """
        Drain the subprocesses output streams, writing the collected output
        to the result. If a writer thread (writing to the subprocess) is given,
        make sure it's joined before returning. If a stdin stream is given,
        close it before returning.
        """
        # stderr carries the status protocol; wrap it in a text reader.
        stderr = codecs.getreader(self.encoding)(process.stderr)
        rr = threading.Thread(target=self._read_response,
                              args=(stderr, result))
        rr.setDaemon(True)
        logger.debug('stderr reader: %r', rr)
        rr.start()
        # stdout carries the payload and is read as raw bytes.
        stdout = process.stdout
        dr = threading.Thread(target=self._read_data,
                              args=(stdout, result, self.on_data))
        dr.setDaemon(True)
        logger.debug('stdout reader: %r', dr)
        dr.start()
        dr.join()
        rr.join()
        if writer is not None:
            writer.join()
        process.wait()
        if stdin is not None:
            try:
                stdin.close()
            except IOError:  # pragma: no cover
                pass
        stderr.close()
        stdout.close()

    def _handle_io(self, args, fileobj, result, passphrase=None, binary=False):
        "Handle a call to GPG - pass input data, collect output data"
        # Handle a basic data call - pass data to GPG, handle the output
        # including status information. Garbage In, Garbage Out :)
        p = self._open_subprocess(args, passphrase is not None)
        if not binary:  # pragma: no cover
            stdin = codecs.getwriter(self.encoding)(p.stdin)
        else:
            stdin = p.stdin
        if passphrase:
            # The passphrase goes first on gpg's fd 0 (see make_args).
            _write_passphrase(stdin, passphrase, self.encoding)
        writer = _threaded_copy_data(fileobj, stdin)
        self._collect_output(p, result, writer, stdin)
        return result

    #
    # SIGNATURE METHODS
    #
    def sign(self, message, **kwargs):
        """Sign message (a string/bytes); see sign_file for keyword options."""
        f = _make_binary_stream(message, self.encoding)
        result = self.sign_file(f, **kwargs)
        f.close()
        return result

    def set_output_without_confirmation(self, args, output):
        "If writing to a file which exists, avoid a confirmation message."
        if os.path.exists(output):
            # We need to avoid an overwrite confirmation message
            args.extend(['--yes'])
        args.extend(['--output', no_quote(output)])

    def sign_file(self, file, keyid=None, passphrase=None, clearsign=True,
                  detach=False, binary=False, output=None, extra_args=None):
        """Sign the contents of the file-like object 'file' and return a
        Sign result object."""
        logger.debug("sign_file: %s", file)
        if binary:  # pragma: no cover
            args = ['-s']
        else:
            args = ['-sa']
        # You can't specify detach-sign and clearsign together: gpg ignores
        # the detach-sign in that case.
        if detach:
            args.append("--detach-sign")
        elif clearsign:
            args.append("--clearsign")
        if keyid:
            args.extend(['--default-key', no_quote(keyid)])
        if output:  # write the output to a file with the specified name
            self.set_output_without_confirmation(args, output)
        if extra_args:
            args.extend(extra_args)
        result = self.result_map['sign'](self)
        # We could use _handle_io here except for the fact that if the
        # passphrase is bad, gpg bails and you can't write the message.
        p = self._open_subprocess(args, passphrase is not None)
        try:
            stdin = p.stdin
            if passphrase:
                _write_passphrase(stdin, passphrase, self.encoding)
            writer = _threaded_copy_data(file, stdin)
        except IOError:  # pragma: no cover
            # NOTE(review): uses the logging module directly rather than the
            # module-level 'logger' used everywhere else - confirm intended.
            logging.exception("error writing message")
            writer = None
        self._collect_output(p, result, writer, stdin)
        return result

    def verify(self, data, **kwargs):
        """Verify the signature on the contents of the string 'data'

        >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
        >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
        >>> input = gpg.gen_key_input(passphrase='foo')
        >>> key = gpg.gen_key(input)
        >>> assert key
        >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
        >>> assert not sig
        >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
        >>> assert sig
        >>> verify = gpg.verify(sig.data)
        >>> assert verify
        """
        f = _make_binary_stream(data, self.encoding)
        result = self.verify_file(f, **kwargs)
        f.close()
        return result

    def verify_file(self, file, data_filename=None, close_file=True, extra_args=None):
        "Verify the signature on the contents of the file-like object 'file'"
        logger.debug('verify_file: %r, %r', file, data_filename)
        result = self.result_map['verify'](self)
        args = ['--verify']
        if extra_args:
            args.extend(extra_args)
        if data_filename is None:
            # Inline/attached signature: just stream the data to gpg.
            self._handle_io(args, file, result, binary=True)
        else:
            # Detached signature: write the signature to a temp file and
            # verify it against data_filename.
            logger.debug('Handling detached verification')
            import tempfile
            fd, fn = tempfile.mkstemp(prefix='pygpg')
            s = file.read()
            if close_file:
                file.close()
            logger.debug('Wrote to temp file: %r', s)
            os.write(fd, s)
            os.close(fd)
            args.append(no_quote(fn))
            args.append(no_quote(data_filename))
            try:
                p = self._open_subprocess(args)
                self._collect_output(p, result, stdin=p.stdin)
            finally:
                os.unlink(fn)
        return result

    def verify_data(self, sig_filename, data, extra_args=None):
        "Verify the signature in sig_filename against data in memory"
        logger.debug('verify_data: %r, %r ...', sig_filename, data[:16])
        result = self.result_map['verify'](self)
        args = ['--verify']
        if extra_args:
            args.extend(extra_args)
        # '-' tells gpg to read the signed data from stdin.
        args.extend([no_quote(sig_filename), '-'])
        stream = _make_memory_stream(data)
        self._handle_io(args, stream, result, binary=True)
        return result

    #
    # KEY MANAGEMENT
    #
    def import_keys(self, key_data):
        """
        Import the key_data into our keyring.
        """
        result = self.result_map['import'](self)
        logger.debug('import_keys: %r', key_data[:256])
        data = _make_binary_stream(key_data, self.encoding)
        self._handle_io(['--import'], data, result, binary=True)
        logger.debug('import_keys result: %r', result.__dict__)
        data.close()
        return result

    def recv_keys(self, keyserver, *keyids):
        """Import a key from a keyserver

        >>> import shutil
        >>> shutil.rmtree("keys", ignore_errors=True)
        >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
        >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
        >>> os.chmod('keys', 0x1C0)
        >>> result = gpg.recv_keys('pgp.mit.edu', '92905378')
        >>> assert result
        """
        result = self.result_map['import'](self)
        logger.debug('recv_keys: %r', keyids)
        # No payload to send; gpg fetches the keys itself.
        data = _make_binary_stream("", self.encoding)
        args = ['--keyserver', no_quote(keyserver), '--recv-keys']
        args.extend([no_quote(k) for k in keyids])
        self._handle_io(args, data, result, binary=True)
        logger.debug('recv_keys result: %r', result.__dict__)
        data.close()
        return result

    def send_keys(self, keyserver, *keyids):
        """Send a key to a keyserver.

        Note: it's not practical to test this function without sending
        arbitrary data to live keyservers.
        """
        result = self.result_map['send'](self)
        logger.debug('send_keys: %r', keyids)
        data = _make_binary_stream('', self.encoding)
        args = ['--keyserver', no_quote(keyserver), '--send-keys']
        args.extend([no_quote(k) for k in keyids])
        self._handle_io(args, data, result, binary=True)
        logger.debug('send_keys result: %r', result.__dict__)
        data.close()
        return result

    def delete_keys(self, fingerprints, secret=False, passphrase=None,
                    expect_passphrase=True):
        """
        Delete the indicated keys.

        Since GnuPG 2.1, you can't delete secret keys without providing a
        passphrase. However, if you're expecting the passphrase to go to gpg
        via pinentry, you should specify expect_passphrase=False. (It's only
        checked for GnuPG >= 2.1).
        """
        which = 'key'
        if secret:  # pragma: no cover
            if (self.version >= (2, 1) and passphrase is None and
                    expect_passphrase):
                raise ValueError('For GnuPG >= 2.1, deleting secret keys '
                                 'needs a passphrase to be provided')
            which = 'secret-key'
        if _is_sequence(fingerprints):  # pragma: no cover
            fingerprints = [no_quote(s) for s in fingerprints]
        else:
            fingerprints = [no_quote(fingerprints)]
        args = ['--delete-%s' % which]
        args.extend(fingerprints)
        result = self.result_map['delete'](self)
        if not secret or self.version < (2, 1):
            p = self._open_subprocess(args)
            self._collect_output(p, result, stdin=p.stdin)
        else:
            # Need to send in a passphrase.
            f = _make_binary_stream('', self.encoding)
            try:
                self._handle_io(args, f, result, passphrase=passphrase,
                                binary=True)
            finally:
                f.close()
        return result

    def export_keys(self, keyids, secret=False, armor=True, minimal=False,
                    passphrase=None, expect_passphrase=True):
        """
        Export the indicated keys. A 'keyid' is anything gpg accepts.

        Returns text if armor is True, else bytes (see issue #49).

        Since GnuPG 2.1, you can't export secret keys without providing a
        passphrase. However, if you're expecting the passphrase to go to gpg
        via pinentry, you should specify expect_passphrase=False. (It's only
        checked for GnuPG >= 2.1).
        """
        which = ''
        if secret:
            which = '-secret-key'
            if (self.version >= (2, 1) and passphrase is None and
                    expect_passphrase):
                raise ValueError('For GnuPG >= 2.1, exporting secret keys '
                                 'needs a passphrase to be provided')
        if _is_sequence(keyids):
            keyids = [no_quote(k) for k in keyids]
        else:
            keyids = [no_quote(keyids)]
        args = ['--export%s' % which]
        if armor:
            args.append('--armor')
        if minimal:  # pragma: no cover
            args.extend(['--export-options', 'export-minimal'])
        args.extend(keyids)
        # gpg --export produces no status-fd output; stdout will be
        # empty in case of failure
        result = self.result_map['export'](self)
        if not secret or self.version < (2, 1):
            p = self._open_subprocess(args)
            self._collect_output(p, result, stdin=p.stdin)
        else:
            # Need to send in a passphrase.
            f = _make_binary_stream('', self.encoding)
            try:
                self._handle_io(args, f, result, passphrase=passphrase,
                                binary=True)
            finally:
                f.close()
        logger.debug('export_keys result: %r', result.data)
        # Issue #49: Return bytes if armor not specified, else text
        result = result.data
        if armor:
            result = result.decode(self.encoding, self.decode_errors)
        return result

    def _get_list_output(self, p, kind):
        # Get the response information
        result = self.result_map[kind](self)
        self._collect_output(p, result, stdin=p.stdin)
        lines = result.data.decode(self.encoding,
                                   self.decode_errors).splitlines()
        valid_keywords = 'pub uid sec fpr sub ssb sig'.split()
        for line in lines:
            if self.verbose:  # pragma: no cover
                print(line)
            logger.debug("line: %r", line.rstrip())
            if not line:  # pragma: no cover
                break
            L = line.strip().split(':')
            if not L:  # pragma: no cover
                continue
            keyword = L[0]
            if keyword in valid_keywords:
                # Dispatch to the result's per-record handler (pub, uid, ...).
                getattr(result, keyword)(L)
        return result

    def list_keys(self, secret=False, keys=None, sigs=False):
        """ list the keys currently in the keyring

        >>> import shutil
        >>> shutil.rmtree("keys", ignore_errors=True)
        >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
        >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
        >>> input = gpg.gen_key_input(passphrase='foo')
        >>> result = gpg.gen_key(input)
        >>> fp1 = result.fingerprint
        >>> result = gpg.gen_key(input)
        >>> fp2 = result.fingerprint
        >>> pubkeys = gpg.list_keys()
        >>> assert fp1 in pubkeys.fingerprints
        >>> assert fp2 in pubkeys.fingerprints
        """
        if sigs:
            which = 'sigs'
        else:
            which = 'keys'
        if secret:
            # NOTE(review): secret=True takes precedence over sigs=True.
            which = 'secret-keys'
        args = ['--list-%s' % which,
                '--fingerprint', '--fingerprint']  # get subkey FPs, too
        if keys:
            if isinstance(keys, string_types):
                keys = [keys]
            args.extend(keys)
        p = self._open_subprocess(args)
        return self._get_list_output(p, 'list')

    def scan_keys(self, filename):
        """
        List details of an ascii armored or binary key file
        without first importing it to the local keyring.

        The function achieves this on modern GnuPG by running:

        $ gpg --dry-run --import-options import-show --import

        On older versions, it does the *much* riskier:

        $ gpg --with-fingerprint --with-colons filename
        """
        if self.version >= (2, 1):
            args = ['--dry-run', '--import-options', 'import-show', '--import']
        else:
            logger.warning('Trying to list packets, but if the file is not a '
                           'keyring, might accidentally decrypt')
            args = ['--with-fingerprint', '--with-colons', '--fixed-list-mode']
        args.append(no_quote(filename))
        p = self._open_subprocess(args)
        return self._get_list_output(p, 'scan')

    def search_keys(self, query, keyserver='pgp.mit.edu'):
        """ search keyserver by query (using --search-keys option)

        >>> import shutil
        >>> shutil.rmtree('keys', ignore_errors=True)
        >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
        >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys')
        >>> os.chmod('keys', 0x1C0)
        >>> result = gpg.search_keys('<vinay_sajip@hotmail.com>')
        >>> assert result, 'Failed using default keyserver'
        >>> #keyserver = 'keyserver.ubuntu.com'
        >>> #result = gpg.search_keys('<vinay_sajip@hotmail.com>', keyserver)
        >>> #assert result, 'Failed using keyserver.ubuntu.com'
        """
        query = query.strip()
        if HEX_DIGITS_RE.match(query):
            # A pure-hex query is a key ID; gpg wants it 0x-prefixed.
            query = '0x' + query
        args = ['--fingerprint',
                '--keyserver', no_quote(keyserver), '--search-keys',
                no_quote(query)]
        p = self._open_subprocess(args)
        # Get the response information
        result = self.result_map['search'](self)
        self._collect_output(p, result, stdin=p.stdin)
        lines = result.data.decode(self.encoding,
                                   self.decode_errors).splitlines()
        valid_keywords = ['pub', 'uid']
        for line in lines:
            if self.verbose:  # pragma: no cover
                print(line)
            logger.debug('line: %r', line.rstrip())
            if not line:  # sometimes get blank lines on Windows
                continue
            L = line.strip().split(':')
            if not L:  # pragma: no cover
                continue
            keyword = L[0]
            if keyword in valid_keywords:
                getattr(result, keyword)(L)
        return result

    def gen_key(self, input):
        """Generate a key; you might use gen_key_input() to create the
        control input.

        >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
        >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
        >>> input = gpg.gen_key_input(passphrase='foo')
        >>> result = gpg.gen_key(input)
        >>> assert result
        >>> result = gpg.gen_key('foo')
        >>> assert not result
        """
        args = ["--gen-key"]
        result = self.result_map['generate'](self)
        f = _make_binary_stream(input, self.encoding)
        self._handle_io(args, f, result, binary=True)
        f.close()
        return result

    def gen_key_input(self, **kwargs):
        """
        Generate --gen-key input per gpg doc/DETAILS.

        Keyword argument names are converted to gpg parameter names
        (underscores become hyphens, title-cased); sensible defaults are
        supplied for Key-Type, Key-Length, Name-Real and Name-Email.
        """
        parms = {}
        for key, val in list(kwargs.items()):
            key = key.replace('_', '-').title()
            if str(val).strip():  # skip empty strings
                parms[key] = val
        parms.setdefault('Key-Type', 'RSA')
        parms.setdefault('Key-Length', 2048)
        parms.setdefault('Name-Real', "Autogenerated Key")
        logname = (os.environ.get('LOGNAME') or os.environ.get('USERNAME') or
                   'unspecified')
        hostname = socket.gethostname()
        parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
                                                  hostname))
        # Key-Type must come first in the control input.
        out = "Key-Type: %s\n" % parms.pop('Key-Type')
        for key, val in list(parms.items()):
            out += "%s: %s\n" % (key, val)
        out += "%commit\n"
        return out

    # Example control input accepted by gpg --gen-key:
    #
    # Key-Type: RSA
    # Key-Length: 1024
    # Name-Real: ISdlink Server on %s
    # Name-Comment: Created by %s
    # Name-Email: isdlink@%s
    # Expire-Date: 0
    # %commit
    #
    #
    # Key-Type: DSA
    # Key-Length: 1024
    # Subkey-Type: ELG-E
    # Subkey-Length: 1024
    # Name-Real: Joe Tester
    # Name-Comment: with stupid passphrase
    # Name-Email: joe@foo.bar
    # Expire-Date: 0
    # Passphrase: abc
    # %pubring foo.pub
    # %secring foo.sec
    # %commit

    #
    # ENCRYPTION
    #
    def encrypt_file(self, file, recipients, sign=None,
                     always_trust=False, passphrase=None,
                     armor=True, output=None, symmetric=False, extra_args=None):
        "Encrypt the message read from the file-like object 'file'"
        args = ['--encrypt']
        if symmetric:
            # can't be False or None - could be True or a cipher algo value
            # such as AES256
            args = ['--symmetric']
            if symmetric is not True:
                args.extend(['--cipher-algo', no_quote(symmetric)])
            # else use the default, currently CAST5
        else:
            if not recipients:
                raise ValueError('No recipients specified with asymmetric '
                                 'encryption')
            if not _is_sequence(recipients):
                recipients = (recipients,)
            for recipient in recipients:
                args.extend(['--recipient', no_quote(recipient)])
        if armor:  # create ascii-armored output - False for binary output
            args.append('--armor')
        if output:  # write the output to a file with the specified name
            self.set_output_without_confirmation(args, output)
        if sign is True:  # pragma: no cover
            args.append('--sign')
        elif sign:  # pragma: no cover
            args.extend(['--sign', '--default-key', no_quote(sign)])
        if always_trust:  # pragma: no cover
            args.append('--always-trust')
        if extra_args:
            args.extend(extra_args)
        result = self.result_map['crypt'](self)
        self._handle_io(args, file, result, passphrase=passphrase, binary=True)
        logger.debug('encrypt result: %r', result.data)
        return result

    def encrypt(self, data, recipients, **kwargs):
        """Encrypt the message contained in the string 'data'

        >>> import shutil
        >>> if os.path.exists("keys"):
        ...     shutil.rmtree("keys", ignore_errors=True)
        >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
        >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
        >>> input = gpg.gen_key_input(name_email='user1@test', passphrase='pp1')
        >>> result = gpg.gen_key(input)
        >>> fp1 = result.fingerprint
        >>> input = gpg.gen_key_input(name_email='user2@test', passphrase='pp2')
        >>> result = gpg.gen_key(input)
        >>> fp2 = result.fingerprint
        >>> result = gpg.encrypt("hello",fp2)
        >>> message = str(result)
        >>> assert message != 'hello'
        >>> result = gpg.decrypt(message, passphrase='pp2')
        >>> assert result
        >>> str(result)
        'hello'
        >>> result = gpg.encrypt("hello again", fp1)
        >>> message = str(result)
        >>> result = gpg.decrypt(message, passphrase='bar')
        >>> result.status in ('decryption failed', 'bad passphrase')
        True
        >>> assert not result
        >>> result = gpg.decrypt(message, passphrase='pp1')
        >>> result.status == 'decryption ok'
        True
        >>> str(result)
        'hello again'
        >>> result = gpg.encrypt("signed hello", fp2, sign=fp1, passphrase='pp1')
        >>> result.status == 'encryption ok'
        True
        >>> message = str(result)
        >>> result = gpg.decrypt(message, passphrase='pp2')
        >>> result.status == 'decryption ok'
        True
        >>> assert result.fingerprint == fp1
        """
        data = _make_binary_stream(data, self.encoding)
        result = self.encrypt_file(data, recipients, **kwargs)
        data.close()
        return result

    def decrypt(self, message, **kwargs):
        """Decrypt the message (a string/bytes); see decrypt_file for options."""
        data = _make_binary_stream(message, self.encoding)
        result = self.decrypt_file(data, **kwargs)
        data.close()
        return result

    def decrypt_file(self, file, always_trust=False, passphrase=None,
                     output=None, extra_args=None):
        """Decrypt the message read from the file-like object 'file'."""
        args = ["--decrypt"]
        if output:  # write the output to a file with the specified name
            self.set_output_without_confirmation(args, output)
        if always_trust:  # pragma: no cover
            args.append("--always-trust")
        if extra_args:
            args.extend(extra_args)
        result = self.result_map['crypt'](self)
        self._handle_io(args, file, result, passphrase, binary=True)
        logger.debug('decrypt result: %r', result.data)
        return result

    def trust_keys(self, fingerprints, trustlevel):
        """Set the ownertrust for the given fingerprint(s) to *trustlevel*,
        which must be one of the keys of Verify.TRUST_LEVELS."""
        levels = Verify.TRUST_LEVELS
        if trustlevel not in levels:
            poss = ', '.join(sorted(levels))
            raise ValueError('Invalid trust level: "%s" (must be one of %s)' %
                             (trustlevel, poss))
        # Convert to gpg's ownertrust numbering (offset by 2).
        trustlevel = levels[trustlevel] + 2
        import tempfile
        try:
            fd, fn = tempfile.mkstemp()
            lines = []
            if isinstance(fingerprints, string_types):
                fingerprints = [fingerprints]
            for f in fingerprints:
                lines.append('%s:%s:' % (f, trustlevel))
            # The trailing newline is required!
            s = os.linesep.join(lines) + os.linesep
            logger.debug('writing ownertrust info: %s', s)
            os.write(fd, s.encode(self.encoding))
            os.close(fd)
            result = self.result_map['delete'](self)
            p = self._open_subprocess(['--import-ownertrust', fn])
            self._collect_output(p, result, stdin=p.stdin)
        finally:
            os.remove(fn)
        return result
| 36.067839 | 148 | 0.566388 |
7954f1110e7072d2caf60c06fbd029edc261d2c8 | 29,069 | bzl | Python | tensorflow/lite/build_def.bzl | Halo9Pan/_tensorflow | 85c8b2a817f95a3e979ecd1ed95bff1dc1335cff | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/lite/build_def.bzl | Halo9Pan/_tensorflow | 85c8b2a817f95a3e979ecd1ed95bff1dc1335cff | [
"Apache-2.0"
] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/lite/build_def.bzl | Halo9Pan/_tensorflow | 85c8b2a817f95a3e979ecd1ed95bff1dc1335cff | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | """Generate Flatbuffer binary from json."""
load(
"//tensorflow:tensorflow.bzl",
"clean_dep",
"tf_binary_additional_srcs",
"tf_cc_shared_object",
"tf_cc_test",
)
load("//tensorflow/lite/java:aar_with_jni.bzl", "aar_with_jni")
load("@build_bazel_rules_android//android:rules.bzl", "android_library")
def tflite_copts():
    """Returns the compile-time flags used for TFLite targets."""
    platform_flags = select({
        clean_dep("//tensorflow:android_arm"): ["-mfpu=neon"],
        clean_dep("//tensorflow:ios_x86_64"): ["-msse4.1"],
        clean_dep("//tensorflow:windows"): [
            "/DTFL_COMPILE_LIBRARY",
            "/wd4018",  # -Wno-sign-compare
        ],
        "//conditions:default": ["-Wno-sign-compare"],
    })
    opt_flags = select({
        clean_dep("//tensorflow:optimized"): ["-O3"],
        "//conditions:default": [],
    })
    size_flags = select({
        # Per-function/data sections let the linker garbage-collect unused
        # code, trimming binary size on Android.
        clean_dep("//tensorflow:android"): [
            "-ffunction-sections",
            "-fdata-sections",
        ],
        "//conditions:default": [],
    })
    exception_flags = select({
        clean_dep("//tensorflow:windows"): [],
        # Exceptions are unused in TFLite.
        "//conditions:default": ["-fno-exceptions"],
    })
    return (["-DFARMHASH_NO_CXX_STRING"] + platform_flags + opt_flags +
            size_flags + exception_flags)
# Symbol-export list and version script applied when linking the TFLite JNI
# shared library (see tflite_jni_binary).
EXPORTED_SYMBOLS = clean_dep("//tensorflow/lite/java/src/main/native:exported_symbols.lds")
LINKER_SCRIPT = clean_dep("//tensorflow/lite/java/src/main/native:version_script.lds")
def tflite_linkopts_unstripped():
    """Returns linker flags that shrink the TFLite binary, without stripping.

    These are useful when trying to investigate the relative size of the
    symbols in TFLite.

    Returns:
      a select object with proper linkopts
    """
    # --icf was considered and rejected: the gains were negligible and it
    # created potential compatibility problems.
    return select({
        clean_dep("//tensorflow:android"): [
            "-Wl,--no-export-dynamic",  # Only inc syms referenced by dynamic obj.
            "-Wl,--gc-sections",  # Eliminate unused code and data.
            "-Wl,--as-needed",  # Don't link unused libs.
        ],
        "//conditions:default": [],
    })
def tflite_jni_linkopts_unstripped():
    """Defines linker flags to reduce size of TFLite binary with JNI.
    These are useful when trying to investigate the relative size of the
    symbols in TFLite.
    Returns:
      a select object with proper linkopts
    """
    # In case you wonder why there's no --icf is because the gains were
    # negligible, and created potential compatibility problems.
    # Unlike tflite_linkopts_unstripped(), --no-export-dynamic is omitted
    # here because JNI entry points must stay visible to the runtime.
    return select({
        clean_dep("//tensorflow:android"): [
            "-Wl,--gc-sections", # Eliminate unused code and data.
            "-Wl,--as-needed", # Don't link unused libs.
        ],
        "//conditions:default": [],
    })
def tflite_symbol_opts():
    """Defines linker flags whether to include symbols or not.

    Returns:
      Linkopts that keep the symbol table only in debug builds, plus the
      Android-only -latomic dependency.
    """
    return select({
        clean_dep("//tensorflow:android"): [
            "-latomic", # Required for some uses of ISO C++11 <atomic> in x86.
        ],
        "//conditions:default": [],
    }) + select({
        clean_dep("//tensorflow:debug"): [],
        "//conditions:default": [
            "-s", # Omit symbol table, for all non debug builds
        ],
    })
def tflite_linkopts():
    """Defines linker flags to reduce size of TFLite binary."""
    # Size-trimming flags first, then symbol-stripping flags.
    size_opts = tflite_linkopts_unstripped()
    return size_opts + tflite_symbol_opts()
def tflite_jni_linkopts():
    """Defines linker flags to reduce size of TFLite binary with JNI."""
    # Same composition as tflite_linkopts(), using the JNI-safe size flags.
    size_opts = tflite_jni_linkopts_unstripped()
    return size_opts + tflite_symbol_opts()
def tflite_jni_binary(
        name,
        copts = tflite_copts(),
        linkopts = tflite_jni_linkopts(),
        linkscript = LINKER_SCRIPT,
        exported_symbols = EXPORTED_SYMBOLS,
        linkshared = 1,
        linkstatic = 1,
        testonly = 0,
        deps = [],
        tags = [],
        srcs = []):
    """Builds a jni binary for TFLite.

    Args:
      name: Name of the resulting cc_binary; also used as the soname /
        install name of the shared library.
      copts: Compiler options; defaults to tflite_copts().
      linkopts: Linker options; defaults to tflite_jni_linkopts().
      linkscript: Version script limiting exported symbols (ELF platforms).
      exported_symbols: Exported-symbols list used on macOS.
      linkshared: Forwarded to cc_binary.
      linkstatic: Forwarded to cc_binary.
      testonly: Forwarded to cc_binary.
      deps: Dependencies of the binary.
      tags: Tags for the generated target.
      srcs: Sources of the binary.
    """
    # Per-platform symbol visibility: macOS uses -exported_symbols_list,
    # ELF platforms use a version script, Windows uses neither.
    linkopts = linkopts + select({
        clean_dep("//tensorflow:macos"): [
            "-Wl,-exported_symbols_list,$(location {})".format(exported_symbols),
            "-Wl,-install_name,@rpath/" + name,
        ],
        clean_dep("//tensorflow:windows"): [],
        "//conditions:default": [
            "-Wl,--version-script,$(location {})".format(linkscript),
            "-Wl,-soname," + name,
        ],
    })
    native.cc_binary(
        name = name,
        copts = copts,
        linkshared = linkshared,
        linkstatic = linkstatic,
        # The script files are added to deps so the $(location ...) labels
        # above can be resolved by Bazel.
        deps = deps + [linkscript, exported_symbols],
        srcs = srcs,
        tags = tags,
        linkopts = linkopts,
        testonly = testonly,
    )
def tflite_cc_shared_object(
        name,
        copts = tflite_copts(),
        linkopts = [],
        linkstatic = 1,
        per_os_targets = False,
        **kwargs):
    """Builds a shared object for TFLite.

    Args:
      name: Name of the resulting shared object target.
      copts: Compiler options; defaults to tflite_copts().
      linkopts: Extra linker options, appended before tflite_jni_linkopts().
      linkstatic: Forwarded to tf_cc_shared_object.
      per_os_targets: Whether to generate per-OS suffixed targets.
      **kwargs: Forwarded to tf_cc_shared_object.
    """
    tf_cc_shared_object(
        name = name,
        copts = copts,
        linkstatic = linkstatic,
        linkopts = linkopts + tflite_jni_linkopts(),
        # TFLite does not link against the TF framework shared library.
        framework_so = [],
        per_os_targets = per_os_targets,
        **kwargs
    )
def tf_to_tflite(name, src, options, out):
    """Convert a frozen tensorflow graphdef to TF Lite's flatbuffer.
    Args:
      name: Name of rule.
      src: name of the input graphdef file.
      options: options passed to TFLite Converter.
      out: name of the output flatbuffer file.
    """
    # $(location ...) labels are expanded to real paths by Bazel when the
    # genrule command runs.
    toco_cmdline = " ".join([
        "$(location //tensorflow/lite/python:tflite_convert)",
        "--experimental_new_converter",
        ("--graph_def_file=$(location %s)" % src),
        ("--output_file=$(location %s)" % out),
    ] + options)
    native.genrule(
        name = name,
        srcs = [src],
        outs = [out],
        cmd = toco_cmdline,
        # tf_binary_additional_srcs() supplies runtime libraries that the
        # converter binary needs at execution time.
        tools = ["//tensorflow/lite/python:tflite_convert"] + tf_binary_additional_srcs(),
    )
def DEPRECATED_tf_to_tflite(name, src, options, out):
    """DEPRECATED Convert a frozen tensorflow graphdef to TF Lite's flatbuffer, using toco.
    Please use tf_to_tflite instead.
    TODO(b/138396996): Migrate away from this deprecated rule.
    Args:
      name: Name of rule.
      src: name of the input graphdef file.
      options: options passed to TOCO.
      out: name of the output flatbuffer file.
    """
    # Same structure as tf_to_tflite, but drives the legacy toco binary
    # with its explicit input/output format flags.
    toco_cmdline = " ".join([
        "$(location //tensorflow/lite/toco:toco)",
        "--input_format=TENSORFLOW_GRAPHDEF",
        "--output_format=TFLITE",
        ("--input_file=$(location %s)" % src),
        ("--output_file=$(location %s)" % out),
    ] + options)
    native.genrule(
        name = name,
        srcs = [src],
        outs = [out],
        cmd = toco_cmdline,
        tools = ["//tensorflow/lite/toco:toco"] + tf_binary_additional_srcs(),
    )
def tflite_to_json(name, src, out):
    """Convert a TF Lite flatbuffer to JSON.
    Args:
      name: Name of rule.
      src: name of the input flatbuffer file.
      out: name of the output JSON file.
    """
    flatc = "@flatbuffers//:flatc"
    schema = "//tensorflow/lite/schema:schema.fbs"
    # Copy the input to a temp .bin so flatc derives a predictable .json
    # basename, then move the result into the declared output location.
    native.genrule(
        name = name,
        srcs = [schema, src],
        outs = [out],
        cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.bin &&" +
               "$(location %s) --raw-binary --strict-json -t" +
               " -o /tmp $(location %s) -- $${TMP}.bin &&" +
               "cp $${TMP}.json $(location %s)") %
              (src, flatc, schema, out),
        tools = [flatc],
    )
def json_to_tflite(name, src, out):
    """Convert a JSON file to TF Lite's flatbuffer.
    Args:
      name: Name of rule.
      src: name of the input JSON file.
      out: name of the output flatbuffer file.
    """
    flatc = "@flatbuffers//:flatc"
    # NOTE(review): this uses label "schema_fbs" while tflite_to_json uses
    # "schema.fbs" — confirm the difference is intentional.
    schema = "//tensorflow/lite/schema:schema_fbs"
    # Inverse of tflite_to_json: stage the JSON under a temp name, run flatc
    # in binary mode, then copy the generated .bin to the declared output.
    native.genrule(
        name = name,
        srcs = [schema, src],
        outs = [out],
        cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.json &&" +
               "$(location %s) --raw-binary --unknown-json --allow-non-utf8 -b" +
               " -o /tmp $(location %s) $${TMP}.json &&" +
               "cp $${TMP}.bin $(location %s)") %
              (src, flatc, schema, out),
        tools = [flatc],
    )
# This is the master list of generated examples that will be made into tests. A
# function called make_XXX_tests() must also appear in generate_examples.py.
# Disable a test by adding it to the blacklists specified in
# generated_test_models_failing().
def generated_test_models():
    """Returns the master list of generated-example model test names.

    Every entry must have a matching make_XXX_tests() function in
    generate_examples.py. To disable a test, add it to the lists returned
    by generated_test_models_failing() rather than removing it here.
    """
    return [
        "abs",
        "add",
        "add_n",
        "arg_min_max",
        "avg_pool",
        "batch_to_space_nd",
        "cast",
        "ceil",
        "concat",
        "constant",
        "conv",
        "conv_relu",
        "conv_relu1",
        "conv_relu6",
        "conv2d_transpose",
        "conv_with_shared_weights",
        "conv_to_depthwiseconv_with_shared_weights",
        "cos",
        "depthwiseconv",
        "depth_to_space",
        "div",
        "elu",
        "equal",
        "exp",
        "embedding_lookup",
        "expand_dims",
        "eye",
        "fill",
        "floor",
        "floor_div",
        "floor_mod",
        "fully_connected",
        "fused_batch_norm",
        "gather",
        "gather_nd",
        "gather_with_constant",
        "global_batch_norm",
        "greater",
        "greater_equal",
        "hardswish",
        "identity",
        "sum",
        "l2norm",
        "l2norm_shared_epsilon",
        "l2_pool",
        "leaky_relu",
        "less",
        "less_equal",
        "local_response_norm",
        "log_softmax",
        "log",
        "logical_and",
        "logical_or",
        "logical_xor",
        "lstm",
        "matrix_diag",
        "matrix_set_diag",
        "max_pool",
        "maximum",
        "mean",
        "minimum",
        "mirror_pad",
        "mul",
        "nearest_upsample",
        "neg",
        "not_equal",
        "one_hot",
        "pack",
        "pad",
        "padv2",
        "placeholder_with_default",
        "prelu",
        "pow",
        "range",
        "rank",
        "reduce_any",
        "reduce_max",
        "reduce_min",
        "reduce_prod",
        "relu",
        "relu1",
        "relu6",
        "reshape",
        "resize_bilinear",
        "resize_nearest_neighbor",
        "resolve_constant_strided_slice",
        "reverse_sequence",
        "reverse_v2",
        "rfft2d",
        "round",
        "rsqrt",
        "scatter_nd",
        "shape",
        "sigmoid",
        "sin",
        "slice",
        "softmax",
        "space_to_batch_nd",
        "space_to_depth",
        "sparse_to_dense",
        "split",
        "splitv",
        "sqrt",
        "square",
        "squared_difference",
        "squeeze",
        "strided_slice",
        "strided_slice_1d_exhaustive",
        "strided_slice_np_style",
        "sub",
        "tanh",
        "tile",
        "topk",
        "transpose",
        "transpose_conv",
        "unfused_gru",
        "unidirectional_sequence_lstm",
        "unidirectional_sequence_rnn",
        "unique",
        "unpack",
        "unroll_batch_matmul",
        "where",
        "zeros_like",
    ]
# List of models that fail generated tests for the conversion mode.
# If you have to disable a test, please add here with a link to the appropriate
# bug or issue.
def generated_test_models_failing(conversion_mode):
    """Returns the list of failing test models.
    Args:
      conversion_mode: Conversion mode.
    Returns:
      List of failing test models for the conversion mode.
    """
    if conversion_mode == "toco-flex":
        return [
            "lstm",  # TODO(b/117510976): Restore when lstm flex conversion works.
            "unidirectional_sequence_lstm",
            "unidirectional_sequence_rnn",
        ]

    # The forward-compat mode and the default mode currently share the same
    # single known failure.
    return [
        "merged_models",  # b/150647401
    ]
def generated_test_models_successful(conversion_mode):
    """Returns the list of successful test models.
    Args:
      conversion_mode: Conversion mode.
    Returns:
      List of successful test models for the conversion mode.
    """
    # Everything in the master list that is not a known failure, with the
    # master list's ordering preserved.
    known_failures = generated_test_models_failing(conversion_mode)
    successful = []
    for model in generated_test_models():
        if model not in known_failures:
            successful.append(model)
    return successful
def generated_test_conversion_modes():
    """Returns a list of conversion modes."""
    # The empty string denotes the default (plain TFLite) conversion.
    named_modes = ["toco-flex", "forward-compat"]
    return named_modes + [""]
def common_test_args_for_generated_models(conversion_mode, failing):
    """Returns test args for generated model tests.
    Args:
      conversion_mode: Conversion mode.
      failing: True if the generated model test is failing.
    Returns:
      test args of generated models.
    """
    # Flex conversion shouldn't suffer from the same conversion bugs
    # listed for the default TFLite kernel backend, so it does not skip
    # known-bug cases. (`failing` is currently unused but kept for API
    # symmetry with common_test_tags_for_generated_models.)
    if conversion_mode == "toco-flex":
        return ["--ignore_known_bugs=false"]
    return []
def common_test_tags_for_generated_models(conversion_mode, failing):
    """Returns test tags for generated model tests.
    Args:
      conversion_mode: Conversion mode.
      failing: True if the generated model test is failing.
    Returns:
      tags for the failing generated model tests.
    """
    # Failing tests are kept off TAP and must be run manually; passing
    # tests carry no extra tags.
    return ["notap", "manual"] if failing else []
def generated_test_models_all():
    """Generates a list of all tests with the different converters.
    Returns:
      List of tuples representing:
        (conversion mode, name of test, test tags, test args).
    """
    options = []
    for mode in generated_test_conversion_modes():
        known_failures = generated_test_models_failing(mode)
        for model in generated_test_models():
            is_failing = model in known_failures
            # Non-default modes get a mode suffix appended to the test name.
            test_name = "%s_%s" % (model, mode) if mode else model
            options.append((
                mode,
                test_name,
                common_test_tags_for_generated_models(mode, is_failing),
                common_test_args_for_generated_models(mode, is_failing),
            ))
    return options
def merged_test_model_name():
    """Returns the name of merged test model.
    Returns:
      The name of merged test model.
    """
    # Single source of truth for the merged-model naming scheme; zip-file
    # test names and flags are derived from this prefix elsewhere in this
    # file (see flags_for_merged_test_models and gen_zip_test).
    return "merged_models"
def max_number_of_test_models_in_merged_zip():
    """Returns the maximum number of merged test models in a zip file.
    Returns:
      Maximum number of merged test models in a zip file.
    """
    # Bump with care: this changes how many zip targets
    # number_of_merged_zip_file() emits for each conversion mode.
    return 15
def number_of_merged_zip_file(conversion_mode):
    """Returns the number of merged zip file targets.

    Args:
      conversion_mode: Conversion mode.

    Returns:
      Number of merged zip file targets.
    """
    m = max_number_of_test_models_in_merged_zip()
    # Ceiling division: ceil(num_successful_models / m).
    return (len(generated_test_models_successful(conversion_mode)) + m - 1) // m
def merged_test_models():
    """Generates a list of merged tests with the different converters.
    This model list should be referred only if :generate_examples supports
    --no_tests_limit and --test_sets flags.
    Returns:
      List of tuples representing:
        (conversion mode, name of group, test tags, test args).
    """
    conversion_modes = generated_test_conversion_modes()
    options = []
    for conversion_mode in conversion_modes:
        test = merged_test_model_name()
        if conversion_mode:
            test += "_%s" % conversion_mode
        successful_tests = generated_test_models_successful(conversion_mode)
        if len(successful_tests) > 0:
            tags = common_test_tags_for_generated_models(conversion_mode, False)

            # Only non-merged tests are executed on TAP.
            # Merged test rules are only for running on the real device environment.
            if "notap" not in tags:
                tags.append("notap")
            args = common_test_args_for_generated_models(conversion_mode, False)

            # One zip target per slice of the successful-model list; see
            # flags_for_merged_test_models for the slicing scheme.
            n = number_of_merged_zip_file(conversion_mode)
            for i in range(n):
                test_i = "%s_%d" % (test, i)
                options.append((conversion_mode, test_i, tags, args))
    return options
def flags_for_merged_test_models(test_name, conversion_mode):
    """Returns flags for generating zipped-example data file for merged tests.
    Args:
      test_name: str. Test name in the form of "<merged_model_name>_[<conversion_mode>_]%d".
      conversion_mode: str. Which conversion mode to run with. Comes from the
        list above.
    Returns:
      Flags for generating zipped-example data file for merged tests.
    """
    prefix = merged_test_model_name() + "_"
    if not test_name.startswith(prefix):
        fail(msg = "Invalid test name " + test_name + ": test name should start " +
                   "with " + prefix + " when using flags of merged test models.")
    # Remove prefix and conversion_mode from the test name
    # to extract merged test index number.
    index_string = test_name[len(prefix):]
    if conversion_mode:
        index_string = index_string.replace("%s_" % conversion_mode, "")
    # If the maximum number of test models in a file is 15 and the number of
    # successful test models are 62, 5 zip files will be generated.
    # To assign the test models fairly among these files, each zip file
    # should contain 12 or 13 test models. (62 / 5 = 12 ... 2)
    # Each zip file will have 12 test models and the first 2 zip files will have
    # 1 more test model each, resulting [13, 13, 12, 12, 12] assignment.
    # So Zip file 0, 1, 2, 3, 4 and 5 will have model[0:13], model[13:26],
    # model[26,38], model[38,50] and model[50,62], respectively.
    zip_index = int(index_string)
    num_merged_zips = number_of_merged_zip_file(conversion_mode)
    test_models = generated_test_models_successful(conversion_mode)
    # Each zip file has (models_per_zip) or (models_per_zip+1) test models.
    models_per_zip = len(test_models) // num_merged_zips
    # First (models_remaining) zip files have (models_per_zip+1) test models each.
    models_remaining = len(test_models) % num_merged_zips
    if zip_index < models_remaining:
        # Zip files [0:models_remaining] have (models_per_zip+1) models.
        begin = (models_per_zip + 1) * zip_index
        end = begin + (models_per_zip + 1)
    else:
        # Zip files [models_remaining:] have (models_per_zip) models.
        begin = models_per_zip * zip_index + models_remaining
        end = begin + models_per_zip
    tests_csv = ""
    for test_model in test_models[begin:end]:
        tests_csv += "%s," % test_model
    if tests_csv != "":
        tests_csv = tests_csv[:-1]  # Remove trailing comma.
    # The leading space matters: callers append this string verbatim to an
    # existing flag string (see gen_zip_test).
    return " --no_tests_limit --test_sets=%s" % tests_csv
def gen_zip_test(
        name,
        test_name,
        conversion_mode,
        test_tags,
        test_args,
        additional_test_tags_args = {},
        **kwargs):
    """Generate a zipped-example test and its dependent zip files.
    Args:
      name: str. Resulting cc_test target name
      test_name: str. Test targets this model. Comes from the list above.
      conversion_mode: str. Which conversion mode to run with. Comes from the
        list above.
      test_tags: tags for the generated cc_test.
      test_args: the basic cc_test args to be used.
      additional_test_tags_args: a dictionary of additional test tags and args
        to be used together with test_tags and test_args. The key is an
        identifier which can be in creating a test tag to identify a set of
        tests. The value is a tuple of list of additional test tags and args to
        be used.
      **kwargs: tf_cc_test kwargs
    """
    toco = "//tensorflow/lite/toco:toco"
    flags = ""
    if conversion_mode == "toco-flex":
        flags += " --ignore_converter_errors --run_with_flex"
    elif conversion_mode == "forward-compat":
        flags += " --make_forward_compat_test"
    # Merged tests additionally select which model subset goes into the zip.
    if test_name.startswith(merged_test_model_name() + "_"):
        flags += flags_for_merged_test_models(test_name, conversion_mode)
    gen_zipped_test_file(
        name = "zip_%s" % test_name,
        file = "%s.zip" % test_name,
        toco = toco,
        flags = flags + " --save_graphdefs",
    )
    tf_cc_test(
        name,
        args = test_args,
        tags = test_tags + ["gen_zip_test"],
        **kwargs
    )
    # One extra cc_test variant per entry in additional_test_tags_args.
    for key, value in additional_test_tags_args.items():
        extra_tags, extra_args = value
        # NOTE(review): this appends into the caller-provided list stored in
        # additional_test_tags_args — confirm the mutation is harmless.
        extra_tags.append("gen_zip_test_%s" % key)
        tf_cc_test(
            name = "%s_%s" % (name, key),
            args = test_args + extra_args,
            tags = test_tags + extra_tags,
            **kwargs
        )
def gen_zipped_test_file(name, file, toco, flags):
    """Generate a zip file of tests by using :generate_examples.
    Args:
      name: str. Name of output. We will produce "`file`.files" as a target.
      file: str. The name of one of the generated_examples targets, e.g. "transpose"
      toco: str. Pathname of toco binary to run
      flags: str. Any additional flags to include
    """
    # The genrule is named "<file>.files" and produces the zip; the
    # filegroup below re-exposes that zip under `name`.
    native.genrule(
        name = file + ".files",
        cmd = (("$(locations :generate_examples) --toco $(locations {0}) " +
                " --zip_to_output {1} {2} $(@D)").format(toco, file, flags)),
        outs = [file],
        tools = [
            ":generate_examples",
            toco,
        ],
    )
    native.filegroup(
        name = name,
        srcs = [file],
    )
def gen_selected_ops(name, model, namespace = "", **kwargs):
    """Generate the library that includes only used ops.
    Args:
      name: Name of the generated library.
      model: TFLite models to interpret, expect a list in case of multiple models.
      namespace: Namespace in which to put RegisterSelectedOps.
      **kwargs: Additional kwargs to pass to genrule.
    """
    out = name + "_registration.cc"
    tool = clean_dep("//tensorflow/lite/tools:generate_op_registrations")
    tflite_path = "//tensorflow/lite"
    # isinstance is not supported in skylark.
    if type(model) != type([]):
        model = [model]
    input_models_args = " --input_models=%s" % ",".join(
        ["$(location %s)" % f for f in model],
    )
    native.genrule(
        name = name,
        srcs = model,
        outs = [out],
        # tflite_path[2:] strips the leading "//" from the package label.
        cmd = ("$(location %s) --namespace=%s --output_registration=$(location %s) --tflite_path=%s %s") %
              (tool, namespace, out, tflite_path[2:], input_models_args),
        tools = [tool],
        **kwargs
    )
def flex_dep(target_op_sets):
    """Returns the flex delegate dep when SELECT_TF_OPS is requested.

    Note: callers pass a comma-separated string of op sets, so this is a
    substring/membership test by design.
    """
    needs_flex = "SELECT_TF_OPS" in target_op_sets
    return ["//tensorflow/lite/delegates/flex:delegate"] if needs_flex else []
def gen_model_coverage_test(src, model_name, data, failure_type, tags, size = "medium"):
    """Generates Python test targets for testing TFLite models.
    Args:
      src: Main source file.
      model_name: Name of the model to test (must be also listed in the 'data'
        dependencies)
      data: List of BUILD targets linking the data.
      failure_type: List of failure types (none, toco, crash, inference, evaluation)
        expected for the corresponding combinations of op sets
        ("TFLITE_BUILTINS", "TFLITE_BUILTINS,SELECT_TF_OPS", "SELECT_TF_OPS").
      tags: List of strings of additional tags.
      size: Test size ("small"/"medium"/"large"/"enormous"); defaults to "medium".
    """
    # failure_type[i] pairs positionally with the op-set combination below,
    # so iterate with an explicit index instead of a manual counter.
    for i, target_op_sets in enumerate(["TFLITE_BUILTINS", "TFLITE_BUILTINS,SELECT_TF_OPS", "SELECT_TF_OPS"]):
        args = []
        if failure_type[i] != "none":
            args.append("--failure_type=%s" % failure_type[i])

        # Avoid coverage timeouts for large/enormous tests.
        coverage_tags = ["nozapfhahn"] if size in ["large", "enormous"] else []
        native.py_test(
            name = "model_coverage_test_%s_%s" % (model_name, target_op_sets.lower().replace(",", "_")),
            srcs = [src],
            main = src,
            size = size,
            args = [
                "--model_name=%s" % model_name,
                "--target_ops=%s" % target_op_sets,
            ] + args,
            data = data,
            srcs_version = "PY2AND3",
            python_version = "PY3",
            tags = [
                "no_gpu",  # Executing with TF GPU configurations is redundant.
                "no_oss",
                "no_windows",
            ] + tags + coverage_tags,
            deps = [
                "//tensorflow/lite/testing/model_coverage:model_coverage_lib",
                "//tensorflow/lite/python:lite",
                "//tensorflow/python:client_testlib",
            ] + flex_dep(target_op_sets),
        )
def tflite_custom_cc_library(
        name,
        models = [],
        srcs = [],
        deps = [],
        visibility = ["//visibility:private"]):
    """Generates a tflite cc library, stripping off unused operators.
    This library includes the TfLite runtime as well as all operators needed for the given models.
    Op resolver can be retrieved using tflite::CreateOpResolver method.
    Args:
        name: Str, name of the target.
        models: List of models. This TFLite build will only include
            operators used in these models. If the list is empty, all builtin
            operators are included.
        srcs: List of files implementing custom operators if any.
        deps: Additional dependencies to build all the custom operators.
        visibility: Visibility setting for the generated target. Default to private.
    """
    real_srcs = []
    real_srcs.extend(srcs)
    real_deps = []
    real_deps.extend(deps)
    if models:
        # Generate a registration source limited to the ops these models use.
        gen_selected_ops(
            name = "%s_registration" % name,
            model = models,
        )
        real_srcs.append(":%s_registration" % name)
        real_deps.append("//tensorflow/lite/java/src/main/native:selected_ops_jni")
    else:
        # Support all operators if `models` not specified.
        real_deps.append("//tensorflow/lite/java/src/main/native")
    native.cc_library(
        name = name,
        srcs = real_srcs,
        hdrs = [
            # TODO(b/161323860) replace this by generated header.
            "//tensorflow/lite/java/src/main/native:op_resolver.h",
        ],
        copts = tflite_copts(),
        linkopts = select({
            "//tensorflow:windows": [],
            "//conditions:default": ["-lm", "-ldl"],
        }),
        # NOTE(review): deps is passed as a depset; newer Bazel versions
        # require a list here — confirm against the supported Bazel range.
        deps = depset([
            "//tensorflow/lite:framework",
            "//tensorflow/lite/kernels:builtin_ops",
        ] + real_deps),
        visibility = visibility,
    )
def tflite_custom_android_library(
        name,
        models = [],
        srcs = [],
        deps = [],
        custom_package = "org.tensorflow.lite",
        visibility = ["//visibility:private"]):
    """Generates a tflite Android library, stripping off unused operators.
    Note that due to a limitation in the JNI Java wrapper, the compiled TfLite shared binary
    has to be named as tensorflowlite_jni.so so please make sure that there is no naming conflict.
    i.e. you can't call this rule multiple times in the same build file.
    Args:
        name: Name of the target.
        models: List of models to be supported. This TFLite build will only include
            operators used in these models. If the list is empty, all builtin
            operators are included.
        srcs: List of files implementing custom operators if any.
        deps: Additional dependencies to build all the custom operators.
        custom_package: Name of the Java package. It is required by android_library in case
            the Java source file can't be inferred from the directory where this rule is used.
        visibility: Visibility setting for the generated target. Default to private.
    """
    tflite_custom_cc_library(name = "%s_cc" % name, models = models, srcs = srcs, deps = deps, visibility = visibility)

    # JNI wrapper expects a binary file called `libtensorflowlite_jni.so` in java path.
    tflite_jni_binary(
        name = "libtensorflowlite_jni.so",
        linkscript = "//tensorflow/lite/java:tflite_version_script.lds",
        deps = [
            ":%s_cc" % name,
            "//tensorflow/lite/java/src/main/native:native_framework_only",
        ],
    )
    # Wrap the .so so it can be depended on by the android_library below.
    native.cc_library(
        name = "%s_jni" % name,
        srcs = ["libtensorflowlite_jni.so"],
        visibility = visibility,
    )
    android_library(
        name = name,
        manifest = "//tensorflow/lite/java:AndroidManifest.xml",
        deps = [
            ":%s_jni" % name,
            "//tensorflow/lite/java:tensorflowlite_java",
            "@org_checkerframework_qual",
        ],
        custom_package = custom_package,
        visibility = visibility,
    )
    # Also expose the result as an .aar for consumption outside Bazel.
    aar_with_jni(
        name = "%s_aar" % name,
        android_library = name,
    )
| 32.883484 | 133 | 0.601225 |
7954f1bbc29135d85c0c2ea93ee80960217d90da | 2,220 | py | Python | gmid2/scripts/jg_bte_bw.py | junkyul/gmid2-public | 363472b8b69212dd6a9dac61d3e5d23936a5a6d2 | [
"MIT"
] | null | null | null | gmid2/scripts/jg_bte_bw.py | junkyul/gmid2-public | 363472b8b69212dd6a9dac61d3e5d23936a5a6d2 | [
"MIT"
] | null | null | null | gmid2/scripts/jg_bte_bw.py | junkyul/gmid2-public | 363472b8b69212dd6a9dac61d3e5d23936a5a6d2 | [
"MIT"
] | 1 | 2020-12-28T20:06:37.000Z | 2020-12-28T20:06:37.000Z | PRJ_PATH = "/home/junkyul/conda/gmid2"
import sys
sys.path.append(PRJ_PATH)
import os
import time
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
from gmid2.global_constants import *
from gmid2.basics.uai_files import read_limid, read_vo, write_vo
from gmid2.basics.undirected_network import PrimalGraph, call_variable_ordering, get_induced_width_from_ordering
from gmid2.basics.graphical_model import GraphicalModel
from gmid2.inference.bucket import bucket_tree_decomposition
from gmid2.inference.pgm_bte import PgmBTELog
def run(file_path):
    """Runs bucket-tree elimination (PgmBTELog) on a .uai influence diagram.

    Reads the model, builds the primal graph, obtains a variable ordering
    (from a cached ``.vo`` file when available, otherwise computed and
    cached), builds and propagates the bucket tree, and prints timings.

    Args:
        file_path: Path to the ``.uai`` model file.

    Returns:
        The upper bound produced by the propagation.
    """
    print("{}".format(PgmBTELog.__name__))
    f = file_path.split("/")[-1].replace(".uai", "")
    print("\nSTART {}\t\t{}".format(f, time.ctime(time.time())))
    file_name = file_path.replace(".uai", "")
    file_info = read_limid(file_name, skip_table=False)
    gm = GraphicalModel()
    gm.build(file_info)
    gm.convert_prob_to_log()
    gm.convert_util_to_alpha(1.0)
    t0 = time.time()
    pg = PrimalGraph()
    pg.build_from_scopes(gm.scope_vids)
    try:
        print("read vo from file")
        read_vo(file_name + ".vo", file_info)
        bw_ordering = file_info.vo
        bw_iw = get_induced_width_from_ordering(pg, nid_ordering=[pg.nid2vid[el] for el in bw_ordering])
    except Exception:
        # No (or unreadable) cached .vo file: compute an ordering and cache it.
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit.
        pvo_vid = file_info.blocks
        bw_ordering, bw_iw = call_variable_ordering(pg, 10000, pvo_vid)
        write_vo(file_name + ".vo", bw_ordering, bw_iw)
    print("w\t\t{}\nvo\t\t{}".format(bw_iw, " ".join(str(el) for el in bw_ordering)))
    bt = bucket_tree_decomposition(gm, bw_ordering)
    bte = PgmBTELog(gm, bw_ordering)
    bte.build_message_graph(bt)
    print("build\t\t{}".format(time.time() - t0))
    bte.schedule(bt)
    bte.init_propagate()
    t0 = time.time()
    bte.propagate_iter()
    bound = bte.bounds()
    print("prop\t\t{}".format(time.time() - t0))
    print("ub\t\t{}".format(bound))
    print("END {}\t\t{}".format(f, time.ctime(time.time())))
    return bound
if __name__ == "__main__":
    # Optional CLI argument: path to a .uai file; falls back to a bundled
    # synthetic benchmark model when none is given.
    if len(sys.argv) > 1:
        file_path = sys.argv[1]
    else:
        TEST_PATH = os.path.join(BENCHMARK_DIR, "synthetic")
        f = "mdp1-4_2_2_5.uai"
        file_path = os.path.join(TEST_PATH, f)
    run(file_path)
7954f3c7d0f872e8ed360755da5dd9132b698a31 | 840 | py | Python | backend/posts/migrations_back/0003_auto_20170303_2103.py | stevethompsonstar/django-react-blog | 88af926454901c826acc9e2996addd0d53b0626a | [
"MIT"
] | 592 | 2017-03-07T04:29:08.000Z | 2020-09-21T00:36:58.000Z | backend/posts/migrations_back/0003_auto_20170303_2103.py | stevethompsonstar/django-react-blog | 88af926454901c826acc9e2996addd0d53b0626a | [
"MIT"
] | 8 | 2017-03-08T01:22:36.000Z | 2020-08-20T15:45:42.000Z | backend/posts/migrations_back/0003_auto_20170303_2103.py | stevethompsonstar/django-react-blog | 88af926454901c826acc9e2996addd0d53b0626a | [
"MIT"
] | 102 | 2017-03-07T05:42:47.000Z | 2020-08-28T20:02:20.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 21:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Category model and removes
    the self-referential ``parent`` field from Tag. Do not hand-edit the
    operations; create a new migration instead."""
    dependencies = [
        ('posts', '0002_auto_20170301_1328'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=64)),
                ('slug', models.SlugField(default='', max_length=64)),
            ],
            options={
                'verbose_name_plural': 'categories',
            },
        ),
        migrations.RemoveField(
            model_name='tag',
            name='parent',
        ),
    ]
7954f3ecda5067f6e523c31c01f28a2b51d2eab0 | 520 | py | Python | rookie/users/admin.py | rookie0806/ajounice | eb1780a8f94d847cfcfe83f4b555729ab9f9a44a | [
"MIT"
] | null | null | null | rookie/users/admin.py | rookie0806/ajounice | eb1780a8f94d847cfcfe83f4b555729ab9f9a44a | [
"MIT"
] | 16 | 2020-06-05T19:35:23.000Z | 2022-03-08T22:33:45.000Z | rookie/users/admin.py | rookie0806/ajounice | eb1780a8f94d847cfcfe83f4b555729ab9f9a44a | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from rookie.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
    """Django admin configuration for the project's custom User model."""
    form = UserChangeForm
    add_form = UserCreationForm
    # Prepend a "User" section exposing the custom fields to the stock fieldsets.
    fieldsets = (("User", {"fields": ("name","gender")}),) + auth_admin.UserAdmin.fieldsets
    list_display = ["username", "name", "is_superuser"]
    search_fields = ["name"]
7954f46899f49f21d66ade05a31fc9161694ded1 | 4,006 | py | Python | pix2pix/options/test_options.py | wattanapong/DFA | c05851beca2f8739f80531eb4de2f61639715cab | [
"Apache-2.0"
] | null | null | null | pix2pix/options/test_options.py | wattanapong/DFA | c05851beca2f8739f80531eb4de2f61639715cab | [
"Apache-2.0"
] | null | null | null | pix2pix/options/test_options.py | wattanapong/DFA | c05851beca2f8739f80531eb4de2f61639715cab | [
"Apache-2.0"
] | null | null | null | from .base_options import BaseOptions
class TestOptions(BaseOptions):
    """This class includes test options.
    It also includes shared options defined in BaseOptions.
    """
    def initialize(self, parser):
        """Adds test-time options to `parser` and returns it."""
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm has different behavior during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite devalue values
        parser.set_defaults(model='test')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        self.isTrain = False
        # Defaults for the pix2pix-style generator/GAN configuration.
        parser.set_defaults(norm='batch', netG='unet_128', dataset_mode='aligned')
        parser.set_defaults(pool_size=0, gan_mode='lsgan')
        parser.add_argument('--lambda_L1', type=float, default=500, help='weight for L1 loss')
        # Tracking / attack-evaluation options.
        parser.add_argument('--cfg', type=str, default='config.yaml',
                            help='configuration of tracking')
        parser.add_argument('--dataset', type=str, default='OTB100',
                            help='datasets')
        parser.add_argument('--dataset_dir', type=str,
                            default='/media/wattanapongsu/4T/dataset',
                            help='dataset directory')
        parser.add_argument('--video', default='', type=str,
                            help='eval one special video')
        parser.add_argument('--saved_dir', default='', type=str,
                            help='save images and videos in this directory')
        parser.add_argument('--fabricated_dir', default='', type=str,
                            help='save images and videos in this directory')
        parser.add_argument('--netG_pretrained', default='', type=str,
                            help='netG pretrained ')
        parser.add_argument('--snapshot', default='', type=str,
                            help='snapshot of models to eval')
        parser.add_argument('--attack_search', action='store_true',
                            help='enable attack search')
        parser.add_argument('--model_name', default='', type=str,
                            help='model name ')
        parser.add_argument('--k', default=1, type=float,
                            help='disturbed parameter in template')
        parser.add_argument('--ks', default=1, type=float,
                            help='amplified parameter in searching')
        parser.add_argument('--chk', default=1, type=int,
                            help='checkpoint number')
        parser.add_argument('--export_video', action='store_true',
                            help='export video output')
        parser.add_argument('--vis', dest='vis', action='store_true')
        parser.add_argument('--gpus', default=0, type=int,
                            help='number of gpus')
        parser.add_argument('--search_attack', action='store_true',
                            help='attack search image')
        parser.add_argument('--z_size', type=int, default=128,
                            help='interpolate template size')
        parser.add_argument('--model_search', action='store_true',
                            help='generate noise in search style')
        parser.add_argument('--skip_exist', action='store_true',
                            help='skip testing when video exists ')
        return parser
7954f51c6ff43641a67cf2179b2a47727c9213a2 | 23,340 | py | Python | src/spectrum.py | eirikgje/c3pp | 0efe2cba7960131509d5d968ac825c4f1dbbc552 | [
"MIT"
] | null | null | null | src/spectrum.py | eirikgje/c3pp | 0efe2cba7960131509d5d968ac825c4f1dbbc552 | [
"MIT"
] | null | null | null | src/spectrum.py | eirikgje/c3pp | 0efe2cba7960131509d5d968ac825c4f1dbbc552 | [
"MIT"
] | null | null | null | from matplotlib import rcParams, rc
import matplotlib.patheffects as path_effects
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
from cycler import cycler
from tqdm import trange, tqdm
import numpy as np
import healpy as hp
import sys
import math
from brokenaxes import brokenaxes
import src.tools as tls
def Spectrum(pol, long, darkmode, png, foregrounds, masks, nside):
    """Plot RMS brightness-temperature SEDs of sky components vs. frequency.

    Computes each foreground's spectrum (via ``getspec``), draws it on a
    log-log figure together with shaded experiment frequency bands, and
    saves the figure to disk.

    Parameters
    ----------
    pol : bool
        If True, plot polarization spectra; otherwise intensity.
    long : bool
        If True, use the extended frequency range with a broken y-axis
        (two stacked panels).
    darkmode : bool
        If True, use white axes/labels suitable for dark backgrounds.
    png : bool
        If True save as PNG, otherwise PDF.
    foregrounds : dict
        Mapping of label -> foreground spec (keys include "params",
        "function", "color", "linestyle", "gradient", "sum", "position");
        a computed "spectrum" entry is added in place for each component.
        Must contain a "Sum fg." entry used to accumulate summed spectra.
    masks : list
        Paths to sky masks; RMS is evaluated per mask (two sky fractions).
    nside : int
        HEALPix resolution at which maps are evaluated.
    """
    params = {'savefig.dpi' : 300, # save figures to 300 dpi
              'xtick.top' : False,
              'ytick.right' : True, #Set to false
              'axes.spines.top' : True, #Set to false
              'axes.spines.bottom' : True,
              'axes.spines.left' : True,
              'axes.spines.right' : True, #Set to false@
              'axes.grid.axis' : 'y',
              'axes.grid' : False,
              'ytick.major.size' : 10,
              'ytick.minor.size' : 5,
              'xtick.major.size' : 10,
              'xtick.minor.size' : 5,
              'ytick.major.width' : 1.5,
              'ytick.minor.width' : 1.5,
              'xtick.major.width' : 1.5,
              'xtick.minor.width' : 1.5,
              'axes.linewidth' : 1.5,
              'axes.prop_cycle' : cycler(color=['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'])
              #'ytick.major.size' : 6,
              #'ytick.minor.size' : 3,
              #'xtick.major.size' : 6,
              #'xtick.minor.size' : 3,
              }
    # Named color shortcuts for the default prop cycle above.
    blue, red, green, purple, orange, teal, lightred, lightgreen, pink, yellow = ("C0","C1","C2","C3","C4","C5","C6","C7","C8","C9",)
    black = 'k'
    if darkmode:
        rcParams['text.color'] = 'white' # axes background color
        rcParams['axes.facecolor'] = 'white' # axes background color
        rcParams['axes.edgecolor' ] = 'white' # axes edge color
        rcParams['axes.labelcolor'] = 'white'
        rcParams['xtick.color'] = 'white' # color of the tick labels
        rcParams['ytick.color'] = 'white' # color of the tick labels
        rcParams['grid.color'] = 'white' # grid color
        rcParams['legend.facecolor'] = 'inherit' # legend background color (when 'inherit' uses axes.facecolor)
        rcParams['legend.edgecolor']= 'white' # legend edge color (when 'inherit' uses axes.edgecolor)
        black = 'white'
    rcParams.update(params)
    # ---- Figure parameters ----
    # Axis limits depend on polarization/intensity and on the long layout.
    if pol:
        ymin, ymax = (1e-3, 2e2)
        if long:
            xmin, xmax = (1, 3000)
            ymax15, ymax2 = (ymax+100, 1e7)
        else:
            xmin, xmax = (10, 1000)
    else:
        ymin, ymax = (0.05, 7e2)
        if long:
            xmin, xmax = (0.3, 4000)
            ymax15, ymax2 = (ymax+500, 1e7)
        else:
            xmin, xmax = (10, 1000)
    if long:
        # Figure
        ratio = 5
        w, h = (16,8)
        fig, (ax2, ax) = plt.subplots(2,1,sharex=True,figsize=(w,h),gridspec_kw = {'height_ratios':[1, ratio]})
        aspect_ratio = w/h*1.25 # Correct for ratio
        rotdir = -1
        ax2.spines['bottom'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax2.tick_params(labelbottom=False)
        ax2.xaxis.set_ticks_position('none')
        # ---- Adding broken axis lines ----
        d = .005 # how big to make the diagonal lines in axes coordinates
        kwargs = dict(transform=ax2.transAxes, color=black, clip_on=False)
        ax2.plot((-d, +d), (-d*ratio, + d*ratio), **kwargs) # top-left diagonal
        ax2.plot((1 - d, 1 + d), (-d*ratio, +d*ratio), **kwargs) # top-right diagonal
        kwargs.update(transform=ax.transAxes) # switch to the bottom axes
        ax.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
        ax.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
        # textsize
        freqtext = 16
        fgtext = 18
    else:
        ymax2=ymax
        ymax15=ymax
        w, h = (12,8)
        fig, ax = plt.subplots(1,1,figsize=(w,h))
        aspect_ratio = w/h
        rotdir = 1
        #ax.set_aspect('equal', adjustable='box')
        freqtext = 20
        fgtext = 20
    # Spectrum parameters
    field = 1 if pol else 0
    nu = np.logspace(np.log10(0.1),np.log10(5000),1000)
    npix = hp.nside2npix(nside)
    # Read masks
    m = np.ones((len(masks), npix))
    for i, mask in enumerate(masks):
        # Read and ud_grade mask
        if mask:
            m_temp = hp.read_map(mask, field=0, dtype=None, verbose=False)
            if hp.npix2nside(len(m_temp)) != nside:
                m[i] = hp.ud_grade(m_temp, nside)
                m[i,m[i,:]>0.5] = 1 # Set all mask values to integer
                m[i,m[i,:]<0.5] = 0 # Set all mask values to integer
            else:
                m[i] = m_temp
    # Get indices of smallest mask
    # NOTE(review): argmax of the per-mask pixel sums selects the mask with
    # the LARGEST unmasked area; "smallest mask" here appears to mean the
    # least restrictive mask — confirm intent.
    idx = m[np.argmax(np.sum(m, axis=1)), :] > 0.5
    skyfracs = np.sum(m,axis=1)/npix*100
    print(f"Using sky fractions {skyfracs}%")
    # Looping over foregrounds and calculating spectra
    i = 0
    add_error = True
    for fg in foregrounds.keys():
        if not fg == "Sum fg.":
            if fg.startswith("CO"): # get closest thing to ref freq
                foregrounds[fg]["params"][-2], _ = find_nearest(nu, foregrounds[fg]["params"][-2])
            foregrounds[fg]["spectrum"] = getspec(nu*1e9, fg, foregrounds[fg]["params"], foregrounds[fg]["function"], field, nside, npix, idx, m,)
            """
            if fg.startswith("CO"):#unit conversion fudge factor
                foregrounds[fg]["spectrum"
                ] = foregrounds[fg]["spectrum"]*75
            """
            # Soft-clip low/high percentile curves to suppress noise blow-up
            # near zero (only for 2-row spatially varying spectra).
            if add_error and foregrounds[fg]["spectrum"].shape[0]>1 and not fg.startswith("CO"):
                thresh=0.1
                alpha=0.5
                foregrounds[fg]["spectrum"][0] = foregrounds[fg]["spectrum"][0]*(1-np.exp(-(abs(foregrounds[fg]["spectrum"][0]/thresh)**alpha)))
                foregrounds[fg]["spectrum"][1] = foregrounds[fg]["spectrum"][1]/(1-np.exp(-(abs(foregrounds[fg]["spectrum"][1]/thresh)**alpha)))
        if foregrounds[fg]["sum"]:
            if i==0:
                if foregrounds[fg]["spectrum"].shape[0] == 1:
                    # special case where first summed is 1d
                    foregrounds["Sum fg."]["spectrum"] = np.concatenate((foregrounds[fg]["spectrum"],foregrounds[fg]["spectrum"])).copy()
                else:
                    foregrounds["Sum fg."]["spectrum"] = foregrounds[fg]["spectrum"].copy()
            else:
                foregrounds["Sum fg."]["spectrum"] += foregrounds[fg]["spectrum"]
            i+=1
    # ---- Plotting foregrounds and labels ----
    j=0
    for label, fg in foregrounds.items(): # Plot all fgs except sumf
        if fg["gradient"]:
            k = 0
            gradient_fill(nu, fg["spectrum"][k], fill_color=fg["color"], ax=ax, alpha=0.5, linewidth=0.0,)
        else:
            if label == "Sum fg.":
                ax.loglog(nu,fg["spectrum"][0], linestyle=fg["linestyle"], linewidth=2, color=fg["color"])
                if long:
                    ax2.loglog(nu,fg["spectrum"][0], linestyle=fg["linestyle"], linewidth=2, color=fg["color"])
                k = 0
                try:
                    ax.loglog(nu,fg["spectrum"][1], linestyle=fg["linestyle"], linewidth=2, color=fg["color"])
                    if long:
                        ax2.loglog(nu,fg["spectrum"][1], linestyle=fg["linestyle"], linewidth=2, color=fg["color"])
                    k=1
                except:
                    pass
            elif label.startswith("CO"):
                # CO lines are drawn as vertical markers at the line frequency.
                lfreq = nu[np.argmax(fg["spectrum"][0])]
                if fg["spectrum"].shape[0] > 1:
                    ax.loglog([lfreq,lfreq],[max(fg["spectrum"][0]), max(fg["spectrum"][1])], linestyle=fg["linestyle"], linewidth=4, color=fg["color"],zorder=1000)
                    k=1
                else:
                    k=0
                    ax.bar(lfreq, fg["spectrum"][0], color=black,)
            else:
                if fg["spectrum"].shape[0] == 1:
                    ax.loglog(nu,fg["spectrum"][0], linestyle=fg["linestyle"], linewidth=4, color=fg["color"])
                    if long:
                        ax2.loglog(nu,fg["spectrum"][0], linestyle=fg["linestyle"], linewidth=4, color=fg["color"])
                    k = 0
                else:
                    #gradient_fill(nu, fg["spectrum"][0], fill_color=fg["color"], ax=ax, alpha=0.5, linewidth=0.0,)
                    ax.loglog(nu,np.mean(fg["spectrum"],axis=0), linestyle=fg["linestyle"], linewidth=4, color=fg["color"])
                    ax.fill_between(nu,fg["spectrum"][0],fg["spectrum"][1], color=fg["color"],alpha=0.5)
                    if long:
                        ax2.loglog(nu,np.mean(fg["spectrum"],axis=0), linestyle=fg["linestyle"], linewidth=4, color=fg["color"])
                        ax2.fill_between(nu,fg["spectrum"][0],fg["spectrum"][1], color=fg["color"], alpha=0.5)
                    k = 1
            if label == "Thermal Dust" and fg["spectrum"].shape[0]>1:
                if long:
                    _, fsky_idx = find_nearest(nu, 900)
                else:
                    _, fsky_idx = find_nearest(nu, 700)
                ax.annotate(r"$f_{sky}=$"+"{:d}%".format(int(skyfracs[1])), xy=(nu[fsky_idx], fg["spectrum"][1][fsky_idx]), ha="center", va="bottom", fontsize=fgtext, color="grey", xytext=(0,5), textcoords="offset pixels",)
                ax.annotate(r"$f_{sky}=$"+"{:d}%".format(int(skyfracs[0])), xy=(nu[fsky_idx], fg["spectrum"][0][fsky_idx]), ha="center", va="top", fontsize=fgtext, color="grey", xytext=(0,-15), textcoords="offset pixels",)
        if label.startswith("CO"):
            ax.text(lfreq, np.max(fg["spectrum"][k])*0.5, label, color=fg["color"], alpha=0.7, ha='right',va='center',rotation=90,fontsize=fgtext, path_effects=[path_effects.withSimplePatchShadow(alpha=0.8, offset=(1, -1))], zorder=1000)
        else:
            # Rotate each label to follow the local slope of its curve.
            x0, idx1 = find_nearest(nu, fg["position"])
            x1, idx2 = find_nearest(nu, fg["position"]*1.2)
            y0 = fg["spectrum"][k][idx1]
            y1 = fg["spectrum"][k][idx2]
            datascaling = np.log(xmin/xmax)/np.log(ymin/ymax)
            rotator = (datascaling/aspect_ratio)
            alpha = np.arctan(np.log(y1/y0)/np.log(x1/x0)*rotator)
            rotation = np.rad2deg(alpha)#*rotator
            ax.annotate(label, xy=(x0,y0), xytext=(0,7), textcoords="offset pixels", rotation=rotation, rotation_mode='anchor', fontsize=fgtext, color=fg["color"], path_effects=[path_effects.withSimplePatchShadow(alpha=0.8,offset=(1, -1)),],)# horizontalalignment="center")
    # ---- Data band ranges ----
    if long:
        yscaletext = 0.75
        yscaletextup = 1.2
    else:
        yscaletextup = 1.03
        yscaletext = 0.90
    # TODO add these as args?
    haslam = True
    chipass = True
    spass = True
    cbass = True
    quijote = False
    wmap = True
    planck = True
    dirbe = True
    databands = {"Haslam": {"0.408\nHaslam": {"pol": False, "show": haslam, "position": [.408, ymin*yscaletextup], "range": [.406,.410], "color": purple,}},
                 "S-PASS": {"2.303\nS-PASS": {"pol": True, "show": spass, "position": [2.35, ymax2*yscaletext], "range": [2.1,2.4], "color": green,}},
                 "C-BASS": {"5.0\nC-BASS": {"pol": True, "show": spass, "position": [5., ymax2*yscaletext], "range": [4.,6.], "color": blue,}},
                 "CHI-PASS":{"1.394\nCHI-PASS":{"pol": False, "show": chipass,"position": [1.3945, ymin*yscaletextup],"range": [1.3945-0.064/2, 1.3945+0.064/2], "color": lightred,}},
                 "QUIJOTE": {"11\nQUIJOTE": {"pol": True, "show": quijote,"position": [11, ymax2*yscaletext], "range": [10.,12.], "color": orange,},
                             "13": {"pol": True, "show": quijote, "position": [13, ymax2*yscaletext], "range": [12.,14.], "color": orange,},
                             "17": {"pol": True, "show": quijote, "position": [17, ymax2*yscaletext], "range": [16.,18.], "color": orange,},
                             "19": {"pol": True, "show": quijote, "position": [20, ymax2*yscaletext], "range": [18.,21.], "color": orange,},
                             "31": {"pol": True, "show": quijote, "position": [31, ymax2*yscaletext], "range": [26.,36.], "color": orange,},
                             "41": {"pol": True, "show": quijote, "position": [42, ymax2*yscaletext], "range": [35.,47.], "color": orange,}},
                 "Planck": {"30": {"pol": True, "show": planck, "position": [27, ymax2*yscaletext], "range": [23.9,34.5],"color": orange,}, # Planck 30
                            "44": {"pol": True, "show": planck, "position": [40, ymax2*yscaletext], "range": [39,50] ,"color": orange,}, # Planck 44
                            "70": {"pol": True, "show": planck, "position": [60, ymax2*yscaletext], "range": [60,78] ,"color": orange,}, # Planck 70
                            "100\nPlanck": {"pol": True, "show": planck, "position": [90, ymax2*yscaletext], "range": [82,120] ,"color": orange,}, # Planck 100
                            "143": {"pol": True, "show": planck, "position": [130, ymax2*yscaletext], "range": [125,170] ,"color": orange,}, # Planck 143
                            "217": {"pol": True, "show": planck, "position": [195, ymax2*yscaletext], "range": [180,265] ,"color": orange,}, # Planck 217
                            "353": {"pol": True, "show": planck, "position": [320, ymax2*yscaletext], "range": [300,430] ,"color": orange,}, # Planck 353
                            "545": {"pol": False, "show": planck, "position": [490, ymax2*yscaletext], "range": [450,650] ,"color": orange,}, # Planck 545
                            "857": {"pol": False, "show": planck, "position": [730, ymax2*yscaletext], "range": [700,1020] ,"color": orange,}}, # Planck 857
                 "DIRBE": {"DIRBE\n1250": {"pol": False, "show": dirbe, "position": [1000, ymin*yscaletextup], "range": [1000,1540] , "color": red,}, # DIRBE 1250
                           "2140": {"pol": False, "show": dirbe, "position": [1750, ymin*yscaletextup], "range": [1780,2500] , "color": red,}, # DIRBE 2140
                           "3000": {"pol": False, "show": dirbe, "position": [2500, ymin*yscaletextup], "range": [2600,3500] , "color": red,}}, # DIRBE 3000
                 "WMAP": {"K": {"pol": True, "show": wmap, "position": [21.8, ymin*yscaletextup], "range": [21,25.5], "color": teal,},
                          "WMAP\nKa": {"pol": True, "show": wmap, "position": [31.5, ymin*yscaletextup], "range": [30,37], "color": teal,},
                          "Q": {"pol": True, "show": wmap, "position": [39., ymin*yscaletextup], "range": [38,45], "color": teal,},
                          "V": {"pol": True, "show": wmap, "position": [58., ymin*yscaletextup], "range": [54,68], "color": teal,},
                          "W": {"pol": True, "show": wmap, "position": [90., ymin*yscaletextup], "range": [84,106], "color": teal,}},
                 }
    # Set databands from dictionary
    for experiment, bands in databands.items():
        for label, band in bands.items():
            if band["show"]:
                if pol and not band["pol"]:
                    continue # Skip non-polarization bands
                if band["position"][0]>=xmax or band["position"][0]<=xmin:
                    continue # Skip databands outside range
                va = "bottom" if experiment in ["WMAP", "CHI-PASS", "DIRBE", "Haslam"] else "top" # VA for WMAP on bottom
                ha = "left" if experiment in ["Planck", "WMAP", "DIRBE",] else "center"
                ax.axvspan(*band["range"], color=band["color"], alpha=0.3, zorder=0, label=experiment)
                if long:
                    ax2.axvspan(*band["range"], color=band["color"], alpha=0.3, zorder=0, label=experiment)
                    if experiment in ["WMAP", "CHI-PASS", "DIRBE", "Haslam"]:
                        ax.text(*band["position"], label, color=band["color"], va=va, ha=ha, size=freqtext, path_effects=[path_effects.withSimplePatchShadow(alpha=0.8, offset=(1,-1))])
                    else:
                        ax2.text(*band["position"], label, color=band["color"], va=va, ha=ha, size=freqtext, path_effects=[path_effects.withSimplePatchShadow(alpha=0.8, offset=(1,-1))])
                else:
                    ax.text(*band["position"], label, color=band["color"], va=va, ha=ha, size=freqtext, path_effects=[path_effects.withSimplePatchShadow(alpha=0.8, offset=(1,-1))])
    # ---- Axis stuff ----
    lsize=20
    # Dumb tick fix: only keep tick candidates inside the x-range.
    ticks = []
    ticks_ = [1,3,10,30,100,300,1000,3000]
    for i, tick in enumerate(ticks_):
        if tick>=xmin and tick<=xmax:
            ticks.append(tick)
    ax.set(xscale='log', yscale='log', ylim=(ymin, ymax), xlim=(xmin,xmax),xticks=ticks, xticklabels=ticks)
    ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
    ax.tick_params(axis='both', which='major', labelsize=lsize, direction='in')
    ax.tick_params(which="both",direction="in")
    if long:
        ax2.set(xscale='log', yscale='log', ylim=(ymax15, ymax2), xlim=(xmin,xmax), yticks=[1e4,1e6,], xticks=ticks, xticklabels=ticks)
        ax2.tick_params(axis='both', which='major', labelsize=lsize, direction='in')
        ax2.tick_params(which="both",direction="in")
    # Axis labels
    plt.ylabel(r"RMS brightness temperature [$\mu$K]",fontsize=lsize)
    plt.xlabel(r"Frequency [GHz]",fontsize=lsize)
    #ax.legend(loc=6,prop={'size': 20}, frameon=False)
    # ---- Plotting ----
    plt.tight_layout(h_pad=0.3)
    filename = "spectrum"
    filename += "_pol" if pol else ""
    filename += "_long" if long else ""
    filename += "_darkmode" if darkmode else ""
    filename += ".png" if png else ".pdf"
    print("Plotting {}".format(filename))
    plt.savefig(filename, bbox_inches='tight', pad_inches=0.02, transparent=True)
def gradient_fill(x, y, fill_color=None, ax=None,invert=False, **kwargs):
    """Plot a line and fill beneath it with a vertical alpha gradient.

    Parameters
    ----------
    x, y : array-like
        The data values of the line.
    fill_color : a matplotlib color specifier (string, tuple) or None
        The color for the fill. If None, the color of the line is reused.
    ax : a matplotlib Axes instance
        The axes to plot on. If None, the current pyplot axes are used.
    invert : bool
        If True, flip the gradient direction (fade toward the top).
    Additional keyword arguments are forwarded to matplotlib's ``plot``.

    Returns
    -------
    line : a Line2D instance
        The plotted line.
    im : an AxesImage instance
        The transparent gradient clipped to the area beneath the curve.
    """
    import matplotlib.colors as mcolors
    from matplotlib.patches import Polygon

    axes = plt.gca() if ax is None else ax
    (line,) = axes.plot(x, y, **kwargs)
    if fill_color is None:
        fill_color = line.get_color()

    zorder = line.get_zorder()
    line_alpha = line.get_alpha()
    if line_alpha is None:
        line_alpha = 1.0

    # Build a 100-step RGBA column whose alpha ramps from 0 up to the
    # line's alpha; imshow stretches it over the curve's bounding box.
    gradient = np.empty((100, 1, 4), dtype=float)
    gradient[:, :, :3] = mcolors.colorConverter.to_rgb(fill_color)
    gradient[:, :, -1] = np.linspace(0, line_alpha, 100)[:, None]

    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()
    if invert:
        ymin, ymax = ymax, ymin

    im = axes.imshow(gradient, aspect='auto',
                     extent=[xmin, xmax, ymin, ymax],
                     origin='lower', zorder=zorder)

    # Clip the gradient image to the polygon under the curve.
    verts = np.column_stack([x, y])
    verts = np.vstack([[xmin, ymin], verts, [xmax, ymin], [xmin, ymin]])
    clip_path = Polygon(verts, facecolor='none', edgecolor='none', closed=True)
    axes.add_patch(clip_path)
    im.set_clip_path(clip_path)

    axes.autoscale(True)
    return line, im
def find_nearest(array, value):
    """Return the element of *array* closest to *value*, and its index."""
    arr = np.asarray(array)
    nearest_idx = np.abs(arr - value).argmin()
    return arr[nearest_idx], nearest_idx
# This function calculates the intensity spectra
# Alternative 1 uses 2 masks to calculate spatial variations (per-pixel maps)
# Alternative 2 uses only scalar values
def getspec(nu, fg, params, function, field, nside, npix, idx, m):
    """Evaluate a foreground SED over frequencies *nu*.

    If any parameter is a FITS path or numpy map (Alternative 1), the
    model `function` from ``tls`` is evaluated per pixel and the masked
    RMS is computed for each of the two masks in *m*, returning a
    (2, len(nu)) array sorted so row 0 <= row 1. Otherwise (Alternative 2)
    the scalar parameters give a single spectrum of shape (1, len(nu)).

    Parameters
    ----------
    nu : np.ndarray
        Frequencies in Hz (length must be 1000 to match N below).
    fg : str
        Foreground label; "Spinning Dust" triggers loading of the AME
        template and forces ``field = 0``.
    params : list
        Model parameters: scalars, FITS paths, or numpy maps.
    function : str
        Name of the SED function looked up on the ``tls`` module.
    field : int
        HEALPix field to read (0 = intensity, 1 = polarization amplitude).
    nside, npix : int
        HEALPix resolution and corresponding pixel count.
    idx : np.ndarray
        Boolean pixel selection (union of unmasked pixels) to evaluate on.
    m : np.ndarray
        Stack of (at least) two masks used for the RMS computation.
    """
    val = []
    # Alternative 1
    if any([str(x).endswith(".fits") for x in params]) or any([isinstance(x,np.ndarray) for x in params]):
        if fg == "Spinning Dust":
            from pathlib import Path
            ame_template = Path(__file__).parent / "spdust2_cnm.dat"
            fnu, f_ = np.loadtxt(ame_template, unpack=True)
            fnu *= 1e9
            field = 0
        temp = []
        nsides = []
        # Read all maps and record nsides (0 marks a scalar parameter).
        for i, p in enumerate(params):
            if str(p).endswith(".fits"):
                if field==1 and i==0: # If polarization amplitude map
                    s1 = hp.read_map(p, field=1, dtype=None, verbose=False)
                    s2 = hp.read_map(p, field=2, dtype=None, verbose=False)
                    p = np.sqrt(s1**2+s2**2)
                else:
                    p = hp.read_map(p, field=field, dtype=None, verbose=False)
                nsides.append(hp.npix2nside(len(p)))
            elif isinstance(p, np.ndarray):
                if field==1 and i==0:
                    # Polarization amplitude from Q/U components.
                    p = np.sqrt(p[1]**2+p[2]**2)
                elif p.ndim > 1 and p.shape[0]>1:
                    p = p[field]
                nsides.append(hp.npix2nside(len(p)))
            else:
                nsides.append(0)
            temp.append(p)
        # Create dataset and convert to same resolution
        params = np.zeros(( len(params), npix ))
        for i, t in enumerate(temp):
            if nsides[i] == 0:
                params[i,:] = t
            elif nsides[i] != nside:
                params[i,:] = hp.ud_grade(t, nside)
            else:
                params[i,:] = t
        # Only calculate outside masked region
        # NOTE(review): N is hard-coded and must equal len(nu) — confirm.
        N = 1000
        map_ = np.zeros((N, npix))
        for i, nu_ in enumerate(tqdm(nu, desc = fg, ncols=80)):
            if fg == "Spinning Dust":
                map_[i, idx] = getattr(tls, function)(nu_, *params[:,idx], fnu, f_) #fgs.fg(nu, *params[pix])
            else:
                map_[i, idx] = getattr(tls, function)(nu_, *params[:,idx]) #fgs.fg(nu, *params[pix])
        # Apply each of the two masks, remove the monopole, and compute
        # the RMS over unmasked pixels for every frequency point.
        rmss = []  # NOTE(review): unused leftover variable.
        for i in range(2):
            n = np.sum(m[i])
            masked = hp.ma(map_)
            masked.mask = np.logical_not(m[i])
            mono = masked.mean(axis=1)
            masked -= mono.reshape(-1,1)
            rms = np.sqrt( ( (masked**2).sum(axis=1) ) /n)
            val.append(rms)
        # Sort so row 0 is the lower envelope, row 1 the upper.
        vals = np.sort(np.array(val), axis=0)
    else:
        # Alternative 2
        val = getattr(tls, function)(nu, *params) #fgs.fg(nu, *params))
        #vals = np.stack((val, val),)
        vals = val.reshape(1,-1)
    return vals
| 49.449153 | 274 | 0.527763 |
7954f58b233c4cfa79c3d2bea27230f16e89f73e | 9,815 | py | Python | src/olympia/lib/es/tests/test_commands.py | rochisha0/addons-server | 55eb6ccee7ec075cf8f5ed25095c123410186532 | [
"BSD-3-Clause"
] | 1 | 2021-11-27T15:47:47.000Z | 2021-11-27T15:47:47.000Z | src/olympia/lib/es/tests/test_commands.py | rochisha0/addons-server | 55eb6ccee7ec075cf8f5ed25095c123410186532 | [
"BSD-3-Clause"
] | 1,398 | 2020-10-08T06:32:26.000Z | 2022-03-31T12:06:24.000Z | src/olympia/lib/es/tests/test_commands.py | rochisha0/addons-server | 55eb6ccee7ec075cf8f5ed25095c123410186532 | [
"BSD-3-Clause"
] | 1 | 2021-11-24T07:29:55.000Z | 2021-11-24T07:29:55.000Z | import re
import threading
import time
import io
from unittest import mock
from django.conf import settings
from django.core import management
from django.db import connection
from django.test.testcases import TransactionTestCase
from celery import group, task
from celery.canvas import _chain
from olympia.amo.tests import addon_factory, ESTestCase, reverse_ns
from olympia.amo.utils import urlparams
from olympia.lib.es.management.commands import reindex
from olympia.lib.es.utils import is_reindexing_amo, unflag_reindexing_amo
@task
def dummy_task():
    """No-op celery task used as a placeholder in workflow tests."""
    return None
class TestIndexCommand(ESTestCase):
    """Tests for the `reindex` management command.

    Covers end-to-end reindexation (run in a background thread while the
    foreground keeps indexing/searching) and inspection of the celery
    workflow the command builds.
    """

    def setUp(self):
        super(TestIndexCommand, self).setUp()
        if is_reindexing_amo():
            unflag_reindexing_amo()
        self.url = reverse_ns('addon-search')
        # We store previously existing indices in order to delete the ones
        # created during this test run.
        self.indices = self.es.indices.stats()['indices'].keys()
        self.addons = []
        self.expected = self.addons[:]
        # Monkeypatch Celery's ".get()" inside-async-task error
        # until https://github.com/celery/celery/issues/4661 (which isn't just
        # about retries but a general regression that manifests only in
        # eager-mode) is fixed.
        self.patch('celery.app.task.denied_join_result')

    # Since this test plays with transactions, but we don't have (and don't
    # really want to have) a ESTransactionTestCase class, use the fixture setup
    # and teardown methods from TransactionTestCase.
    def _fixture_setup(self):
        return TransactionTestCase._fixture_setup(self)

    def _fixture_teardown(self):
        return TransactionTestCase._fixture_teardown(self)

    def tearDown(self):
        # Delete only the indices created during this test run.
        current_indices = self.es.indices.stats()['indices'].keys()
        for index in current_indices:
            if index not in self.indices:
                self.es.indices.delete(index, ignore=404)
        super(TestIndexCommand, self).tearDown()

    def check_settings(self, new_indices):
        """Make sure the indices settings are properly set."""
        for index, alias in new_indices:
            settings = self.es.indices.get_settings(alias)[index]['settings']
            # These should be set in settings_test.
            assert int(settings['index']['number_of_replicas']) == 0
            assert int(settings['index']['number_of_shards']) == 1

    def check_results(self, expected):
        """Make sure the expected addons are listed in a standard search."""
        response = self.client.get(urlparams(self.url, sort='downloads'))
        assert response.status_code == 200
        got = self.get_results(response)
        for addon in expected:
            assert addon.pk in got, '%s is not in %s' % (addon.pk, got)
        return response

    def get_results(self, response):
        """Return pks of add-ons shown on search results page."""
        results = response.data['results']
        return [item['id'] for item in results]

    @classmethod
    def get_indices_aliases(cls):
        """Return the test indices with an alias."""
        indices = cls.es.indices.get_alias()
        items = [(index, list(aliases['aliases'].keys())[0])
                 for index, aliases in indices.items()
                 if len(aliases['aliases']) > 0 and index.startswith('test_')]
        items.sort()
        return items

    def _test_reindexation(self, wipe=False):
        """Run a full reindex in a background thread and verify results."""
        # Current indices with aliases.
        old_indices = self.get_indices_aliases()

        # This is to start a reindexation in the background.
        class ReindexThread(threading.Thread):
            def __init__(self):
                self.stdout = io.StringIO()
                super(ReindexThread, self).__init__()

            def run(self):
                # We need to wait at least a second, to make sure the alias
                # name is going to be different, since we already create an
                # alias in setUpClass.
                time.sleep(1)
                management.call_command(
                    'reindex', wipe=wipe, noinput=True, stdout=self.stdout)
        t = ReindexThread()
        t.start()

        # Wait for the reindex in the thread to flag the database.
        # The database transaction isn't shared with the thread, so force the
        # commit.
        while t.is_alive() and not is_reindexing_amo():
            connection._commit()
            connection.clean_savepoints()

        if not wipe:
            # We should still be able to search in the foreground while the
            # reindex is being done in the background. We should also be able
            # to index new documents, and they should not be lost.
            old_addons_count = len(self.expected)
            while t.is_alive() and len(self.expected) < old_addons_count + 3:
                self.expected.append(addon_factory())
                connection._commit()
                connection.clean_savepoints()

            # We don't know where the search will happen, the reindexing
            # could be over by now. So force a refresh on *all* indices.
            self.refresh(None)
            self.check_results(self.expected)

            if len(self.expected) == old_addons_count:
                raise AssertionError(
                    'Could not index objects in foreground while reindexing '
                    'in the background.')

        t.join() # Wait for the thread to finish.
        t.stdout.seek(0)
        stdout = t.stdout.read()
        assert 'Reindexation done' in stdout, stdout

        # The reindexation is done, let's double check we have all our docs.
        connection._commit()
        connection.clean_savepoints()
        self.refresh()
        self.check_results(self.expected)

        # New indices have been created, and aliases now point to them.
        new_indices = self.get_indices_aliases()
        assert len(new_indices)
        assert old_indices != new_indices, (stdout, old_indices, new_indices)

        self.check_settings(new_indices)

    def test_reindexation_starting_from_zero_addons(self):
        self._test_reindexation()

    def test_reindexation_starting_from_one_addon(self):
        self.addons.append(addon_factory())
        self.expected = self.addons[:]
        self.refresh()
        self.check_results(self.expected)
        self._test_reindexation()

    def test_reindexation_with_wipe(self):
        self.addons.append(addon_factory())
        self.expected = self.addons[:]
        self.refresh()
        self.check_results(self.expected)
        self._test_reindexation(wipe=True)

    def test_stats_download_counts(self):
        # Reindexing a specific key should also create fresh aliased indices.
        old_indices = self.get_indices_aliases()
        stdout = io.StringIO()
        management.call_command(
            'reindex', key='stats_download_counts', stdout=stdout)
        stdout.seek(0)
        buf = stdout.read()
        new_indices = self.get_indices_aliases()
        assert len(new_indices)
        assert old_indices != new_indices, (buf, old_indices, new_indices)

    @mock.patch.object(reindex, 'gather_index_data_tasks')
    def _test_workflow(self, key, gather_index_data_tasks_mock):
        """Check the structure of the celery chain built for *key*."""
        command = reindex.Command()
        alias = settings.ES_INDEXES[key]
        # Patch reindex.gather_index_data_tasks so that it returns a group of
        # dummy tasks - otherwise the chain would not contain the indexation
        # tasks and that's what we really care about.
        gather_index_data_tasks_mock.return_value = group(
            [dummy_task.si()] * 42
        )
        workflow = command.create_workflow(alias)
        # Make sure we called gather_index_data_tasks_mock with the alias and
        # timestamped index.
        expected_index = alias
        assert gather_index_data_tasks_mock.call_args[0][0] == expected_index
        assert gather_index_data_tasks_mock.call_args[0][1].startswith(
            expected_index
        )
        assert re.search(
            '[0-9]{14}$', gather_index_data_tasks_mock.call_args[0][1]
        )
        # Inspect workflow to make sure it contains what we expect. We should
        # have a chain with a few startup tasks, then a chord that indexes the
        # data and finishes with cleanup tasks.
        assert isinstance(workflow, _chain)
        expected_tasks = [
            'olympia.lib.es.management.commands.reindex.create_new_index',
            'olympia.lib.es.management.commands.reindex.flag_database',
            'celery.chord'
        ]
        assert expected_tasks == [task.name for task in workflow.tasks]
        reindex_chord = workflow.tasks[2]
        expected_header = [
            'olympia.lib.es.tests.test_commands.dummy_task'
        ] * 42
        assert expected_header == [task.name for task in reindex_chord.tasks]
        expected_body = [
            'olympia.lib.es.management.commands.reindex.update_aliases',
            'olympia.lib.es.management.commands.reindex.unflag_database',
        ]
        assert isinstance(reindex_chord.body, _chain)
        for i, task_name in enumerate(expected_body):
            assert task_name == reindex_chord.body.tasks[i].name
        # Note: there might be an extra task at the end of the chain to delete
        # existing indexes depending on how tests are called/set up.

    def test_create_workflow_addons(self):
        """
        Test tasks returned by create_workflow() as used by reindex command,
        for addons.
        """
        self._test_workflow('default')

    def test_create_workflow_stats_download_counts(self):
        """
        Test tasks returned by create_workflow() as used by reindex command,
        for stats_download_counts.
        """
        self._test_workflow('stats_download_counts')
| 38.641732 | 79 | 0.645339 |
7954f7b2c5c44c2cb5ca833a258c4d7d9e60f895 | 1,927 | py | Python | metaopt/cifar/main_quotient.py | vuiseng9/OHO | 1c0bdd10502317f41f717deb317d6dcb04b3c6bf | [
"MIT"
] | 8 | 2021-02-24T19:20:30.000Z | 2021-08-11T21:25:54.000Z | metaopt/cifar/main_quotient.py | vuiseng9/OHO | 1c0bdd10502317f41f717deb317d6dcb04b3c6bf | [
"MIT"
] | null | null | null | metaopt/cifar/main_quotient.py | vuiseng9/OHO | 1c0bdd10502317f41f717deb317d6dcb04b3c6bf | [
"MIT"
] | 2 | 2021-04-13T00:54:58.000Z | 2021-08-10T21:08:30.000Z | import os, sys, math, argparse, time
import torch
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
from itertools import product, cycle
import pickle
from resnet18 import *
from metaopt.util import *
from metaopt.util_ml import *
from metaopt.optimizer import SGD_Multi_LR
from metaopt.cifar.main import *
from numpy.random import MT19937
from numpy.random import RandomState, SeedSequence
# Fold index used both to seed the RNG and as the fold passed to main().
ifold=8
# Module-level RNG seeded by the fold for reproducibility.
RNG = np.random.RandomState(ifold)
# Dataset split indices.
TRAIN=0
VALID=1
TEST =2
# Scratch directory root — site-specific path; adjust per deployment.
basepath = '/scratch/ji641'
def ideal_hyper_exp(args, sampler_type=None, thrd=0.):
    """Run training trials over a list of quotient values and report losses.

    For each quotient the args are mutated (quotient, sampler_type=None,
    model_type='qrez18') and ``main`` is invoked once; test losses are
    collected and printed at the end.

    Note: the *sampler_type* and *thrd* parameters are currently unused —
    ``args.sampler_type`` is overwritten inside the loop. Relies on the
    module-level globals ``ifold`` and ``device`` (set in __main__).
    """
    dataset = load_cifar10(args)
    trial, contF = 0, 1
    te_loss_list = []
    #quotient_list = [1,2,4,8,16]
    quotient_list = [4]
    for quotient in quotient_list:
        args.quotient = quotient
        args.sampler_type = None
        args.model_type = 'qrez18'
        print('Model Type: %s Opt Type: %s meta-lr %f lr %f l2 %f, Update Freq %d Reset Freq %d |Nvl| %d Quotient %d' \
                % (args.model_type, args.opt_type, args.mlr, args.lr, \
                args.lambda_l2, args.update_freq, args.reset_freq, args.valid_size, args.quotient))
        trial +=1
        print('<<---- Trial %d Lr %f L2 %f Epoch %d--->>' % (trial, args.lr, args.lambda_l2, args.num_epoch))
        te_loss = main(args, trial=trial, ifold=ifold, quotient=quotient, device=device)
        te_loss_list.append(te_loss)
        print('*** Trial %d Test loss %f Lr %f L2 %f ***' \
                % (trial, te_loss_list[-1], args.lr, args.lambda_l2))
    print(quotient_list)
    print(te_loss_list)
if __name__ == '__main__':
    args = parse_args()
    # CUDA is hard-enabled here; `device` is a module-level global that
    # ideal_hyper_exp() reads when calling main().
    is_cuda = 1
    args.is_cuda = is_cuda
    device = 'cuda' if is_cuda else 'cpu'
    ideal_hyper_exp(args)
| 28.761194 | 119 | 0.679294 |
7954f7ce4f27753d1a7bef49658650263d9c9fa5 | 3,333 | py | Python | pyslowloris/attack.py | goasdsdkai/daas | 78ef23b254893efca22748fe619ef22648b8c1e8 | [
"MIT"
] | 75 | 2017-06-15T05:58:02.000Z | 2022-03-31T22:59:25.000Z | pyslowloris/attack.py | goasdsdkai/daas | 78ef23b254893efca22748fe619ef22648b8c1e8 | [
"MIT"
] | 8 | 2017-08-25T04:14:19.000Z | 2021-09-10T06:21:33.000Z | pyslowloris/attack.py | goasdsdkai/daas | 78ef23b254893efca22748fe619ef22648b8c1e8 | [
"MIT"
] | 32 | 2017-03-22T22:52:26.000Z | 2022-03-07T15:53:01.000Z | """
MIT License
Copyright (c) 2020 Maxim Krivich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
import trio
from pyslowloris import HostAddress, SlowLorisConnection
from pyslowloris import exceptions as exc
class SlowLorisAttack:
    """Slow-loris HTTP DoS attack driver built on trio.

    Opens `connections_count` concurrent connections to `target` and keeps
    each alive by trickling bogus headers every `sleep_time` seconds.
    """

    __slots__ = ("_target", "_silent", "_connections_count", "_sleep_time", )

    # Seconds to wait between keep-alive header writes.
    DEFAULT_SLEEP_TIME = 2
    # Inclusive range for the random value of the bogus "X-a" header.
    DEFAULT_RANDOM_RANGE = [1, 999999]

    def __init__(
        self, target: HostAddress, connections_count: int,
        *, sleep_time: int = None, silent: bool = True
    ):
        # silent=True suppresses re-raising of connection/socket errors.
        self._target = target
        self._silent = silent
        self._connections_count = connections_count
        self._sleep_time = sleep_time or self.DEFAULT_SLEEP_TIME

    def __repr__(self) -> str:
        """Reconstructable-looking repr built from __slots__ attributes."""
        internal_dict = {key: getattr(self, key) for key in self.__slots__}
        args = ",".join([f"{k}={repr(v)}" for (k, v) in internal_dict.items()])
        return f"{self.__class__.__name__}({args.rstrip(',')})"

    async def _atack_coroutine(self) -> None:
        """Single connection worker: connect, then drip headers forever.

        Reconnects in an outer loop if the connection drops; a broken
        stream only raises when not in silent mode. (Name keeps the
        original "atack" spelling since callers reference it.)
        """
        while True:
            try:
                conn = SlowLorisConnection(self._target)
                await conn.establish_connection()
                async with conn.with_stream():
                    await conn.send_initial_headers()
                    while True:
                        rand = random.randint(*self.DEFAULT_RANDOM_RANGE)
                        await conn.send(f"X-a: {rand}\r\n")
                        await trio.sleep(self._sleep_time)
            except trio.BrokenResourceError as e:
                if not self._silent:
                    raise exc.ConnectionClosedError("Socket is broken.") from e

    async def _run(self) -> None:
        """Spawn one attack coroutine per requested connection."""
        async with trio.open_nursery() as nursery:
            for _ in range(self._connections_count):
                nursery.start_soon(self._atack_coroutine)

    def start(self) -> None:
        """Start slow loris attack."""
        try:
            trio.run(self._run)
        except exc.ConnectionClosedError:
            raise
        except OSError:
            # Too many opened connections (OS file-descriptor limit hit).
            if not self._silent:
                raise exc.TooManyActiveConnectionsError(
                    "Too many opened connections."
                )
        except Exception as ex:
            raise exc.SlowLorisBaseError("Something went wrong.") from ex
| 37.449438 | 79 | 0.658266 |
7954f82d239e542282c5bffa346183deae986f0d | 16,259 | py | Python | offline_regression.py | alankarj/bioasq_l2r | b11b7a91a95f8086df371b678058a2237f992eb3 | [
"Apache-2.0"
] | null | null | null | offline_regression.py | alankarj/bioasq_l2r | b11b7a91a95f8086df371b678058a2237f992eb3 | [
"Apache-2.0"
] | null | null | null | offline_regression.py | alankarj/bioasq_l2r | b11b7a91a95f8086df371b678058a2237f992eb3 | [
"Apache-2.0"
] | null | null | null | """ Offline Supervised Regression Model Train and Testing
Usage:
offline_regression.py train [options]
offline_regression.py baseline [options]
offline_regression.py test --model-path=<file> [options]
offline_regression.py example --model-path=<file> [options]
Options:
--data-dir=<file> Directory to data [default: ./data/summary]
--sentence-only Sentence only features
--question-only Question only features
--factoid-also Add factoid questions to the training and test data
--custom-featurizer Add a custom featurizer
-f --feature=<str> Feature type to use [default: TF-IDF]
-l --label=<str> Label type to use [default: JACCARD]
-s --scale=<int> Scale scores and cut off to integer [default: 100]
-i --interval=<float> Bucket interval [default: 0.1]
--model=<str> Model type to use [default: LinearRegression]
--seed=<int> Seed number for random generator [default: 11731]
--save-dir=<file> Directory to save trained model [default: ./save_regression]
--model-path=<file> Path to model pickle [default: ./save/LinearSVC_TF-IDF_JACCARD_0.1.pickle]
"""
import copy
import json
import math
import os
import pickle
import random
import numpy as np
from docopt import docopt
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
from scipy.sparse import hstack
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LinearRegression
from sklearn.manifold import TSNE
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.svm import SVR, LinearSVR
from sklearn.utils.class_weight import compute_sample_weight
from deiis.model import DataSet, Serializer
from custom_featurizer import CustomFeaturizer
from similarity_scoring import SimilarityJaccard
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(color_codes=True)
def scale_scores(Y, scale=100):
    """Multiply scores by ``scale``; round to whole numbers when scaling up.

    Accepts any array-like and always returns a numpy array. Rounding is
    applied only for scale > 1 (i.e. when quantizing up to integer scores);
    a fractional scale (scaling back down) leaves the values untouched.
    """
    scaled = np.asarray(Y) * scale
    return np.round(scaled) if scale > 1 else scaled
class RegressionModelWrapper(object):
    """
    Bundles everything needed to score a (question, sentence) pair:
    - featurizers: (sentence, question[, custom], pca) transformers
    - label_type: name of the label scheme the model was trained on
    - clf: fitted regressor
    - scale: factor the training labels were multiplied by
    """

    def __init__(self, featurizers, label_type, clf, scale):
        self.featurizers = featurizers
        self.label_type = label_type
        self.clf = clf
        self.scale = scale

    def score(self, question, sentence):
        """Return the model's (unscaled) relevance score for one pair.

        Bug fix: the original unpacked the 4-featurizer tuple into
        ``sent_featurizer`` but then used the undefined name
        ``sentence_featurizer`` (NameError), and never assigned ``feature``
        at all in the 2-featurizer case.
        """
        custom_feature = None
        pca = None
        if len(self.featurizers) == 4:
            sentence_featurizer, question_featurizer, custom_featurizer, pca = self.featurizers
            custom_feature = custom_featurizer.transform([question], [sentence])
        elif len(self.featurizers) == 3:
            sentence_featurizer, question_featurizer, pca = self.featurizers
        else:
            sentence_featurizer, question_featurizer = self.featurizers
        sentence_feature = sentence_featurizer.transform([sentence])
        question_feature = question_featurizer.transform([question])
        # Stack sparse feature blocks into one dense row vector.
        if custom_feature is not None:
            feature = hstack([sentence_feature, question_feature, custom_feature]).toarray()
        else:
            feature = hstack([sentence_feature, question_feature]).toarray()
        # PCA is only present in the 3- and 4-featurizer configurations.
        if pca is not None:
            feature = pca.transform(feature)
        pred_scale = self.clf.predict(feature)
        # Undo the training-time label scaling before returning.
        pred_score = scale_scores(pred_scale, 1. / self.scale)
        return pred_score[0]
def save_to_pickle(obj, save_name):
    """Persist *obj* to disk as a pickle file at *save_name*."""
    payload = pickle.dumps(obj)
    with open(save_name, 'wb') as handle:
        handle.write(payload)
def get_sentences(question):
    """Split the text of all of *question*'s snippets into cleaned sentences.

    Uses NLTK's sentence tokenizer and falls back to a naive ". "-split when
    tokenization fails. Leading dots/spaces are stripped and blank results
    dropped.  (Python 2 module: ``unicode`` is the py2 builtin.)
    """
    sentences = []
    for snippet in question.snippets:
        # Force to ASCII, silently dropping non-ASCII characters.
        text = unicode(snippet.text).encode("ascii", "ignore")
        if text == "":
            continue
        try:
            sentences += sent_tokenize(text)
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed while keeping the fallback behavior.
        except Exception:
            sentences += text.split(". ")  # Notice the space after the dot
    clean_sentences = []
    for sent in sentences:
        sent = sent.lstrip(". ")
        if sent.replace(" ", "") != "":
            clean_sentences.append(sent)
    return clean_sentences
def get_all_sentences(summary_type_questions):
    """Attach a ``sentences`` list to every question and report the total count."""
    #
    num_sentences = 0
    for question in summary_type_questions:
        # Side effect: caches the tokenized snippet sentences on the question object.
        question.sentences = get_sentences(question)
        num_sentences += len(question.sentences)
    print 'Total number of sentences: ', num_sentences
    return summary_type_questions
def read_summary_questions(filepath, factoid_also=False):
    """Load questions of type "summary" (and optionally "factoid") from *filepath*.

    Each returned question also gets its snippet sentences attached via
    get_all_sentences().
    """
    with open(filepath, 'r') as fin:
        dataset = Serializer.parse(fin, DataSet)
    wanted_types = ("summary", "factoid") if factoid_also else ("summary",)
    selected = [q for q in dataset.questions if q.type in wanted_types]
    return get_all_sentences(selected)
def create_featurizers(feature_type, custom_feat=False):
    """Create featurizers and return as a 4-element list:
    1. sentence featurizer (CountVectorizer or TfidfVectorizer)
    2. question featurizer (independent deep copy of the sentence one)
    3. custom featurizer (None unless custom_feat is True)
    4. pca (300 components)
    """
    # Sentence & Question
    if feature_type == "COUNT":
        sent_featurizer = CountVectorizer(max_features=10000)
    elif feature_type == "TF-IDF":
        sent_featurizer = TfidfVectorizer(max_features=10000)
    else:
        raise ValueError("Unknown feature_type: {}".format(feature_type))
    # Deep copy so the question vocabulary is fit independently of sentences.
    question_featurizer = copy.deepcopy(sent_featurizer)
    # PCA
    pca = PCA(n_components=300)
    if not custom_feat:
        custom_featurizer = None
    else:
        custom_featurizer = CustomFeaturizer()
    all_featurizers = [sent_featurizer, question_featurizer, custom_featurizer, pca]
    return all_featurizers
def get_labels(summary_type_questions, label_type):
    """Compute one similarity label per (question, sentence) pair.

    For each question, the ideal answer(s) are turned into stop-word-free
    token sets; every snippet sentence is scored against each set with the
    scheme named by *label_type* ("JACCARD", "ROUGE" or "JACCARD_STEM") and
    the scores are averaged. Returns a flat numpy array ordered the same way
    featurize() orders its rows (question by question, sentence by sentence).
    """
    print "Getting labels..."
    all_scores = list()
    stopWords = set(stopwords.words('english'))
    stemmer = PorterStemmer()
    similarity = SimilarityJaccard(stopWords, stemmer)
    count = 0
    for i, question in enumerate(summary_type_questions):
        count += 1
        # print "Question-", i
        # print "ID: ", question.id
        # ideal_answer may be a single string or a list of alternatives;
        # normalize to a list of lowercased, stop-word-free token sets.
        list_of_sets = []
        if type(question.ideal_answer) == list:
            for ideal_answer in question.ideal_answer:
                list_of_sets.append(
                    set([
                        i.lower() for i in word_tokenize(ideal_answer)
                        if i.lower() not in stopWords
                    ]))
        else:
            list_of_sets.append(
                set([
                    i.lower() for i in word_tokenize(question.ideal_answer)
                    if i.lower() not in stopWords
                ]))
        for sentence in question.sentences:
            # print sentence
            scores = []
            for s2 in list_of_sets:
                if label_type == "JACCARD":
                    scores.append(similarity.calculateSimilarity(sentence, s2))
                elif label_type == "ROUGE":
                    scores.append(similarity.calculateRouge(sentence, s2))
                elif label_type == "JACCARD_STEM":
                    # NOTE: "caculate..." spelling matches the helper's API.
                    scores.append(similarity.caculateSimilarityWithStem(sentence, s2))
                else:
                    raise ValueError(
                        "Unknown label type: {}".format(label_type))
            # Average over all reference ideal answers.
            one_score = sum(scores) / len(scores)
            all_scores.append(one_score)
    all_scores = np.array(all_scores)
    print all_scores
    print "Average: ", np.mean(all_scores)
    print "Std. Dev.: ", np.std(all_scores)
    print "Min.: ", np.min(all_scores)
    print "Max.: ", np.max(all_scores)
    print "Number of questions: ", count
    return all_scores
def featurize(summary_questions, all_featurizers, sentence_only=False, question_only=False, train=False):
    """
    Turn (question, sentence) pairs into a dense feature matrix.

    One row is produced per snippet sentence; the question body is repeated
    for each of its sentences. When train=True the featurizers and PCA are
    fit on this data first. sentence_only / question_only restrict the
    feature block to one side. Custom features (if a custom featurizer is
    present) are appended AFTER the PCA projection.
    """
    print("[featurize]", "train", train)
    # Process into question + sentence data samples
    question_list = []
    sentence_list = []
    for question in summary_questions:
        for sentence in question.sentences:
            question_list.append(question.body)
            sentence_list.append(sentence)
    # Process word tokens into feature array
    sent_featurizer, question_featurizer, custom_featurizer, pca = all_featurizers
    if train:
        sent_featurizer.fit(sentence_list)
        question_featurizer.fit(question_list)
        if custom_featurizer is not None:
            custom_featurizer.fit(question_list, sentence_list)
    sentence_features = sent_featurizer.transform(sentence_list)
    question_features = question_featurizer.transform(question_list)
    if custom_featurizer is not None:
        custom_features = custom_featurizer.transform(question_list, sentence_list)
    if sentence_only:
        X = sentence_features.toarray()
    elif question_only:
        X = question_features.toarray()
    else:
        X = hstack([sentence_features, question_features]).toarray()
    # PCA the feature array
    if train:
        pca.fit(X)
    X = pca.transform(X)
    # Custom features bypass PCA and are concatenated onto the projection.
    if custom_featurizer is not None:
        X = np.hstack((X, custom_features))
    return X
def score_to_bin(Y, interval=0.1):
    """Quantize scores in [0, 1] into equal-width buckets.

    Returns (labels, cat2score): ``labels`` is a numpy array with the bucket
    index of each score, and ``cat2score`` maps bucket index -> bucket start
    value (e.g. 0 -> 0.0, 1 -> 0.1, ... for the default interval).
    A score of exactly 1.0 is clamped into the last bucket.
    """
    bucket_starts = np.arange(0, 1.0, interval)
    cat2score = dict(enumerate(bucket_starts))
    top_label = len(bucket_starts) - 1
    labels = []
    for score in Y:
        assert 0 <= score <= 1.0
        labels.append(top_label if score == 1 else int(math.floor(score / interval)))
    return np.array(labels), cat2score
def plot_dist(y, file_name):
    """Plot a histogram/KDE of label values, save it to *file_name*, and show it."""
    sns.distplot(y, norm_hist=True)
    plt.xlabel("Label values")
    plt.ylabel("Histogram/Density")
    plt.savefig(file_name, bbox_inches="tight")
    # Blocks until the interactive window is closed (when a GUI backend is active).
    plt.show()
def train(opt):
    """Train a regression model from docopt options and pickle it to disk.

    Pipeline: load questions -> featurize (fit featurizers + PCA) ->
    compute similarity labels -> scale labels -> fit regressor -> report
    train-set MAE/MSE -> save (featurizers, label_type, clf, scale) tuple.
    """
    # Process data
    data_dir = opt["--data-dir"]
    factoid_also = bool(opt["--factoid-also"])
    custom_feat = bool(opt["--custom-featurizer"])
    if factoid_also:
        train_path = os.path.join(data_dir, "summary_factoid.train.json")
    else:
        train_path = os.path.join(data_dir, "summary.train.json")
    feature_type = opt["--feature"]
    label_type = opt["--label"]
    question_only = bool(opt["--question-only"])
    sentence_only = bool(opt["--sentence-only"])
    train_questions = read_summary_questions(train_path, factoid_also)
    all_featurizers = create_featurizers(feature_type, custom_feat=custom_feat)
    print("Featurizing")
    X_train = featurize(train_questions, all_featurizers,
                        sentence_only, question_only, train=True)
    Y_train = get_labels(train_questions, label_type)
    # plot_dist(Y_train, file_name=label_type + "_" + str(factoid_also) + ".pdf")
    print "Number of sentences: ", Y_train.shape[0]
    print("X_train", X_train.shape, "Y_train", Y_train.shape)
    print "X_train mean: ", X_train
    scale = int(opt['--scale'])
    Y_train_scale = scale_scores(Y_train, scale)
    # Load model
    model_type = opt["--model"]
    print("Model:", model_type)
    if model_type == "LinearRegression":
        clf = LinearRegression()
    elif model_type == "LinearSVR":
        clf = LinearSVR(verbose=2)
    elif model_type == "SVR":
        clf = SVR(verbose=2)
    else:
        raise ValueError("Unknown model: {}".format(model_type))
    # Train
    print("Start training")
    # # Create sample weights
    # interval = float(opt['--interval'])
    # Y_train_bin, cat2score = score_to_bin(Y_train, interval)
    # sample_weight = compute_sample_weight("balanced", Y_train_bin)
    # clf.fit(X_train, Y_train_scale, sample_weight=sample_weight)
    clf.fit(X_train, Y_train_scale)
    Y_train_pred_scale = clf.predict(X_train)
    # Map predictions back to the original [0, 1] label range.
    Y_train_pred = scale_scores(Y_train_pred_scale, scale=1./scale)
    print("Scaled:")
    mae = mean_absolute_error(Y_train_scale, Y_train_pred_scale)
    mse = mean_squared_error(Y_train_scale, Y_train_pred_scale)
    print("mean absolute error", mae)
    print("mean squared error", mse)
    print("Unscaled:")
    print "Mean prediction: ", np.mean(Y_train_pred)
    print "Std. prediction: ", np.std(Y_train_pred)
    print "Max prediction: ", np.max(Y_train_pred)
    print "Min prediction: ", np.min(Y_train_pred)
    mae = mean_absolute_error(Y_train, Y_train_pred)
    mse = mean_squared_error(Y_train, Y_train_pred)
    print("mean absolute error", mae)
    print("mean squared error", mse)
    # Save Model
    # NOTE(review): custom_feat is a bool, so "is not None" is always True and
    # the else branch is dead; the saved tuple therefore always drops the
    # custom featurizer (index 2). test() below expects exactly this 3-element
    # layout, so "fixing" the condition would break model loading -- confirm
    # intent before changing.
    if custom_feat is not None:
        new_featurizer = (all_featurizers[0], all_featurizers[1], all_featurizers[3])
    else:
        new_featurizer = all_featurizers
    obj = (new_featurizer, label_type, clf, scale)
    save_dir = opt["--save-dir"]
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if sentence_only:
        feature_type += "_s_only"
    elif question_only:
        feature_type += "_q_only"
    model_name = "{}_{}_{}_{}_{}_{}".format(
        model_type, feature_type, label_type, scale, custom_feat, factoid_also)
    save_path = os.path.join(save_dir, model_name + ".pickle")
    print("saving model to {}".format(save_path))
    with open(save_path, "wb") as fout:
        pickle.dump(obj, fout)
def test(opt):
    """ Example Usage of Testing

    Loads a pickled (featurizers, label_type, clf, scale) tuple, featurizes
    the validation split with the saved featurizers (no refitting), and
    reports MAE/MSE on both the scaled and unscaled label ranges.
    """
    data_dir = opt["--data-dir"]
    factoid_also = bool(opt["--factoid-also"])
    custom_feat = bool(opt["--custom-featurizer"])
    if factoid_also:
        valid_path = os.path.join(data_dir, "summary_factoid.valid.json")
    else:
        valid_path = os.path.join(data_dir, "summary.valid.json")
    valid_questions = read_summary_questions(valid_path, factoid_also)
    model_path = opt["--model-path"]
    with open(model_path, 'rb') as fin:
        (all_featurizers, label_type, clf, scale) = pickle.load(fin)
    # The pickle holds (sentence, question, pca); re-insert the custom
    # featurizer slot (a fresh CustomFeaturizer, or None) expected by featurize().
    if custom_feat:
        all_featurizers = (all_featurizers[0], all_featurizers[1], CustomFeaturizer(), all_featurizers[2])
    else:
        all_featurizers = (all_featurizers[0], all_featurizers[1], None, all_featurizers[2])
    question_only = bool(opt["--question-only"])
    sentence_only = bool(opt["--sentence-only"])
    X_valid = featurize(valid_questions, all_featurizers,
                        sentence_only, question_only)
    Y_valid = get_labels(valid_questions, label_type)
    print("X_valid", X_valid.shape, "Y_valid", Y_valid.shape)
    Y_valid_scale = scale_scores(Y_valid, scale)
    Y_valid_pred_scale = clf.predict(X_valid)
    Y_valid_pred = scale_scores(Y_valid_pred_scale, scale=1./scale)
    print("Scaled:")
    mae = mean_absolute_error(Y_valid_scale, Y_valid_pred_scale)
    mse = mean_squared_error(Y_valid_scale, Y_valid_pred_scale)
    print("mean absolute error", mae)
    print("mean squared error", mse)
    print("Unscaled:")
    mae = mean_absolute_error(Y_valid, Y_valid_pred)
    mse = mean_squared_error(Y_valid, Y_valid_pred)
    print("mean absolute error", mae)
    print("mean squared error", mse)
def example(opt):
    """Score one hard-coded question/sentence pair with a saved model wrapper."""
    with open(opt["--model-path"], 'rb') as fin:
        model = pickle.load(fin)
    question = 'What is the effect of TRH on myocardial contractility?'
    sentence = 'Acute intravenous administration of TRH to rats with ischemic cardiomyopathy caused a significant increase in heart rate, mean arterial pressure, cardiac output, stroke volume, and cardiac contractility'
    print("Question:", question)
    print("Sentence:", sentence)
    print("Score:", model.score(question, sentence))
if __name__ == "__main__":
    opt = docopt(__doc__)
    if opt["train"]:
        train(opt)
    elif opt["test"]:
        test(opt)
    elif opt["example"]:
        example(opt)
    # Bug fix: "visualize" is not declared in the docopt usage string, so
    # opt["visualize"] raised KeyError and made the documented "baseline"
    # command unreachable. Use .get() for the undeclared key.
    elif opt.get("visualize"):
        # NOTE(review): visualize_features() is not defined in this module;
        # this branch is currently dead.
        visualize_features(opt)
    elif opt["baseline"]:
        # NOTE(review): baseline() is not defined in this module; the
        # "baseline" command will raise NameError until it is implemented.
        baseline(opt)
| 32.648594 | 219 | 0.658958 |
7954f847f70a2a541f9dd119f5f6022818422d87 | 261 | py | Python | test/case/export_mysql_dumper.py | mawentao007/reading_grab | a8b64d235d60e5c895e70f59739888f6748d4407 | [
"MIT"
] | null | null | null | test/case/export_mysql_dumper.py | mawentao007/reading_grab | a8b64d235d60e5c895e70f59739888f6748d4407 | [
"MIT"
] | null | null | null | test/case/export_mysql_dumper.py | mawentao007/reading_grab | a8b64d235d60e5c895e70f59739888f6748d4407 | [
"MIT"
] | null | null | null | from unittest import TestCase
from test.util import get_temp_file
from grab.export.mysql_dumper import MysqlCSVDumper
class MysqlCSVDumperTestCase(TestCase):
def test_constructor(self):
path = get_temp_file()
dumper = MysqlCSVDumper(path)
| 26.1 | 51 | 0.773946 |
7954f86cdba0cb1c520f3ab77c0bd5d1225abfdc | 18,900 | py | Python | superset/common/query_context_processor.py | iudeen/superset | d304849b46b39bb6a261b735b7ca658962bc31e0 | [
"Apache-2.0"
] | null | null | null | superset/common/query_context_processor.py | iudeen/superset | d304849b46b39bb6a261b735b7ca658962bc31e0 | [
"Apache-2.0"
] | 5 | 2022-03-25T19:48:31.000Z | 2022-03-25T20:02:59.000Z | superset/common/query_context_processor.py | iudeen/superset | d304849b46b39bb6a261b735b7ca658962bc31e0 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import copy
import logging
from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING, Union
import numpy as np
import pandas as pd
from flask_babel import _
from pandas import DateOffset
from typing_extensions import TypedDict
from superset import app
from superset.annotation_layers.dao import AnnotationLayerDAO
from superset.charts.dao import ChartDAO
from superset.common.chart_data import ChartDataResultFormat
from superset.common.db_query_status import QueryStatus
from superset.common.query_actions import get_query_results
from superset.common.utils import dataframe_utils as df_utils
from superset.common.utils.query_cache_manager import QueryCacheManager
from superset.connectors.base.models import BaseDatasource
from superset.constants import CacheRegion
from superset.exceptions import (
InvalidPostProcessingError,
QueryObjectValidationError,
SupersetException,
)
from superset.extensions import cache_manager, security_manager
from superset.models.helpers import QueryResult
from superset.utils import csv
from superset.utils.cache import generate_cache_key, set_and_log_cache
from superset.utils.core import (
DTTM_ALIAS,
error_msg_from_exception,
get_column_names_from_columns,
get_column_names_from_metrics,
get_metric_names,
normalize_dttm_col,
TIME_COMPARISION,
)
from superset.utils.date_parser import get_past_or_future, normalize_time_delta
from superset.views.utils import get_viz
if TYPE_CHECKING:
from superset.common.query_context import QueryContext
from superset.common.query_object import QueryObject
from superset.stats_logger import BaseStatsLogger
config = app.config
stats_logger: BaseStatsLogger = config["STATS_LOGGER"]
logger = logging.getLogger(__name__)
class CachedTimeOffset(TypedDict):
    # Result bundle returned by processing_time_offsets():
    # df          - main dataframe with the offset metric columns joined in
    # queries     - SQL text issued for each time-offset subquery
    # cache_keys  - per-offset cache key, or None when the offset was computed fresh
    df: pd.DataFrame
    queries: List[str]
    cache_keys: List[Optional[str]]
class QueryContextProcessor:
    """
    The query context contains the query object and additional fields necessary
    to retrieve the data payload for a given viz.
    """

    _query_context: QueryContext
    _qc_datasource: BaseDatasource

    def __init__(self, query_context: QueryContext):
        self._query_context = query_context
        self._qc_datasource = query_context.datasource

    cache_type: ClassVar[str] = "df"
    enforce_numerical_metrics: ClassVar[bool] = True

    def get_df_payload(
        self, query_obj: QueryObject, force_cached: Optional[bool] = False
    ) -> Dict[str, Any]:
        """Handles caching around the df payload retrieval"""
        cache_key = self.query_cache_key(query_obj)
        cache = QueryCacheManager.get(
            cache_key, CacheRegion.DATA, self._query_context.force, force_cached,
        )
        if query_obj and cache_key and not cache.is_loaded:
            try:
                # Reject any requested column/metric the datasource cannot serve.
                invalid_columns = [
                    col
                    for col in get_column_names_from_columns(query_obj.columns)
                    + get_column_names_from_metrics(query_obj.metrics or [])
                    if (
                        col not in self._qc_datasource.column_names
                        and col != DTTM_ALIAS
                    )
                ]
                if invalid_columns:
                    raise QueryObjectValidationError(
                        _(
                            "Columns missing in datasource: %(invalid_columns)s",
                            invalid_columns=invalid_columns,
                        )
                    )
                query_result = self.get_query_result(query_obj)
                annotation_data = self.get_annotation_data(query_obj)
                cache.set_query_result(
                    key=cache_key,
                    query_result=query_result,
                    annotation_data=annotation_data,
                    force_query=self._query_context.force,
                    timeout=self.get_cache_timeout(),
                    datasource_uid=self._qc_datasource.uid,
                    region=CacheRegion.DATA,
                )
            except QueryObjectValidationError as ex:
                cache.error_message = str(ex)
                cache.status = QueryStatus.FAILED

        return {
            "cache_key": cache_key,
            "cached_dttm": cache.cache_dttm,
            "cache_timeout": self.get_cache_timeout(),
            "df": cache.df,
            "applied_template_filters": cache.applied_template_filters,
            "annotation_data": cache.annotation_data,
            "error": cache.error_message,
            "is_cached": cache.is_cached,
            "query": cache.query,
            "status": cache.status,
            "stacktrace": cache.stacktrace,
            "rowcount": len(cache.df.index),
            "from_dttm": query_obj.from_dttm,
            "to_dttm": query_obj.to_dttm,
        }

    def query_cache_key(self, query_obj: QueryObject, **kwargs: Any) -> Optional[str]:
        """
        Returns a QueryObject cache key for objects in self.queries
        """
        datasource = self._qc_datasource
        extra_cache_keys = datasource.get_extra_cache_keys(query_obj.to_dict())

        cache_key = (
            query_obj.cache_key(
                datasource=datasource.uid,
                extra_cache_keys=extra_cache_keys,
                rls=security_manager.get_rls_cache_key(datasource),
                changed_on=datasource.changed_on,
                **kwargs,
            )
            if query_obj
            else None
        )
        return cache_key

    def get_query_result(self, query_object: QueryObject) -> QueryResult:
        """Returns a pandas dataframe based on the query object"""
        query_context = self._query_context
        # Here, we assume that all the queries will use the same datasource, which is
        # a valid assumption for current setting. In the long term, we may
        # support multiple queries from different data sources.

        # The datasource here can be different backend but the interface is common
        result = query_context.datasource.query(query_object.to_dict())
        query = result.query + ";\n\n"

        df = result.df
        # Transform the timestamp we received from database to pandas supported
        # datetime format. If no python_date_format is specified, the pattern will
        # be considered as the default ISO date format
        # If the datetime format is unix, the parse will use the corresponding
        # parsing logic
        if not df.empty:
            df = self.normalize_df(df, query_object)

            if query_object.time_offsets:
                time_offsets = self.processing_time_offsets(df, query_object)
                df = time_offsets["df"]
                queries = time_offsets["queries"]

                query += ";\n\n".join(queries)
                query += ";\n\n"

            # Re-raising QueryObjectValidationError
            try:
                df = query_object.exec_post_processing(df)
            except InvalidPostProcessingError as ex:
                raise QueryObjectValidationError from ex

        result.df = df
        result.query = query
        result.from_dttm = query_object.from_dttm
        result.to_dttm = query_object.to_dttm
        return result

    def normalize_df(self, df: pd.DataFrame, query_object: QueryObject) -> pd.DataFrame:
        """Normalize the time column and metric dtypes of a raw result frame."""
        datasource = self._qc_datasource
        timestamp_format = None
        if datasource.type == "table":
            dttm_col = datasource.get_column(query_object.granularity)
            if dttm_col:
                timestamp_format = dttm_col.python_date_format

        normalize_dttm_col(
            df=df,
            timestamp_format=timestamp_format,
            offset=datasource.offset,
            time_shift=query_object.time_shift,
        )

        if self.enforce_numerical_metrics:
            df_utils.df_metrics_to_num(df, query_object)

        # Infinities break serialization and cache round-trips; store as NaN.
        df.replace([np.inf, -np.inf], np.nan, inplace=True)
        return df

    def processing_time_offsets(  # pylint: disable=too-many-locals
        self, df: pd.DataFrame, query_object: QueryObject,
    ) -> CachedTimeOffset:
        """Run one extra query per requested time offset and join the shifted
        metrics onto *df* (caching each offset slice individually)."""
        query_context = self._query_context
        # ensure query_object is immutable
        query_object_clone = copy.copy(query_object)
        queries: List[str] = []
        cache_keys: List[Optional[str]] = []
        rv_dfs: List[pd.DataFrame] = [df]

        time_offsets = query_object.time_offsets
        outer_from_dttm = query_object.from_dttm
        outer_to_dttm = query_object.to_dttm
        for offset in time_offsets:
            try:
                query_object_clone.from_dttm = get_past_or_future(
                    offset, outer_from_dttm,
                )
                query_object_clone.to_dttm = get_past_or_future(offset, outer_to_dttm)
            except ValueError as ex:
                raise QueryObjectValidationError(str(ex)) from ex
            # make sure subquery use main query where clause
            query_object_clone.inner_from_dttm = outer_from_dttm
            query_object_clone.inner_to_dttm = outer_to_dttm
            query_object_clone.time_offsets = []
            query_object_clone.post_processing = []

            if not query_object.from_dttm or not query_object.to_dttm:
                raise QueryObjectValidationError(
                    _(
                        "An enclosed time range (both start and end) must be specified "
                        "when using a Time Comparison."
                    )
                )
            # `offset` is added to the hash function
            cache_key = self.query_cache_key(query_object_clone, time_offset=offset)
            cache = QueryCacheManager.get(
                cache_key, CacheRegion.DATA, query_context.force
            )
            # whether hit on the cache
            if cache.is_loaded:
                rv_dfs.append(cache.df)
                queries.append(cache.query)
                cache_keys.append(cache_key)
                continue

            query_object_clone_dct = query_object_clone.to_dict()
            # rename metrics: SUM(value) => SUM(value) 1 year ago
            metrics_mapping = {
                metric: TIME_COMPARISION.join([metric, offset])
                for metric in get_metric_names(
                    query_object_clone_dct.get("metrics", [])
                )
            }
            join_keys = [col for col in df.columns if col not in metrics_mapping.keys()]

            result = self._qc_datasource.query(query_object_clone_dct)
            queries.append(result.query)
            cache_keys.append(None)

            offset_metrics_df = result.df
            if offset_metrics_df.empty:
                offset_metrics_df = pd.DataFrame(
                    {
                        col: [np.NaN]
                        for col in join_keys + list(metrics_mapping.values())
                    }
                )
            else:
                # 1. normalize df, set dttm column
                offset_metrics_df = self.normalize_df(
                    offset_metrics_df, query_object_clone
                )

                # 2. rename extra query columns
                offset_metrics_df = offset_metrics_df.rename(columns=metrics_mapping)

                # 3. set time offset for index
                # TODO: add x-axis to QueryObject, potentially as an array for
                #  multi-dimensional charts
                granularity = query_object.granularity
                index = granularity if granularity in df.columns else DTTM_ALIAS
                offset_metrics_df[index] = offset_metrics_df[index] - DateOffset(
                    **normalize_time_delta(offset)
                )

            # df left join `offset_metrics_df`
            offset_df = df_utils.left_join_df(
                left_df=df, right_df=offset_metrics_df, join_keys=join_keys,
            )
            offset_slice = offset_df[metrics_mapping.values()]

            # set offset_slice to cache and stack.
            value = {
                "df": offset_slice,
                "query": result.query,
            }
            cache.set(
                key=cache_key,
                value=value,
                timeout=self.get_cache_timeout(),
                datasource_uid=query_context.datasource.uid,
                region=CacheRegion.DATA,
            )
            rv_dfs.append(offset_slice)

        rv_df = pd.concat(rv_dfs, axis=1, copy=False) if time_offsets else df
        return CachedTimeOffset(df=rv_df, queries=queries, cache_keys=cache_keys)

    def get_data(self, df: pd.DataFrame) -> Union[str, List[Dict[str, Any]]]:
        """Serialize *df* as CSV text or a list of row dicts, per result_format."""
        if self._query_context.result_format == ChartDataResultFormat.CSV:
            include_index = not isinstance(df.index, pd.RangeIndex)
            columns = list(df.columns)
            verbose_map = self._qc_datasource.data.get("verbose_map", {})
            if verbose_map:
                df.columns = [verbose_map.get(column, column) for column in columns]
            result = csv.df_to_escaped_csv(
                df, index=include_index, **config["CSV_EXPORT"]
            )
            return result or ""

        return df.to_dict(orient="records")

    def get_payload(
        self, cache_query_context: Optional[bool] = False, force_cached: bool = False,
    ) -> Dict[str, Any]:
        """Returns the query results with both metadata and data"""

        # Get all the payloads from the QueryObjects
        query_results = [
            get_query_results(
                query_obj.result_type or self._query_context.result_type,
                self._query_context,
                query_obj,
                force_cached,
            )
            for query_obj in self._query_context.queries
        ]
        return_value = {"queries": query_results}

        if cache_query_context:
            cache_key = self.cache_key()
            set_and_log_cache(
                cache_manager.cache,
                cache_key,
                {"data": self._query_context.cache_values},
                self.get_cache_timeout(),
            )
            return_value["cache_key"] = cache_key  # type: ignore

        return return_value

    def get_cache_timeout(self) -> int:
        """Cache TTL for this context, falling back to the global default."""
        cache_timeout_rv = self._query_context.get_cache_timeout()
        if cache_timeout_rv:
            return cache_timeout_rv
        return config["CACHE_DEFAULT_TIMEOUT"]

    def cache_key(self, **extra: Any) -> str:
        """
        The QueryContext cache key is made out of the key/values from
        self.cached_values, plus any other key/values in `extra`. It includes only data
        required to rehydrate a QueryContext object.
        """
        key_prefix = "qc-"
        cache_dict = self._query_context.cache_values.copy()
        cache_dict.update(extra)
        return generate_cache_key(cache_dict, key_prefix)

    def get_annotation_data(self, query_obj: QueryObject) -> Dict[str, Any]:
        """Collect annotation payloads (native layers plus chart-sourced
        line/table layers) keyed by layer name."""
        annotation_data: Dict[str, Any] = self.get_native_annotation_data(query_obj)
        for annotation_layer in [
            layer
            for layer in query_obj.annotation_layers
            if layer["sourceType"] in ("line", "table")
        ]:
            name = annotation_layer["name"]
            annotation_data[name] = self.get_viz_annotation_data(
                annotation_layer, self._query_context.force
            )
        return annotation_data

    @staticmethod
    def get_native_annotation_data(query_obj: QueryObject) -> Dict[str, Any]:
        """Fetch records for all NATIVE-sourced annotation layers of *query_obj*."""
        annotation_data = {}
        annotation_layers = [
            layer
            for layer in query_obj.annotation_layers
            if layer["sourceType"] == "NATIVE"
        ]
        layer_ids = [layer["value"] for layer in annotation_layers]
        layer_objects = {
            layer_object.id: layer_object
            for layer_object in AnnotationLayerDAO.find_by_ids(layer_ids)
        }

        # annotations
        for layer in annotation_layers:
            layer_id = layer["value"]
            layer_name = layer["name"]
            columns = [
                "start_dttm",
                "end_dttm",
                "short_descr",
                "long_descr",
                "json_metadata",
            ]
            layer_object = layer_objects[layer_id]
            records = [
                {column: getattr(annotation, column) for column in columns}
                for annotation in layer_object.annotation
            ]
            result = {"columns": columns, "records": records}
            annotation_data[layer_name] = result
        return annotation_data

    @staticmethod
    def get_viz_annotation_data(
        annotation_layer: Dict[str, Any], force: bool
    ) -> Dict[str, Any]:
        """Render the data payload of the chart backing an annotation layer."""
        chart = ChartDAO.find_by_id(annotation_layer["value"])
        # Bug fix: the None-check must come before touching chart.form_data;
        # previously a missing chart raised AttributeError instead of the
        # intended QueryObjectValidationError.
        if not chart:
            raise QueryObjectValidationError(_("The chart does not exist"))
        form_data = chart.form_data.copy()
        try:
            viz_obj = get_viz(
                datasource_type=chart.datasource.type,
                datasource_id=chart.datasource.id,
                form_data=form_data,
                force=force,
            )
            payload = viz_obj.get_payload()
            return payload["data"]
        except SupersetException as ex:
            raise QueryObjectValidationError(error_msg_from_exception(ex)) from ex

    def raise_for_access(self) -> None:
        """
        Raise an exception if the user cannot access the resource.

        :raises SupersetSecurityException: If the user cannot access the resource
        """
        for query in self._query_context.queries:
            query.validate()
        security_manager.raise_for_access(query_context=self._query_context)
| 38.729508 | 88 | 0.617725 |
7954f9dd58bf9eb5823a0fd415d17fe2a88be583 | 3,434 | py | Python | mbed-client/factory-configurator-client/TESTS/generate-test-data/generate_test_utils.py | ghsecuritylab/mini-client | 955e1d213ec0784101061075932dbbac97d98cd9 | [
"Apache-2.0"
] | null | null | null | mbed-client/factory-configurator-client/TESTS/generate-test-data/generate_test_utils.py | ghsecuritylab/mini-client | 955e1d213ec0784101061075932dbbac97d98cd9 | [
"Apache-2.0"
] | null | null | null | mbed-client/factory-configurator-client/TESTS/generate-test-data/generate_test_utils.py | ghsecuritylab/mini-client | 955e1d213ec0784101061075932dbbac97d98cd9 | [
"Apache-2.0"
] | 1 | 2020-03-06T22:23:26.000Z | 2020-03-06T22:23:26.000Z | # ----------------------------------------------------------------------------
# Copyright 2016-2017 ARM Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import pickle
import re
import sys
import shutil
import subprocess
import tempfile
import argparse
import re
from subprocess import Popen, PIPE, STDOUT
assets_dict = dict()
cbor_dict = dict()
def save_to_file(path, filename, content, filetype=None):
complete_name = os.path.join(path, filename)
if filetype == "bin": #binary file output
handler = open(complete_name + ".bin", 'wb')
elif filetype == "text" : #text output
handler = open(complete_name + ".hex", 'w')
else:
handler = open(complete_name, 'w')
handler.write(content)
handler.close()
def shell_process(cmd, out_dir = None, out_name = None, out=None,input=None):
"""
Execute shell command and get its output and input as optional parameter
@params:
cmd - Required : command to execute
Retruns stdout of the process
"""
out_data= ""
print cmd
if (out and input):
p = Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
p.stdin.write(input)
out_data, error = p.communicate()
elif out:
p = Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out_data, error = p.communicate()
elif input:
p = Popen(cmd, shell=True, stdin=subprocess.PIPE,stderr=subprocess.PIPE)
p.stdin.write(input)
error = p.communicate()
else :
p = Popen(cmd, shell=True, stderr=subprocess.PIPE)
error = p.communicate()
if p.returncode != 0:
sys.stdout.write("\n The command {0} failed, please check the command, error is :\n {1}".format(cmd, error))
exit(1)
if (out):
save_to_file(out_dir, out_name, out_data)
assets_dict[out_name] = out_data
def parse_command_line():
parser = argparse.ArgumentParser(description = 'Generate test data')
parser.add_argument('-cf', '--c_file',help='path to c data file')
parser.add_argument('-hf', '--h_file',help='path to h data file')
parser.add_argument('-d','--data_dir',help = 'directory of data c and h files')
parser.add_argument('-f','--files_dir',help = 'directory of out data files')
parser.add_argument('-p','--prefix',help = 'prefix of data buffers')
if (len(sys.argv)) != 6 :
parser.print_help()
exit (1)
return parser.parse_args()
def read_file(file_path, mode='r'):
print "The path is:"+file_path
with open(file_path, mode=mode) as file:
data = file.read()
return data
def write_data_to_file(file_path,data):
open_mode = 'wb'
with open(file_path, open_mode) as f:
f.write(data)
print('wrote data into file: ' + file_path)
| 33.666667 | 116 | 0.637158 |
7954fa17285bdad5ae5291499ec86a44ea1b2732 | 430 | py | Python | tests/unit/ses/conftest.py | mixja/ses-custom-resources | 1523c632fd7f4c4ef0e55533e8f06376186205d8 | [
"MIT"
] | null | null | null | tests/unit/ses/conftest.py | mixja/ses-custom-resources | 1523c632fd7f4c4ef0e55533e8f06376186205d8 | [
"MIT"
] | null | null | null | tests/unit/ses/conftest.py | mixja/ses-custom-resources | 1523c632fd7f4c4ef0e55533e8f06376186205d8 | [
"MIT"
] | null | null | null | from importlib import reload
import pytest
@pytest.fixture
def app(mocker):
"""
Fixture for node app
"""
# Patch environment variables
mocker.patch.dict('os.environ', values=dict(
MAX_WORKERS='4'
))
from app import ses as app
# Override app parameters with mocks here
# app.client = mock_dynamo_client
# app.client.get.return_value = mock_event_data
yield app
reload(app) | 21.5 | 51 | 0.669767 |
7954fa504bb455cb739711aeb41b27121e4d9ddc | 1,970 | py | Python | src/goodbooks-10k/utils/evaluate.py | ds-wook/goodbooks-10k | 26ec6256405b6e8ecd04d4bb80c063ea43b1fb8c | [
"Apache-2.0"
] | 10 | 2021-01-29T09:21:28.000Z | 2021-09-05T13:24:58.000Z | src/goodbooks-10k/utils/evaluate.py | ds-wook/goodbooks-10k | 26ec6256405b6e8ecd04d4bb80c063ea43b1fb8c | [
"Apache-2.0"
] | null | null | null | src/goodbooks-10k/utils/evaluate.py | ds-wook/goodbooks-10k | 26ec6256405b6e8ecd04d4bb80c063ea43b1fb8c | [
"Apache-2.0"
] | null | null | null | import six
import math
from typing import Dict, List
class Evaluate:
def __init__(
self, recs: Dict[int, List[int]], gt: Dict[int, List[int]], topn: int = 100
):
self.recs = recs
self.gt = gt
self.topn = topn
def _ndcg(self) -> float:
Q, S = 0.0, 0.0
for u, seen in six.iteritems(self.gt):
seen = list(set(seen))
rec = self.recs.get(u, [])
if not rec or len(seen) == 0:
continue
dcg = 0.0
idcg = sum(
[1.0 / math.log(i + 2, 2) for i in range(min(len(seen), len(rec)))]
)
for i, r in enumerate(rec):
if r not in seen:
continue
rank = i + 1
dcg += 1.0 / math.log(rank + 1, 2)
ndcg = dcg / idcg
S += ndcg
Q += 1
return S / Q
def _map(self) -> float:
n, ap = 0.0, 0.0
for u, seen in six.iteritems(self.gt):
seen = list(set(seen))
rec = self.recs.get(u, [])
if not rec or len(seen) == 0:
continue
_ap, correct = 0.0, 0.0
for i, r in enumerate(rec):
if r in seen:
correct += 1
_ap += correct / (i + 1.0)
_ap /= min(len(seen), len(rec))
ap += _ap
n += 1.0
return ap / n
def _entropy_diversity(self) -> float:
sz = float(len(self.recs)) * self.topn
freq = {}
for u, rec in six.iteritems(self.recs):
for r in rec:
freq[r] = freq.get(r, 0) + 1
ent = -sum([v / sz * math.log(v / sz) for v in six.itervalues(freq)])
return ent
def _evaluate(self):
print(f"MAP@{self.topn}: {self._map()}")
print(f"NDCG@{self.topn}: {self._ndcg()}")
print(f"EntDiv@{self.topn}: {self._entropy_diversity()}")
| 29.402985 | 83 | 0.443655 |
7954faa5e93e168704286e6ca4a0418ae123332f | 25,413 | py | Python | fastai/data_block.py | wrrnwng/fastai | ccd3ec66d5e46c97ab99313b68d35a0d7dd4fb91 | [
"Apache-2.0"
] | null | null | null | fastai/data_block.py | wrrnwng/fastai | ccd3ec66d5e46c97ab99313b68d35a0d7dd4fb91 | [
"Apache-2.0"
] | null | null | null | fastai/data_block.py | wrrnwng/fastai | ccd3ec66d5e46c97ab99313b68d35a0d7dd4fb91 | [
"Apache-2.0"
] | null | null | null | from .torch_core import *
from .basic_data import *
from .layers import *
__all__ = ['ItemList', 'CategoryList', 'MultiCategoryList', 'MultiCategoryProcessor', 'LabelList', 'ItemLists', 'get_files',
'PreProcessor', 'LabelLists', 'FloatList', 'CategoryProcessor']
def _decode(df):
return np.array([[df.columns[i] for i,t in enumerate(x) if t==1] for x in df.values], dtype=np.object)
def _maybe_squeeze(arr): return (arr if is1d(arr) else np.squeeze(arr))
def _get_files(parent, p, f, extensions):
p = Path(p)#.relative_to(parent)
res = [p/o for o in f if not o.startswith('.')
and (extensions is None or f'.{o.split(".")[-1].lower()}' in extensions)]
return res
def get_files(path:PathOrStr, extensions:Collection[str]=None, recurse:bool=False,
include:Optional[Collection[str]]=None)->FilePathList:
"Return list of files in `path` that have a suffix in `extensions`; optionally `recurse`."
if recurse:
res = []
for p,d,f in os.walk(path):
# skip hidden dirs
if include is not None: d[:] = [o for o in d if o in include]
else: d[:] = [o for o in d if not o.startswith('.')]
res += _get_files(path, p, f, extensions)
return res
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
return _get_files(path, path, f, extensions)
class PreProcessor():
"Basic class for a processor that will be applied to items at the end of the data block API."
def __init__(self, ds:Collection=None): self.ref_ds = ds
def process_one(self, item:Any): return item
def process(self, ds:Collection): ds.items = array([self.process_one(item) for item in ds.items])
class ItemList():
_bunch,_processor,_label_cls,_square_show = DataBunch,None,None,False
"A collection of items with `__len__` and `__getitem__` with `ndarray` indexing semantics."
def __init__(self, items:Iterator, path:PathOrStr='.',
label_cls:Callable=None, xtra:Any=None, processor:PreProcessor=None, x:'ItemList'=None, **kwargs):
self.path = Path(path)
self.num_parts = len(self.path.parts)
self.items,self.x = array(items, dtype=object),x
self.label_cls,self.xtra,self.processor = ifnone(label_cls,self._label_cls),xtra,processor
self._label_list,self._split = LabelList,ItemLists
self.copy_new = ['x', 'label_cls', 'path']
self.__post_init__()
def __post_init__(self): pass
def __len__(self)->int: return len(self.items) or 1
def get(self, i)->Any:
"Subclass if you want to customize how to create item `i` from `self.items`."
return self.items[i]
def __repr__(self)->str:
items = [self[i] for i in range(min(5,len(self.items)))]
return f'{self.__class__.__name__} ({len(self)} items)\n{items}...\nPath: {self.path}'
def process(self, processor=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self
def process_one(self, item, processor=None):
"Apply `processor` or `self.processor` to `item`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: item = p.process_one(item)
return item
def analyze_pred(self, pred:Tensor):
"Called on `pred` before `reconstruct` for additional preprocessing."
return pred
def reconstruct(self, t:Tensor, x:Tensor=None):
"Reconstuct one of the underlying item for its data `t`."
return self[0].reconstruct(t,x) if has_arg(self[0].reconstruct, 'x') else self[0].reconstruct(t)
def new(self, items:Iterator, processor:PreProcessor=None, **kwargs)->'ItemList':
"Create a new `ItemList` from `items`, keeping the same attributes."
processor = ifnone(processor, self.processor)
copy_d = {o:getattr(self,o) for o in self.copy_new}
return self.__class__(items=items, processor=processor, **copy_d, **kwargs)
def __getitem__(self,idxs:int)->Any:
if isinstance(try_int(idxs), int): return self.get(idxs)
else: return self.new(self.items[idxs], xtra=index_row(self.xtra, idxs))
@classmethod
def from_folder(cls, path:PathOrStr, extensions:Collection[str]=None, recurse=True,
include:Optional[Collection[str]]=None, **kwargs)->'ItemList':
"Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`. `recurse` determines if we search subfolders."
path = Path(path)
return cls(get_files(path, extensions, recurse=recurse, include=include), path=path, **kwargs)
@classmethod
def from_df(cls, df:DataFrame, path:PathOrStr='.', cols:IntsOrStrs=0, **kwargs)->'ItemList':
"Create an `ItemList` in `path` from the inputs in the `cols` of `df`."
inputs = df.iloc[:,df_names_to_idx(cols, df)]
res = cls(items=_maybe_squeeze(inputs.values), path=path, xtra = df, **kwargs)
return res
@classmethod
def from_csv(cls, path:PathOrStr, csv_name:str, cols:IntsOrStrs=0, header:str='infer', **kwargs)->'ItemList':
"Create an `ItemList` in `path` from the inputs in the `cols` of `path/csv_name` opened with `header`."
df = pd.read_csv(Path(path)/csv_name, header=header)
return cls.from_df(df, path=path, cols=cols, **kwargs)
def _relative_item_path(self, i): return self.items[i].relative_to(self.path)
def _relative_item_paths(self): return [self._relative_item_path(i) for i in range_of(self.items)]
def use_partial_data(self, sample_pct:float=1.0, seed:int=None)->'ItemList':
"Use only a sample of `sample_pct`of the full dataset and an optional `seed`."
if seed is not None: np.random.seed(seed)
rand_idx = np.random.permutation(range_of(self))
cut = int(sample_pct * len(self))
return self[rand_idx[:cut]]
def to_text(self, fn:str):
"Save `self.items` to `fn` in `self.path`."
with open(self.path/fn, 'w') as f: f.writelines([f'{o}\n' for o in self._relative_item_paths()])
def filter_by_func(self, func:Callable)->'ItemList':
"Only keep elements for which `func` returns `True`."
self.items = array([o for o in self.items if func(o)])
return self
def filter_by_folder(self, include=None, exclude=None):
"Only keep filenames in `include` folder or reject the ones in `exclude`."
include,exclude = listify(include),listify(exclude)
def _inner(o):
n = o.relative_to(self.path).parts[0]
if include and not n in include: return False
if exclude and n in exclude: return False
return True
return self.filter_by_func(_inner)
def filter_by_rand(self, p:float, seed:int=None):
"Keep random sample of `items` with probability `p` and an optional `seed`."
if seed is not None: np.random.seed(seed)
return self.filter_by_func(lambda o: rand_bool(p))
def split_by_list(self, train, valid):
"Split the data between `train` and `valid`."
return self._split(self.path, train, valid)
def split_by_idxs(self, train_idx, valid_idx):
"Split the data between `train_idx` and `valid_idx`."
return self.split_by_list(self[train_idx], self[valid_idx])
def split_by_idx(self, valid_idx:Collection[int])->'ItemLists':
"Split the data according to the indexes in `valid_idx`."
#train_idx = [i for i in range_of(self.items) if i not in valid_idx]
train_idx = np.setdiff1d(arange_of(self.items), valid_idx)
return self.split_by_idxs(train_idx, valid_idx)
def _get_by_folder(self, name):
return [i for i in range_of(self) if self.items[i].parts[self.num_parts]==name]
def split_by_folder(self, train:str='train', valid:str='valid')->'ItemLists':
"Split the data depending on the folder (`train` or `valid`) in which the filenames are."
return self.split_by_idxs(self._get_by_folder(train), self._get_by_folder(valid))
def random_split_by_pct(self, valid_pct:float=0.2, seed:int=None)->'ItemLists':
"Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed."
if seed is not None: np.random.seed(seed)
rand_idx = np.random.permutation(range_of(self))
cut = int(valid_pct * len(self))
return self.split_by_idx(rand_idx[:cut])
def split_by_valid_func(self, func:Callable)->'ItemLists':
"Split the data by result of `func` (which returns `True` for validation set)."
valid_idx = [i for i,o in enumerate(self.items) if func(o)]
return self.split_by_idx(valid_idx)
def split_by_files(self, valid_names:'ItemList')->'ItemLists':
"Split the data by using the names in `valid_names` for validation."
return self.split_by_valid_func(lambda o: o.name in valid_names)
def split_by_fname_file(self, fname:PathOrStr, path:PathOrStr=None)->'ItemLists':
"Split the data by using the names in `fname` for the validation set. `path` will override `self.path`."
path = Path(ifnone(path, self.path))
valid_names = loadtxt_str(self.path/fname)
return self.split_by_files(valid_names)
def split_from_df(self, col:IntsOrStrs=2):
"Split the data from the `col` in the dataframe in `self.xtra`."
valid_idx = np.where(self.xtra.iloc[:,df_names_to_idx(col, self.xtra)])[0]
return self.split_by_idx(valid_idx)
def get_label_cls(self, labels, label_cls:Callable=None, sep:str=None, **kwargs):
"Return `label_cls` or guess one from the first element of `labels`."
if label_cls is not None: return label_cls
if self.label_cls is not None: return self.label_cls
it = index_row(labels,0)
if sep is not None: return MultiCategoryList
if isinstance(it, (float, np.float32)): return FloatList
if isinstance(try_int(it), (str,int)): return CategoryList
if isinstance(it, Collection): return MultiCategoryList
return self.__class__
def label_from_list(self, labels:Iterator, **kwargs)->'LabelList':
"Label `self.items` with `labels`."
labels = array(labels, dtype=object)
label_cls = self.get_label_cls(labels, **kwargs)
y = label_cls(labels, path=self.path, **kwargs)
res = self._label_list(x=self, y=y)
return res
def label_from_df(self, cols:IntsOrStrs=1, **kwargs):
"Label `self.items` from the values in `cols` in `self.xtra`."
labels = _maybe_squeeze(self.xtra.iloc[:,df_names_to_idx(cols, self.xtra)])
return self.label_from_list(labels, **kwargs)
def label_const(self, const:Any=0, **kwargs)->'LabelList':
"Label every item with `const`."
return self.label_from_func(func=lambda o: const, **kwargs)
def label_empty(self):
"Label every item with an `EmptyLabel`."
return self.label_from_func(func=lambda o: 0., label_cls=EmptyLabelList)
def label_from_func(self, func:Callable, **kwargs)->'LabelList':
"Apply `func` to every input to get its label."
return self.label_from_list([func(o) for o in self.items], **kwargs)
def label_from_folder(self, **kwargs)->'LabelList':
"Give a label to each filename depending on its folder."
return self.label_from_func(func=lambda o: o.parts[-2], **kwargs)
def label_from_re(self, pat:str, full_path:bool=False, **kwargs)->'LabelList':
"Apply the re in `pat` to determine the label of every filename. If `full_path`, search in the full name."
pat = re.compile(pat)
def _inner(o):
s = str(os.path.join(self.path,o) if full_path else o)
res = pat.search(s)
assert res,f'Failed to find "{pat}" in "{s}"'
return res.group(1)
return self.label_from_func(_inner, **kwargs)
class EmptyLabelList(ItemList):
"Basic `ItemList` for dummy labels."
def get(self, i): return EmptyLabel()
def reconstruct(self, t:Tensor, x:Tensor=None):
if len(t.size()) == 0: return EmptyLabel()
return self.x.reconstruct(t,x) if has_arg(self.x.reconstruct, 'x') else self.x.reconstruct(t)
class CategoryProcessor(PreProcessor):
"Processor that create `classes` from `ds.items` and handle the mapping."
def __init__(self, ds:ItemList): self.create_classes(ds.classes)
def create_classes(self, classes):
self.classes = classes
if classes is not None: self.c2i = {v:k for k,v in enumerate(classes)}
def generate_classes(self, items):
"Generate classes from `items` by taking the sorted unique values."
return uniqueify(items)
def process_one(self,item): return self.c2i.get(item,None)
def process(self, ds):
if self.classes is None: self.create_classes(self.generate_classes(ds.items))
ds.classes = self.classes
ds.c2i = self.c2i
super().process(ds)
def __getstate__(self): return {'classes':self.classes}
def __setstate__(self, state:dict): self.create_classes(state['classes'])
class CategoryListBase(ItemList):
"Basic `ItemList` for classification."
def __init__(self, items:Iterator, classes:Collection=None,**kwargs):
self.classes=classes
super().__init__(items, **kwargs)
@property
def c(self): return len(self.classes)
def new(self, items, classes=None, **kwargs):
return super().new(items, classes=ifnone(classes, self.classes), **kwargs)
class CategoryList(CategoryListBase):
"Basic `ItemList` for single classification labels."
_processor=CategoryProcessor
def __init__(self, items:Iterator, classes:Collection=None, **kwargs):
super().__init__(items, classes=classes, **kwargs)
self.loss_func = CrossEntropyFlat()
def get(self, i):
o = self.items[i]
if o is None: return None
return Category(o, self.classes[o])
def analyze_pred(self, pred, thresh:float=0.5): return pred.argmax()
def reconstruct(self, t):
return Category(t, self.classes[t])
class MultiCategoryProcessor(CategoryProcessor):
"Processor that create `classes` from `ds.items` and handle the mapping."
def process_one(self,item): return [self.c2i.get(o,None) for o in item]
def generate_classes(self, items):
"Generate classes from `items` by taking the sorted unique values."
classes = set()
for c in items: classes = classes.union(set(c))
classes = list(classes)
classes.sort()
return classes
class MultiCategoryList(CategoryListBase):
"Basic `ItemList` for multi-classification labels."
_processor=MultiCategoryProcessor
def __init__(self, items:Iterator, classes:Collection=None, sep:str=None, **kwargs):
if sep is not None: items = array(csv.reader(items.astype(str), delimiter=sep))
super().__init__(items, classes=classes, **kwargs)
self.loss_func = BCEWithLogitsFlat()
def get(self, i):
o = self.items[i]
if o is None: return None
return MultiCategory(one_hot(o, self.c), [self.classes[p] for p in o], o)
def analyze_pred(self, pred, thresh:float=0.5):
return (pred >= thresh).float()
def reconstruct(self, t):
o = [i for i in range(self.c) if t[i] == 1.]
return MultiCategory(t, [self.classes[p] for p in o], o)
class FloatList(ItemList):
"`ItemList` suitable for storing the floats in items for regression. Will add a `log` if True"
def __init__(self, items:Iterator, log:bool=False, **kwargs):
super().__init__(np.array(items, dtype=np.float32), **kwargs)
self.log = log
self.copy_new.append('log')
self.c = self.items.shape[1] if len(self.items.shape) > 1 else 1
self.loss_func = MSELossFlat()
def get(self, i):
o = super().get(i)
return FloatItem(log(o) if self.log else o)
def reconstruct(self,t): return FloatItem(t.item())
class ItemLists():
"An `ItemList` for each of `train` and `valid` (optional `test`)."
def __init__(self, path:PathOrStr, train:ItemList, valid:ItemList, test:ItemList=None):
self.path,self.train,self.valid,self.test = Path(path),train,valid,test
if isinstance(self.train, LabelList): self.__class__ = LabelLists
def __repr__(self)->str:
return f'{self.__class__.__name__};\n\nTrain: {self.train};\n\nValid: {self.valid};\n\nTest: {self.test}'
def __getattr__(self, k):
ft = getattr(self.train, k)
if not isinstance(ft, Callable): return ft
fv = getattr(self.valid, k)
assert isinstance(fv, Callable)
def _inner(*args, **kwargs):
self.train = ft(*args, **kwargs)
assert isinstance(self.train, LabelList)
self.valid = fv(*args, **kwargs)
self.__class__ = LabelLists
self.process()
return self
return _inner
@property
def lists(self):
res = [self.train,self.valid]
if self.test is not None: res.append(self.test)
return res
def label_from_lists(self, train_labels:Iterator, valid_labels:Iterator, label_cls:Callable=None, **kwargs)->'LabelList':
"Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default."
label_cls = self.train.get_label_cls(train_labels, label_cls)
self.train = self.train._label_list(x=self.train, y=label_cls(train_labels, **kwargs))
self.valid = self.valid._label_list(x=self.valid, y=self.train.y.new(valid_labels, **kwargs))
self.__class__ = LabelLists
self.process()
return self
def transform(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the xs of the train and validation set."
if not tfms: return self
self.train.transform(tfms[0], **kwargs)
self.valid.transform(tfms[1], **kwargs)
if self.test: self.test.transform(tfms[1], **kwargs)
return self
def transform_y(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the ys of the train and validation set."
if not tfms: tfms=(None,None)
self.train.transform_y(tfms[0], **kwargs)
self.valid.transform_y(tfms[1], **kwargs)
if self.test: self.test.transform_y(tfms[1], **kwargs)
return self
class LabelLists(ItemLists):
"A `LabelList` for each of `train` and `valid` (optional `test`)."
def get_processors(self):
"Read the default class processors if none have been set."
procs_x,procs_y = listify(self.train.x._processor),listify(self.train.y._processor)
xp = ifnone(self.train.x.processor, [p(ds=self.train.x) for p in procs_x])
yp = ifnone(self.train.y.processor, [p(ds=self.train.y) for p in procs_y])
return xp,yp
def process(self):
"Process the inner datasets."
xp,yp = self.get_processors()
for i,ds in enumerate(self.lists): ds.process(xp, yp, filter_missing_y=i==0)
return self
def databunch(self, path:PathOrStr=None, **kwargs)->'ImageDataBunch':
"Create an `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`."
path = Path(ifnone(path, self.path))
return self.x._bunch.create(self.train, self.valid, test_ds=self.test, path=path, **kwargs)
def add_test(self, items:Iterator, label:Any=None):
"Add test set containing `items` with an arbitrary `label`"
# if no label passed, use label of first training item
if label is None: label = self.train[0][1].obj
labels = [label for _ in range_of(items)]
if isinstance(items, ItemList): self.test = self.valid.new(items.items, labels, xtra=items.xtra)
else: self.test = self.valid.new(items, labels)
return self
def add_test_folder(self, test_folder:str='test', label:Any=None):
"Add test set containing items from `test_folder` and an arbitrary `label`."
items = self.x.__class__.from_folder(self.path/test_folder)
return self.add_test(items.items, label=label)
class LabelList(Dataset):
"A list of inputs `x` and labels `y` with optional `tfms`."
def __init__(self, x:ItemList, y:ItemList, tfms:TfmList=None, tfm_y:bool=False, **kwargs):
self.x,self.y,self.tfm_y = x,y,tfm_y
self.y.x = x
self.item=None
self.transform(tfms, **kwargs)
def __len__(self)->int: return len(self.x) if self.item is None else 1
@contextmanager
def set_item(self,item):
"For inference, will replace the dataset with one that only contains `item`."
self.item = self.x.process_one(item)
yield None
self.item = None
def __repr__(self)->str:
x = f'{self.x}' # force this to happen first
return f'{self.__class__.__name__}\ny: {self.y}\nx: {x}'
def predict(self, res):
"Delegates predict call on `res` to `self.y`."
return self.y.predict(res)
@property
def c(self): return self.y.c
def new(self, x, y, **kwargs)->'LabelList':
if isinstance(x, ItemList):
return self.__class__(x, y, tfms=self.tfms, tfm_y=self.tfm_y, **self.tfmargs)
else:
return self.new(self.x.new(x, **kwargs), self.y.new(y, **kwargs)).process()
def __getattr__(self,k:str)->Any:
res = getattr(self.x, k, None)
return res if res is not None else getattr(self.y, k)
def __getitem__(self,idxs:Union[int,np.ndarray])->'LabelList':
if isinstance(try_int(idxs), int):
if self.item is None: x,y = self.x[idxs],self.y[idxs]
else: x,y = self.item ,0
if self.tfms:
x = x.apply_tfms(self.tfms, **self.tfmargs)
if hasattr(self, 'tfms_y') and self.tfm_y and self.item is None:
y = y.apply_tfms(self.tfms_y, **{**self.tfmargs_y, 'do_resolve':False})
return x,y
else: return self.new(self.x[idxs], self.y[idxs])
def to_df(self)->None:
"Create `pd.DataFrame` containing `items` from `self.x` and `self.y`."
return pd.DataFrame(dict(x=self.x._relative_item_paths(), y=[str(o) for o in self.y]))
def to_csv(self, dest:str)->None:
"Save `self.to_df()` to a CSV file in `self.path`/`dest`."
self.to_df().to_csv(self.path/dest, index=False)
def export(self, fn:PathOrStr):
"Export the minimal state and save it in `fn` to load an empty version for inference."
state = {'x_cls':self.x.__class__, 'x_proc':self.x.processor,
'y_cls':self.y.__class__, 'y_proc':self.y.processor,
'path':self.path}
pickle.dump(state, open(fn, 'wb'))
@classmethod
def load_empty(cls, fn:PathOrStr, tfms:TfmList=None, tfm_y:bool=False, **kwargs):
"Load the sate in `fn` to create an empty `LabelList` for inference."
state = pickle.load(open(fn, 'rb'))
x = state['x_cls']([], path=state['path'], processor=state['x_proc'])
y = state['y_cls']([], path=state['path'], processor=state['y_proc'])
return cls(x, y, tfms=tfms, tfm_y=tfm_y, **kwargs).process()
def process(self, xp=None, yp=None, filter_missing_y:bool=False):
"Launch the processing on `self.x` and `self.y` with `xp` and `yp`."
self.y.process(yp)
if filter_missing_y and (getattr(self.x, 'filter_missing_y', None)):
filt = array([o is None for o in self.y])
if filt.sum()>0: self.x,self.y = self.x[~filt],self.y[~filt]
self.x.process(xp)
return self
@classmethod
def from_lists(cls, path:PathOrStr, inputs, labels)->'LabelList':
"Create a `LabelList` in `path` with `inputs` and `labels`."
inputs,labels = np.array(inputs),np.array(labels)
return cls(np.concatenate([inputs[:,None], labels[:,None]], 1), path)
def transform(self, tfms:TfmList, tfm_y:bool=None, **kwargs):
"Set the `tfms` and `tfm_y` value to be applied to the inputs and targets."
self.tfms,self.tfmargs = tfms,kwargs
if tfm_y is not None: self.tfm_y,self.tfms_y,self.tfmargs_y = tfm_y,tfms,kwargs
return self
def transform_y(self, tfms:TfmList=None, **kwargs):
"Set `tfms` to be applied to the targets only."
self.tfm_y=True
if tfms is None: self.tfms_y,self.tfmargs_y = self.tfms,{**self.tfmargs, **kwargs}
else: self.tfms_y,self.tfmargs_y = tfms,kwargs
return self
@classmethod
def _databunch_load_empty(cls, path, fname:str='export.pkl', tfms:TfmList=None, tfm_y:bool=False, **kwargs):
"Load an empty `DataBunch` from the exported file in `path/fname` with optional `tfms`."
ds = LabelList.load_empty(path/fname, tfms=(None if tfms is None else tfms[1]), tfm_y=tfm_y, **kwargs)
return cls.create(ds,ds,path=path)
DataBunch.load_empty = _databunch_load_empty
| 46.458867 | 141 | 0.649274 |
7954fb50a2b11e88542cf691174dc90a701b0b56 | 54,193 | py | Python | runtime/python/Lib/pathlib.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | runtime/python/Lib/pathlib.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | runtime/python/Lib/pathlib.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
from _collections_abc import Sequence
from errno import EINVAL, ENOENT, ENOTDIR, EBADF, ELOOP
from operator import attrgetter
from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
from urllib.parse import quote_from_bytes as urlquote_from_bytes
supports_symlinks = True
if os.name == 'nt':
import nt
if sys.getwindowsversion()[:2] >= (6, 0):
from nt import _getfinalpathname
else:
supports_symlinks = False
_getfinalpathname = None
else:
nt = None
__all__ = [
"PurePath", "PurePosixPath", "PureWindowsPath",
"Path", "PosixPath", "WindowsPath",
]
#
# Internals
#
# EBADF - guard against macOS `stat` throwing EBADF
_IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF, ELOOP)
_IGNORED_WINERRORS = (
21, # ERROR_NOT_READY - drive exists but is not accessible
123, # ERROR_INVALID_NAME - fix for bpo-35306
1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself
)
def _ignore_error(exception):
return (getattr(exception, 'errno', None) in _IGNORED_ERROS or
getattr(exception, 'winerror', None) in _IGNORED_WINERRORS)
def _is_wildcard_pattern(pat):
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
"""A flavour implements a particular (platform-specific) set of path
semantics."""
def __init__(self):
self.join = self.sep.join
def parse_parts(self, parts):
parsed = []
sep = self.sep
altsep = self.altsep
drv = root = ''
it = reversed(parts)
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv, root, rel = self.splitroot(part)
if sep in rel:
for x in reversed(rel.split(sep)):
if x and x != '.':
parsed.append(sys.intern(x))
else:
if rel and rel != '.':
parsed.append(sys.intern(rel))
if drv or root:
if not drv:
# If no drive is present, try to find one in the previous
# parts. This makes the result of parsing e.g.
# ("C:", "/", "a") reasonably intuitive.
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv = self.splitroot(part)[0]
if drv:
break
break
if drv or root:
parsed.append(drv + root)
parsed.reverse()
return drv, root, parsed
def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
"""
Join the two paths represented by the respective
(drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
"""
if root2:
if not drv2 and drv:
return drv, root2, [drv + root2] + parts2[1:]
elif drv2:
if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
# Same drive => second path is relative to the first
return drv, root, parts + parts2[1:]
else:
# Second path is non-anchored (common case)
return drv, root, parts + parts2
return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
    # Reference for Windows paths can be found at
    # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx

    sep = '\\'
    altsep = '/'
    has_drv = True
    pathmod = ntpath

    # Concrete (I/O-performing) paths of this flavour only work on Windows.
    is_supported = (os.name == 'nt')

    drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
    # Prefix that marks a Win32 "extended" path.
    ext_namespace_prefix = '\\\\?\\'

    # Device names Windows reserves in every directory.
    reserved_names = (
        {'CON', 'PRN', 'AUX', 'NUL'} |
        {'COM%d' % i for i in range(1, 10)} |
        {'LPT%d' % i for i in range(1, 10)}
        )

    # Interesting findings about extended paths:
    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
    #   but '\\?\c:/a' is not
    # - extended paths are always absolute; "relative" extended paths will
    #   fail.

    def splitroot(self, part, sep=sep):
        # Split `part` into (drive, root, rest), handling drive letters
        # ('c:'), UNC shares ('\\host\share') and extended-path prefixes.
        first = part[0:1]
        second = part[1:2]
        if (second == sep and first == sep):
            # XXX extended paths should also disable the collapsing of "."
            # components (according to MSDN docs).
            prefix, part = self._split_extended_path(part)
            first = part[0:1]
            second = part[1:2]
        else:
            prefix = ''
        third = part[2:3]
        if (second == sep and first == sep and third != sep):
            # is a UNC path:
            # vvvvvvvvvvvvvvvvvvvvv root
            # \\machine\mountpoint\directory\etc\...
            # directory ^^^^^^^^^^^^^^
            index = part.find(sep, 2)
            if index != -1:
                index2 = part.find(sep, index + 1)
                # a UNC path can't have two slashes in a row
                # (after the initial two)
                if index2 != index + 1:
                    if index2 == -1:
                        index2 = len(part)
                    if prefix:
                        return prefix + part[1:index2], sep, part[index2+1:]
                    else:
                        return part[:index2], sep, part[index2+1:]
        drv = root = ''
        if second == ':' and first in self.drive_letters:
            drv = part[:2]
            part = part[2:]
            first = third
        if first == sep:
            root = first
            part = part.lstrip(sep)
        return prefix + drv, root, part

    def casefold(self, s):
        # Windows path comparison is case-insensitive.
        return s.lower()

    def casefold_parts(self, parts):
        return [p.lower() for p in parts]

    def compile_pattern(self, pattern):
        # Case-insensitive glob matching, anchored with fullmatch.
        return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch

    def resolve(self, path, strict=False):
        s = str(path)
        if not s:
            return os.getcwd()
        previous_s = None
        if _getfinalpathname is not None:
            if strict:
                return self._ext_to_normal(_getfinalpathname(s))
            else:
                # Non-strict: walk up until a resolvable prefix is found,
                # then re-append the unresolved tail components.
                tail_parts = []  # End of the path after the first one not found
                while True:
                    try:
                        s = self._ext_to_normal(_getfinalpathname(s))
                    except FileNotFoundError:
                        previous_s = s
                        s, tail = os.path.split(s)
                        tail_parts.append(tail)
                        if previous_s == s:
                            # Could not split any further: give up and
                            # return the path unchanged.
                            return path
                    else:
                        return os.path.join(s, *reversed(tail_parts))
        # Means fallback on absolute
        return None

    def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
        # Strip a leading '\\?\' (and '\\?\UNC\') prefix; return
        # (stripped-prefix, remainder in normal DOS-like form).
        prefix = ''
        if s.startswith(ext_prefix):
            prefix = s[:4]
            s = s[4:]
            if s.startswith('UNC\\'):
                prefix += s[:3]
                s = '\\' + s[3:]
        return prefix, s

    def _ext_to_normal(self, s):
        # Turn back an extended path into a normal DOS-like path
        return self._split_extended_path(s)[1]

    def is_reserved(self, parts):
        # NOTE: the rules for reserved names seem somewhat complicated
        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
        # We err on the side of caution and return True for paths which are
        # not considered reserved by Windows.
        if not parts:
            return False
        if parts[0].startswith('\\\\'):
            # UNC paths are never reserved
            return False
        return parts[-1].partition('.')[0].upper() in self.reserved_names

    def make_uri(self, path):
        # Under Windows, file URIs use the UTF-8 encoding.
        drive = path.drive
        if len(drive) == 2 and drive[1] == ':':
            # It's a path on a local drive => 'file:///c:/a/b'
            rest = path.as_posix()[2:].lstrip('/')
            return 'file:///%s/%s' % (
                drive, urlquote_from_bytes(rest.encode('utf-8')))
        else:
            # It's a path on a network drive => 'file://host/share/a/b'
            return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))

    def gethomedir(self, username):
        # Determine the home directory from the environment; when
        # `username` is given, guess that user's home as a sibling of
        # the current user's home directory.
        if 'USERPROFILE' in os.environ:
            userhome = os.environ['USERPROFILE']
        elif 'HOMEPATH' in os.environ:
            try:
                drv = os.environ['HOMEDRIVE']
            except KeyError:
                drv = ''
            userhome = drv + os.environ['HOMEPATH']
        else:
            raise RuntimeError("Can't determine home directory")

        if username:
            # Try to guess user home directory.  By default all users
            # directories are located in the same place and are named by
            # corresponding usernames.  If current user home directory points
            # to nonstandard place, this guess is likely wrong.
            if os.environ['USERNAME'] != username:
                drv, root, parts = self.parse_parts((userhome,))
                if parts[-1] != os.environ['USERNAME']:
                    raise RuntimeError("Can't determine home directory "
                                       "for %r" % username)
                parts[-1] = username
                if drv or root:
                    userhome = drv + root + self.join(parts[1:])
                else:
                    userhome = self.join(parts)
        return userhome
class _PosixFlavour(_Flavour):
    sep = '/'
    altsep = ''
    has_drv = False
    pathmod = posixpath

    # Concrete (I/O-performing) paths of this flavour work on every
    # platform except Windows.
    is_supported = (os.name != 'nt')

    def splitroot(self, part, sep=sep):
        # POSIX paths have no drive; only the root is split off.
        if part and part[0] == sep:
            stripped_part = part.lstrip(sep)
            # According to POSIX path resolution:
            # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
            # "A pathname that begins with two successive slashes may be
            # interpreted in an implementation-defined manner, although more
            # than two leading slashes shall be treated as a single slash".
            if len(part) - len(stripped_part) == 2:
                return '', sep * 2, stripped_part
            else:
                return '', sep, stripped_part
        else:
            return '', '', part

    def casefold(self, s):
        # POSIX path comparison is case-sensitive.
        return s

    def casefold_parts(self, parts):
        return parts

    def compile_pattern(self, pattern):
        return re.compile(fnmatch.translate(pattern)).fullmatch

    def resolve(self, path, strict=False):
        # Resolve symlinks component by component.  `seen` maps a
        # partially-resolved path to its resolution (or None while the
        # resolution is in progress, which signals a symlink loop).
        sep = self.sep
        accessor = path._accessor
        seen = {}
        def _resolve(path, rest):
            if rest.startswith(sep):
                path = ''

            for name in rest.split(sep):
                if not name or name == '.':
                    # current dir
                    continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                if path.endswith(sep):
                    newpath = path + name
                else:
                    newpath = path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None:
                        # use cached value
                        continue
                    # The symlink is not resolved, so we must have a symlink loop.
                    raise RuntimeError("Symlink loop from %r" % newpath)
                # Resolve the symbolic link
                try:
                    target = accessor.readlink(newpath)
                except OSError as e:
                    # EINVAL means the entry exists but is not a symlink.
                    if e.errno != EINVAL and strict:
                        raise
                    # Not a symlink, or non-strict mode. We just leave the path
                    # untouched.
                    path = newpath
                else:
                    seen[newpath] = None  # not resolved symlink
                    path = _resolve(path, target)
                    seen[newpath] = path  # resolved symlink

            return path
        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        return _resolve(base, str(path)) or sep

    def is_reserved(self, parts):
        # POSIX has no reserved file names.
        return False

    def make_uri(self, path):
        # We represent the path using the local filesystem encoding,
        # for portability to other applications.
        bpath = bytes(path)
        return 'file://' + urlquote_from_bytes(bpath)

    def gethomedir(self, username):
        # Home directory from $HOME (current user) or the pwd database.
        if not username:
            try:
                return os.environ['HOME']
            except KeyError:
                import pwd
                return pwd.getpwuid(os.getuid()).pw_dir
        else:
            import pwd
            try:
                return pwd.getpwnam(username).pw_dir
            except KeyError:
                raise RuntimeError("Can't determine home directory "
                                   "for %r" % username)
# Singleton flavour instances shared by all pure path objects.
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
"""An accessor implements a particular (system-specific or not) way of
accessing paths on the filesystem."""
class _NormalAccessor(_Accessor):
    """Accessor that forwards every operation straight to the os module."""

    stat = os.stat
    lstat = os.lstat
    open = os.open
    listdir = os.listdir
    scandir = os.scandir
    chmod = os.chmod

    if hasattr(os, "lchmod"):
        lchmod = os.lchmod
    else:
        def lchmod(self, pathobj, mode):
            raise NotImplementedError("lchmod() not available on this system")

    mkdir = os.mkdir
    unlink = os.unlink

    if hasattr(os, "link"):
        link_to = os.link
    else:
        @staticmethod
        def link_to(self, target):
            raise NotImplementedError("os.link() not available on this system")

    rmdir = os.rmdir
    rename = os.rename
    replace = os.replace

    if nt:
        if supports_symlinks:
            symlink = os.symlink
        else:
            # Must be a staticmethod: callers invoke it through an
            # accessor instance as accessor.symlink(a, b, tid).  Without
            # @staticmethod the bound call would pass four arguments to
            # three parameters and raise TypeError instead of the
            # intended NotImplementedError (the POSIX branch below
            # already uses @staticmethod for the same reason).
            @staticmethod
            def symlink(a, b, target_is_directory):
                raise NotImplementedError("symlink() not available on this system")
    else:
        # Under POSIX, os.symlink() takes two args
        @staticmethod
        def symlink(a, b, target_is_directory):
            return os.symlink(a, b)

    utime = os.utime

    # Helper for resolve()
    def readlink(self, path):
        return os.readlink(path)
# Shared accessor instance used by default for all concrete paths.
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
def _make_selector(pattern_parts, flavour):
    """Build the selector chain that matches `pattern_parts` (the split
    components of a glob pattern)."""
    pat, child_parts = pattern_parts[0], pattern_parts[1:]
    if pat == '**':
        selector_cls = _RecursiveWildcardSelector
    elif '**' in pat:
        raise ValueError("Invalid pattern: '**' can only be an entire path component")
    elif _is_wildcard_pattern(pat):
        selector_cls = _WildcardSelector
    else:
        selector_cls = _PreciseSelector
    return selector_cls(pat, child_parts, flavour)
# Memoize selector construction: glob() is typically called repeatedly
# with the same patterns.  (The hasattr guard looks like a holdover —
# presumably for very old interpreters; verify before removing.)
if hasattr(functools, "lru_cache"):
    _make_selector = functools.lru_cache()(_make_selector)
class _Selector:
    """A selector matches a specific glob pattern part against the children
    of a given path."""

    def __init__(self, child_parts, flavour):
        self.child_parts = child_parts
        if not child_parts:
            # Last pattern component: terminate the chain.
            self.successor = _TerminatingSelector()
            self.dironly = False
        else:
            # More components follow, so only directories can match here.
            self.successor = _make_selector(child_parts, flavour)
            self.dironly = True

    def select_from(self, parent_path):
        """Iterate over all child paths of `parent_path` matched by this
        selector.  This can contain parent_path itself."""
        path_cls = type(parent_path)
        is_dir = path_cls.is_dir
        exists = path_cls.exists
        scandir = parent_path._accessor.scandir
        if not is_dir(parent_path):
            return iter([])
        return self._select_from(parent_path, is_dir, exists, scandir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, scandir):
yield parent_path
class _PreciseSelector(_Selector):
    """Selector for a literal (non-wildcard) pattern component."""

    def __init__(self, name, child_parts, flavour):
        self.name = name
        _Selector.__init__(self, child_parts, flavour)

    def _select_from(self, parent_path, is_dir, exists, scandir):
        try:
            path = parent_path._make_child_relpath(self.name)
            # Intermediate components must be directories; the final
            # component only needs to exist.
            follow = is_dir if self.dironly else exists
            if follow(path):
                yield from self.successor._select_from(
                    path, is_dir, exists, scandir)
        except PermissionError:
            return
class _WildcardSelector(_Selector):
    # Selector for a single pattern component containing shell wildcards
    # (e.g. '*.py'); matching uses the flavour's compiled fnmatch regex.

    def __init__(self, pat, child_parts, flavour):
        self.match = flavour.compile_pattern(pat)
        _Selector.__init__(self, child_parts, flavour)

    def _select_from(self, parent_path, is_dir, exists, scandir):
        try:
            with scandir(parent_path) as scandir_it:
                # Materialize the listing so the scandir handle is closed
                # before recursing into children.
                entries = list(scandir_it)
            for entry in entries:
                if self.dironly:
                    try:
                        # "entry.is_dir()" can raise PermissionError
                        # in some cases (see bpo-38894), which is not
                        # among the errors ignored by _ignore_error()
                        if not entry.is_dir():
                            continue
                    except OSError as e:
                        if not _ignore_error(e):
                            raise
                        continue
                name = entry.name
                if self.match(name):
                    path = parent_path._make_child_relpath(name)
                    for p in self.successor._select_from(path, is_dir, exists, scandir):
                        yield p
        except PermissionError:
            return
class _RecursiveWildcardSelector(_Selector):
    # Selector for the '**' pattern component: matches this directory
    # and, recursively, all directories below it.

    def __init__(self, pat, child_parts, flavour):
        _Selector.__init__(self, child_parts, flavour)

    def _iterate_directories(self, parent_path, is_dir, scandir):
        # Depth-first pre-order walk of all non-symlink directories,
        # starting with (and including) parent_path itself.
        yield parent_path
        try:
            with scandir(parent_path) as scandir_it:
                entries = list(scandir_it)
            for entry in entries:
                entry_is_dir = False
                try:
                    entry_is_dir = entry.is_dir()
                except OSError as e:
                    if not _ignore_error(e):
                        raise
                if entry_is_dir and not entry.is_symlink():
                    path = parent_path._make_child_relpath(entry.name)
                    for p in self._iterate_directories(path, is_dir, scandir):
                        yield p
        except PermissionError:
            return

    def _select_from(self, parent_path, is_dir, exists, scandir):
        try:
            # Track already-yielded paths: the same path can be reached
            # through several starting points, and '**' must not yield
            # duplicates.
            yielded = set()
            try:
                successor_select = self.successor._select_from
                for starting_point in self._iterate_directories(parent_path, is_dir, scandir):
                    for p in successor_select(starting_point, is_dir, exists, scandir):
                        if p not in yielded:
                            yield p
                            yielded.add(p)
            finally:
                yielded.clear()
        except PermissionError:
            return
#
# Public API
#
class _PathParents(Sequence):
    """This object provides sequence-like access to the logical ancestors
    of a path.  Don't try to construct it yourself."""
    __slots__ = ('_pathcls', '_drv', '_root', '_parts')

    def __init__(self, path):
        # We don't store the instance to avoid reference cycles
        self._pathcls = type(path)
        self._drv = path._drv
        self._root = path._root
        self._parts = path._parts

    def __len__(self):
        # When anchored, parts[0] is the anchor itself and is not an
        # ancestor of itself.
        if self._drv or self._root:
            return len(self._parts) - 1
        else:
            return len(self._parts)

    def __getitem__(self, idx):
        # Index 0 is the nearest ancestor (the path's direct parent).
        if idx < 0 or idx >= len(self):
            raise IndexError(idx)
        return self._pathcls._from_parsed_parts(self._drv, self._root,
                                                self._parts[:-idx - 1])

    def __repr__(self):
        return "<{}.parents>".format(self._pathcls.__name__)
class PurePath(object):
    """Base class for manipulating paths without I/O.

    PurePath represents a filesystem path and offers operations which
    don't imply any actual filesystem I/O.  Depending on your system,
    instantiating a PurePath will return either a PurePosixPath or a
    PureWindowsPath object.  You can also instantiate either of these classes
    directly, regardless of your system.
    """
    __slots__ = (
        # Parsed state: drive, root and the list of path components.
        '_drv', '_root', '_parts',
        # Lazily computed caches: str form, hash, parts tuple and
        # casefolded parts (for comparison/hashing).
        '_str', '_hash', '_pparts', '_cached_cparts',
    )

    def __new__(cls, *args):
        """Construct a PurePath from one or several strings and or existing
        PurePath objects.  The strings and path objects are combined so as
        to yield a canonicalized path, which is incorporated into the
        new PurePath object.
        """
        if cls is PurePath:
            cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
        return cls._from_parts(args)

    def __reduce__(self):
        # Using the parts tuple helps share interned path parts
        # when pickling related paths.
        return (self.__class__, tuple(self._parts))

    @classmethod
    def _parse_args(cls, args):
        # This is useful when you don't want to create an instance, just
        # canonicalize some constructor arguments.
        parts = []
        for a in args:
            if isinstance(a, PurePath):
                parts += a._parts
            else:
                a = os.fspath(a)
                if isinstance(a, str):
                    # Force-cast str subclasses to str (issue #21127)
                    parts.append(str(a))
                else:
                    raise TypeError(
                        "argument should be a str object or an os.PathLike "
                        "object returning str, not %r"
                        % type(a))
        return cls._flavour.parse_parts(parts)

    @classmethod
    def _from_parts(cls, args, init=True):
        # We need to call _parse_args on the instance, so as to get the
        # right flavour.
        self = object.__new__(cls)
        drv, root, parts = self._parse_args(args)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _from_parsed_parts(cls, drv, root, parts, init=True):
        # Fast internal constructor for already-canonical drv/root/parts
        # (skips _parse_args entirely).
        self = object.__new__(cls)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self

    @classmethod
    def _format_parsed_parts(cls, drv, root, parts):
        # When anchored, parts[0] already holds the drv+root anchor.
        if drv or root:
            return drv + root + cls._flavour.join(parts[1:])
        else:
            return cls._flavour.join(parts)

    def _init(self):
        # Overridden in concrete Path
        pass

    def _make_child(self, args):
        # Parse `args` and join them onto this path; used by joinpath()
        # and the / operator.
        drv, root, parts = self._parse_args(args)
        drv, root, parts = self._flavour.join_parsed_parts(
            self._drv, self._root, self._parts, drv, root, parts)
        return self._from_parsed_parts(drv, root, parts)

    def __str__(self):
        """Return the string representation of the path, suitable for
        passing to system calls."""
        try:
            return self._str
        except AttributeError:
            # An empty path formats as '.' (the current directory).
            self._str = self._format_parsed_parts(self._drv, self._root,
                                                  self._parts) or '.'
            return self._str

    def __fspath__(self):
        # os.PathLike protocol support.
        return str(self)

    def as_posix(self):
        """Return the string representation of the path with forward (/)
        slashes."""
        f = self._flavour
        return str(self).replace(f.sep, '/')

    def __bytes__(self):
        """Return the bytes representation of the path.  This is only
        recommended to use under Unix."""
        return os.fsencode(self)

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, self.as_posix())

    def as_uri(self):
        """Return the path as a 'file' URI."""
        if not self.is_absolute():
            raise ValueError("relative path can't be expressed as a file URI")
        return self._flavour.make_uri(self)

    @property
    def _cparts(self):
        # Cached casefolded parts, for hashing and comparison
        try:
            return self._cached_cparts
        except AttributeError:
            self._cached_cparts = self._flavour.casefold_parts(self._parts)
            return self._cached_cparts

    def __eq__(self, other):
        # Paths of different flavours never compare equal.
        if not isinstance(other, PurePath):
            return NotImplemented
        return self._cparts == other._cparts and self._flavour is other._flavour

    def __hash__(self):
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(tuple(self._cparts))
            return self._hash

    def __lt__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts < other._cparts

    def __le__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts <= other._cparts

    def __gt__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts > other._cparts

    def __ge__(self, other):
        if not isinstance(other, PurePath) or self._flavour is not other._flavour:
            return NotImplemented
        return self._cparts >= other._cparts

    drive = property(attrgetter('_drv'),
                     doc="""The drive prefix (letter or UNC path), if any.""")

    root = property(attrgetter('_root'),
                    doc="""The root of the path, if any.""")

    @property
    def anchor(self):
        """The concatenation of the drive and root, or ''."""
        anchor = self._drv + self._root
        return anchor

    @property
    def name(self):
        """The final path component, if any."""
        parts = self._parts
        # When anchored, parts[0] is the anchor, not a real component.
        if len(parts) == (1 if (self._drv or self._root) else 0):
            return ''
        return parts[-1]

    @property
    def suffix(self):
        """
        The final component's last suffix, if any.

        This includes the leading period. For example: '.txt'
        """
        name = self.name
        i = name.rfind('.')
        # A dot at position 0 ('.bashrc') or at the very end is not a
        # suffix.
        if 0 < i < len(name) - 1:
            return name[i:]
        else:
            return ''

    @property
    def suffixes(self):
        """
        A list of the final component's suffixes, if any.

        These include the leading periods. For example: ['.tar', '.gz']
        """
        name = self.name
        if name.endswith('.'):
            return []
        name = name.lstrip('.')
        return ['.' + suffix for suffix in name.split('.')[1:]]

    @property
    def stem(self):
        """The final path component, minus its last suffix."""
        name = self.name
        i = name.rfind('.')
        if 0 < i < len(name) - 1:
            return name[:i]
        else:
            return name

    def with_name(self, name):
        """Return a new path with the file name changed."""
        if not self.name:
            raise ValueError("%r has an empty name" % (self,))
        drv, root, parts = self._flavour.parse_parts((name,))
        # The replacement must be a single, relative, separator-free
        # component.
        if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
            or drv or root or len(parts) != 1):
            raise ValueError("Invalid name %r" % (name))
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])

    def with_suffix(self, suffix):
        """Return a new path with the file suffix changed.  If the path
        has no suffix, add given suffix.  If the given suffix is an empty
        string, remove the suffix from the path.
        """
        f = self._flavour
        if f.sep in suffix or f.altsep and f.altsep in suffix:
            raise ValueError("Invalid suffix %r" % (suffix,))
        if suffix and not suffix.startswith('.') or suffix == '.':
            raise ValueError("Invalid suffix %r" % (suffix))
        name = self.name
        if not name:
            raise ValueError("%r has an empty name" % (self,))
        old_suffix = self.suffix
        if not old_suffix:
            name = name + suffix
        else:
            name = name[:-len(old_suffix)] + suffix
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])

    def relative_to(self, *other):
        """Return the relative path to another path identified by the passed
        arguments.  If the operation is not possible (because this is not
        a subpath of the other path), raise ValueError.
        """
        # For the purpose of this method, drive and root are considered
        # separate parts, i.e.:
        #   Path('c:/').relative_to('c:')  gives Path('/')
        #   Path('c:/').relative_to('/')   raise ValueError
        if not other:
            raise TypeError("need at least one argument")
        parts = self._parts
        drv = self._drv
        root = self._root
        if root:
            abs_parts = [drv, root] + parts[1:]
        else:
            abs_parts = parts
        to_drv, to_root, to_parts = self._parse_args(other)
        if to_root:
            to_abs_parts = [to_drv, to_root] + to_parts[1:]
        else:
            to_abs_parts = to_parts
        n = len(to_abs_parts)
        cf = self._flavour.casefold_parts
        # The other path must be a (casefolded) prefix of this one.
        if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
            formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
            raise ValueError("{!r} does not start with {!r}"
                             .format(str(self), str(formatted)))
        return self._from_parsed_parts('', root if n == 1 else '',
                                       abs_parts[n:])

    @property
    def parts(self):
        """An object providing sequence-like access to the
        components in the filesystem path."""
        # We cache the tuple to avoid building a new one each time .parts
        # is accessed.  XXX is this necessary?
        try:
            return self._pparts
        except AttributeError:
            self._pparts = tuple(self._parts)
            return self._pparts

    def joinpath(self, *args):
        """Combine this path with one or several arguments, and return a
        new path representing either a subpath (if all arguments are relative
        paths) or a totally different path (if one of the arguments is
        anchored).
        """
        return self._make_child(args)

    def __truediv__(self, key):
        try:
            return self._make_child((key,))
        except TypeError:
            return NotImplemented

    def __rtruediv__(self, key):
        try:
            return self._from_parts([key] + self._parts)
        except TypeError:
            return NotImplemented

    @property
    def parent(self):
        """The logical parent of the path."""
        drv = self._drv
        root = self._root
        parts = self._parts
        # A bare anchor is its own parent; for everything else, drop the
        # last component (an empty relative path stays '.').
        if len(parts) == 1 and (drv or root):
            return self
        return self._from_parsed_parts(drv, root, parts[:-1])

    @property
    def parents(self):
        """A sequence of this path's logical parents."""
        return _PathParents(self)

    def is_absolute(self):
        """True if the path is absolute (has both a root and, if applicable,
        a drive)."""
        if not self._root:
            return False
        return not self._flavour.has_drv or bool(self._drv)

    def is_reserved(self):
        """Return True if the path contains one of the special names reserved
        by the system, if any."""
        return self._flavour.is_reserved(self._parts)

    def match(self, path_pattern):
        """
        Return True if this path matches the given pattern.
        """
        cf = self._flavour.casefold
        path_pattern = cf(path_pattern)
        drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
        if not pat_parts:
            raise ValueError("empty pattern")
        if drv and drv != cf(self._drv):
            return False
        if root and root != cf(self._root):
            return False
        parts = self._cparts
        if drv or root:
            # An anchored pattern must match the whole path.
            if len(pat_parts) != len(parts):
                return False
            pat_parts = pat_parts[1:]
        elif len(pat_parts) > len(parts):
            return False
        # A relative pattern matches from the right.
        for part, pat in zip(reversed(parts), reversed(pat_parts)):
            if not fnmatch.fnmatchcase(part, pat):
                return False
        return True
# Can't subclass os.PathLike from PurePath and keep the constructor
# optimizations in PurePath._parse_args().
# Register PurePath as a virtual subclass of os.PathLike instead of
# inheriting from it (inheritance would defeat the constructor
# optimizations in PurePath._parse_args()).
os.PathLike.register(PurePath)
class PurePosixPath(PurePath):
    """A PurePath flavoured for POSIX-style (forward-slash) paths.

    Instantiating PurePath on a non-Windows system produces this class;
    it can also be used directly on any platform, since it performs no
    filesystem I/O.
    """
    _flavour = _posix_flavour
    __slots__ = ()
class PureWindowsPath(PurePath):
    """A PurePath flavoured for Windows-style paths (drives, backslashes).

    Instantiating PurePath on a Windows system produces this class; it
    can also be used directly on any platform, since it performs no
    filesystem I/O.
    """
    _flavour = _windows_flavour
    __slots__ = ()
# Filesystem-accessing classes
class Path(PurePath):
"""PurePath subclass that can make system calls.
Path represents a filesystem path but unlike PurePath, also offers
methods to do system calls on path objects. Depending on your system,
instantiating a Path will return either a PosixPath or a WindowsPath
object. You can also instantiate a PosixPath or WindowsPath directly,
but cannot instantiate a WindowsPath on a POSIX system or vice versa.
"""
__slots__ = (
'_accessor',
'_closed',
)
def __new__(cls, *args, **kwargs):
    # Path() itself dispatches to the concrete class for the running OS.
    # An explicitly requested PosixPath/WindowsPath is honoured but
    # rejected below when its flavour is unsupported on this system.
    if cls is Path:
        cls = WindowsPath if os.name == 'nt' else PosixPath
    self = cls._from_parts(args, init=False)
    if not self._flavour.is_supported:
        raise NotImplementedError("cannot instantiate %r on your system"
                                  % (cls.__name__,))
    self._init()
    return self
def _init(self,
          # Private non-constructor arguments
          template=None,
          ):
    # `template` lets internal factories (absolute()/resolve()) propagate
    # the accessor of an existing Path instead of the default one.
    self._closed = False
    if template is not None:
        self._accessor = template._accessor
    else:
        self._accessor = _normal_accessor
def _make_child_relpath(self, part):
    # Optimization used for dir walking: `part` must be a single path
    # component relative to this path (no separators, no anchor).
    return self._from_parsed_parts(self._drv, self._root,
                                   self._parts + [part])
def __enter__(self):
    """Context-manager entry: return self, refusing closed paths."""
    if self._closed:
        self._raise_closed()
    return self
def __exit__(self, t, v, tb):
    """Context-manager exit: mark this path object as closed."""
    self._closed = True
def _raise_closed(self):
raise ValueError("I/O operation on closed path")
def _opener(self, name, flags, mode=0o666):
    # A stub for the opener argument to built-in open(); `name` is
    # ignored because the accessor opens `self` directly.
    return self._accessor.open(self, flags, mode)
def _raw_open(self, flags, mode=0o777):
    """Open the file pointed to by this path and return a raw file
    descriptor, as os.open() does."""
    if self._closed:
        self._raise_closed()
    opener = self._accessor.open
    return opener(self, flags, mode)
# Public API
@classmethod
def cwd(cls):
    """Return a new path pointing to the current working directory
    (as returned by os.getcwd())."""
    cwd_str = os.getcwd()
    return cls(cwd_str)
@classmethod
def home(cls):
    """Return a new path pointing to the user's home directory (as
    returned by os.path.expanduser('~'))."""
    homedir = cls()._flavour.gethomedir(None)
    return cls(homedir)
def samefile(self, other_path):
    """Return whether `other_path` refers to the same file as this one
    (as returned by os.path.samefile())."""
    st = self.stat()
    try:
        other_st = other_path.stat()
    except AttributeError:
        # `other_path` is a plain string / os.PathLike, not a Path.
        other_st = os.stat(other_path)
    return os.path.samestat(st, other_st)
def iterdir(self):
    """Yield path objects for the entries of this directory.  The
    special entries '.' and '..' are never yielded."""
    if self._closed:
        self._raise_closed()
    for name in self._accessor.listdir(self):
        if name in {'.', '..'}:
            # Yielding path objects for these makes little sense.
            continue
        yield self._make_child_relpath(name)
    if self._closed:
        self._raise_closed()
def glob(self, pattern):
    """Iterate over this subtree and yield all existing files (of any
    kind, including directories) matching the given relative pattern."""
    if not pattern:
        raise ValueError("Unacceptable pattern: {!r}".format(pattern))
    drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
    if drv or root:
        raise NotImplementedError("Non-relative patterns are unsupported")
    selector = _make_selector(tuple(pattern_parts), self._flavour)
    yield from selector.select_from(self)
def rglob(self, pattern):
    """Recursively yield all existing files (of any kind, including
    directories) matching the given relative pattern, anywhere in this
    subtree.  Equivalent to glob() with '**/' prepended."""
    drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
    if drv or root:
        raise NotImplementedError("Non-relative patterns are unsupported")
    selector = _make_selector(("**",) + tuple(pattern_parts), self._flavour)
    yield from selector.select_from(self)
def absolute(self):
    """Return an absolute version of this path.  This function works
    even if the path doesn't point to anything.

    No normalization is done, i.e. all '.' and '..' will be kept along.
    Use resolve() to get the canonical path to a file.
    """
    # XXX untested yet!
    if self._closed:
        self._raise_closed()
    if self.is_absolute():
        return self
    # FIXME this must defer to the specific flavour (and, under Windows,
    # use nt._getfullpathname())
    obj = self._from_parts([os.getcwd()] + self._parts, init=False)
    # Propagate our accessor to the new instance.
    obj._init(template=self)
    return obj
def resolve(self, strict=False):
    """
    Make the path absolute, resolving all symlinks on the way and also
    normalizing it (for example turning slashes into backslashes under
    Windows).
    """
    if self._closed:
        self._raise_closed()
    s = self._flavour.resolve(self, strict=strict)
    if s is None:
        # No symlink resolution => for consistency, raise an error if
        # the path doesn't exist or is forbidden
        self.stat()
        s = str(self.absolute())
    # Now we have no symlinks in the path, it's safe to normalize it.
    normed = self._flavour.pathmod.normpath(s)
    obj = self._from_parts((normed,), init=False)
    # Propagate our accessor to the new instance.
    obj._init(template=self)
    return obj
def stat(self):
    """Return the result of the stat() system call on this path, like
    os.stat() does."""
    do_stat = self._accessor.stat
    return do_stat(self)
def owner(self):
    """Return the login name of the file owner."""
    import pwd
    uid = self.stat().st_uid
    return pwd.getpwuid(uid).pw_name
def group(self):
    """Return the group name of the file gid."""
    import grp
    gid = self.stat().st_gid
    return grp.getgrgid(gid).gr_name
def open(self, mode='r', buffering=-1, encoding=None,
         errors=None, newline=None):
    """Open the file pointed to by this path and return a file object,
    as the built-in open() function does."""
    if self._closed:
        self._raise_closed()
    # Route the actual OS-level open through our accessor.
    return io.open(self, mode, buffering, encoding, errors, newline,
                   opener=self._opener)
def read_bytes(self):
    """Open the file in bytes mode, read it, and close the file;
    return its binary contents."""
    with self.open(mode='rb') as stream:
        return stream.read()
def read_text(self, encoding=None, errors=None):
    """Open the file in text mode, read it, and close the file;
    return its decoded contents."""
    with self.open(mode='r', encoding=encoding, errors=errors) as stream:
        return stream.read()
def write_bytes(self, data):
    """Open the file in bytes mode, write `data` to it, and close the
    file; return the number of bytes written."""
    # memoryview type-checks for the buffer protocol *before* the file
    # is truncated by opening it in 'wb' mode.
    view = memoryview(data)
    with self.open(mode='wb') as stream:
        return stream.write(view)
def write_text(self, data, encoding=None, errors=None):
    """Open the file in text mode, write `data` (a str) to it, and
    close the file; return the number of characters written."""
    # Reject non-str up front so the file is not truncated on bad input.
    if not isinstance(data, str):
        raise TypeError('data must be str, not %s' %
                        data.__class__.__name__)
    with self.open(mode='w', encoding=encoding, errors=errors) as stream:
        return stream.write(data)
def touch(self, mode=0o666, exist_ok=True):
    """
    Create this file with the given access mode, if it doesn't exist.

    With exist_ok=True (the default) an existing file just has its
    modification time bumped.
    """
    if self._closed:
        self._raise_closed()
    if exist_ok:
        # First try to bump modification time
        # Implementation note: GNU touch uses the UTIME_NOW option of
        # the utimensat() / futimens() functions.
        try:
            self._accessor.utime(self, None)
        except OSError:
            # Avoid exception chaining
            pass
        else:
            return
    flags = os.O_CREAT | os.O_WRONLY
    if not exist_ok:
        # O_EXCL makes the open fail if the file already exists.
        flags |= os.O_EXCL
    fd = self._raw_open(flags, mode)
    os.close(fd)
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
    """
    Create a new directory at this given path.

    With parents=True missing ancestors are created first; with
    exist_ok=True an already-existing directory is not an error.
    """
    if self._closed:
        self._raise_closed()
    try:
        self._accessor.mkdir(self, mode)
    except FileNotFoundError:
        if not parents or self.parent == self:
            raise
        # Create missing ancestors, then retry this directory once.
        self.parent.mkdir(parents=True, exist_ok=True)
        self.mkdir(mode, parents=False, exist_ok=exist_ok)
    except OSError:
        # Cannot rely on checking for EEXIST, since the operating system
        # could give priority to other errors like EACCES or EROFS
        if not exist_ok or not self.is_dir():
            raise
def chmod(self, mode):
    """Change the permissions of the path, like os.chmod()."""
    if self._closed:
        self._raise_closed()
    do_chmod = self._accessor.chmod
    do_chmod(self, mode)
def lchmod(self, mode):
    """Like chmod(), except if the path points to a symlink, the
    symlink's permissions are changed, rather than its target's."""
    if self._closed:
        self._raise_closed()
    do_lchmod = self._accessor.lchmod
    do_lchmod(self, mode)
def unlink(self, missing_ok=False):
    """Remove this file or symbolic link; use rmdir() for directories.

    With missing_ok=True a nonexistent path is silently ignored.
    """
    if self._closed:
        self._raise_closed()
    try:
        self._accessor.unlink(self)
    except FileNotFoundError:
        if not missing_ok:
            raise
def rmdir(self):
    """Remove this directory.  The directory must be empty."""
    if self._closed:
        self._raise_closed()
    do_rmdir = self._accessor.rmdir
    do_rmdir(self)
def lstat(self):
    """Like stat(), except if the path points to a symlink, the
    symlink's status information is returned, rather than its
    target's."""
    if self._closed:
        self._raise_closed()
    do_lstat = self._accessor.lstat
    return do_lstat(self)
def rename(self, target):
    """Rename this path to the target path and return a new Path
    instance pointing at it.

    Relative targets are interpreted against the current working
    directory, *not* the directory of this path object.
    """
    if self._closed:
        self._raise_closed()
    self._accessor.rename(self, target)
    return self.__class__(target)
def replace(self, target):
    """Rename this path to the target path, overwriting the target if
    it exists, and return a new Path instance pointing at it.

    Relative targets are interpreted against the current working
    directory, *not* the directory of this path object.
    """
    if self._closed:
        self._raise_closed()
    self._accessor.replace(self, target)
    return self.__class__(target)
def symlink_to(self, target, target_is_directory=False):
    """Make this path a symlink pointing to `target`.

    Note the order of arguments (link, target) is the reverse of
    os.symlink's.
    """
    if self._closed:
        self._raise_closed()
    self._accessor.symlink(target, self, target_is_directory)
    def link_to(self, target):
        """
        Make the target path a hard link pointing to this path.
        Note this function does not make this path a hard link to *target*,
        despite the implication of the function and argument names. The order
        of arguments (target, link) is the reverse of Path.symlink_to, but
        matches that of os.link.
        """
        if self._closed:
            self._raise_closed()
        # (self, target) here means: create *target* as a link to *self*.
        self._accessor.link_to(self, target)
# Convenience functions for querying the stat results
def exists(self):
"""
Whether this path exists.
"""
try:
self.stat()
except OSError as e:
if not _ignore_error(e):
raise
return False
except ValueError:
# Non-encodable path
return False
return True
def is_dir(self):
"""
Whether this path is a directory.
"""
try:
return S_ISDIR(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_file(self):
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
try:
return S_ISREG(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_mount(self):
"""
Check if this path is a POSIX mount point
"""
# Need to exist and be a dir
if not self.exists() or not self.is_dir():
return False
parent = Path(self.parent)
try:
parent_dev = parent.stat().st_dev
except OSError:
return False
dev = self.stat().st_dev
if dev != parent_dev:
return True
ino = self.stat().st_ino
parent_ino = parent.stat().st_ino
return ino == parent_ino
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist
return False
except ValueError:
# Non-encodable path
return False
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_fifo(self):
"""
Whether this path is a FIFO.
"""
try:
return S_ISFIFO(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_socket(self):
"""
Whether this path is a socket.
"""
try:
return S_ISSOCK(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def expanduser(self):
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not (self._drv or self._root) and
self._parts and self._parts[0][:1] == '~'):
homedir = self._flavour.gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self
class PosixPath(Path, PurePosixPath):
    """Path subclass for non-Windows systems.
    On a POSIX system, instantiating a Path should return this object.
    """
    # No per-instance state beyond what Path/PurePosixPath provide.
    __slots__ = ()
class WindowsPath(Path, PureWindowsPath):
    """Path subclass for Windows systems.
    On a Windows system, instantiating a Path should return this object.
    """
    # No per-instance state beyond what Path/PureWindowsPath provide.
    __slots__ = ()
    # The following POSIX-only operations are explicitly unsupported here.
    def owner(self):
        raise NotImplementedError("Path.owner() is unsupported on this system")
    def group(self):
        raise NotImplementedError("Path.group() is unsupported on this system")
    def is_mount(self):
        raise NotImplementedError("Path.is_mount() is unsupported on this system")
| 34.212753 | 96 | 0.546491 |
7954fc0a9a70a4d486df33b06037f170a9aea005 | 167 | py | Python | ypc/__init__.py | dbeley/youtube_playlist_converter | 3dc28620095ec0f06934a346083386e5b2d308cf | [
"MIT"
] | 18 | 2019-08-24T11:18:46.000Z | 2021-11-16T12:47:10.000Z | ypc/__init__.py | dbeley/youtube_playlist_converter | 3dc28620095ec0f06934a346083386e5b2d308cf | [
"MIT"
] | 18 | 2019-06-28T04:27:05.000Z | 2021-12-27T23:33:03.000Z | ypc/__init__.py | dbeley/youtube_playlist_converter | 3dc28620095ec0f06934a346083386e5b2d308cf | [
"MIT"
] | 2 | 2019-06-12T13:26:07.000Z | 2021-07-13T20:42:49.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
convert spotify/deezer/text playlists to youtube urls or audio/video files
"""
# Package version string.
__version__ = "1.7.2"
# Package name.
name = "ypc"
| 15.181818 | 74 | 0.652695 |
7954fc91212869fde54d7bd530f7a6dbefe19ecb | 56 | py | Python | hello.py | ck4xa/cs3240-labdemo | 390a486b843569777b16ac5e27cc14429349eecb | [
"MIT"
] | null | null | null | hello.py | ck4xa/cs3240-labdemo | 390a486b843569777b16ac5e27cc14429349eecb | [
"MIT"
] | null | null | null | hello.py | ck4xa/cs3240-labdemo | 390a486b843569777b16ac5e27cc14429349eecb | [
"MIT"
] | null | null | null | from helper import greeting
# Message text passed to the imported helper.greeting function.
msg = "hello"
greeting(msg)
| 14 | 27 | 0.767857 |
7954fe36a22d2ed5520ef965d1480f5e3994c11d | 3,678 | py | Python | shed/writers.py | st3107/shed-streaming | c632fc465d7e11fe0155fbc3e8add1965615dd51 | [
"BSD-3-Clause"
] | 4 | 2017-09-20T16:26:34.000Z | 2020-03-24T15:51:28.000Z | shed/writers.py | st3107/shed-streaming | c632fc465d7e11fe0155fbc3e8add1965615dd51 | [
"BSD-3-Clause"
] | 172 | 2017-07-25T21:36:12.000Z | 2022-02-25T16:05:36.000Z | shed/writers.py | st3107/shed-streaming | c632fc465d7e11fe0155fbc3e8add1965615dd51 | [
"BSD-3-Clause"
] | 6 | 2017-08-08T12:39:18.000Z | 2021-03-29T22:28:47.000Z | from rapidz import Stream
import os
import numpy as np
from event_model import compose_resource
@Stream.register_api()
class Store(Stream):
def __init__(self, upstream, root, writer, resource_kwargs=None, **kwargs):
Stream.__init__(self, upstream, **kwargs)
if writer is None:
writer = {}
self.writer = writer
self.root = root
self.resource_kwargs = resource_kwargs
self.init_writers = {}
self.descriptors = {}
self.not_issued_descriptors = set()
def update(self, x, who=None):
name, doc = x
# selective copy
doc = dict(doc)
if name == "start":
self.init_writers[doc["uid"]] = self.writer(
self.root, doc, self.resource_kwargs
)
if name == "descriptor":
self.descriptors[doc["uid"]] = doc
self.not_issued_descriptors.add(doc["uid"])
return
elif name == "event":
ret = []
writer = self.init_writers[
self.descriptors[doc["descriptor"]]["run_start"]
]
for n, d in writer.write(doc):
# If this is an event and we haven't done this descriptor yet
if (
n == "event"
and doc["descriptor"] in self.not_issued_descriptors
):
# For each of the filled keys let us know that it is backed
# by FILESTORE
descriptor = self.descriptors[doc["descriptor"]]
for k, v in doc["filled"].items():
if not v:
descriptor["data_keys"][k].update(
external="FILESTORE:"
)
ret.append(self.emit(("descriptor", descriptor)))
# We're done with that descriptor now
self.not_issued_descriptors.remove(doc["descriptor"])
ret.append(self.emit((n, d)))
return ret
elif name == "stop":
# clean up our cache (allow multi stops if needed)
self.init_writers.pop(doc["run_start"], None)
return self.emit((name, doc))
class NpyWriter:
    """Externalizes large array data from bluesky-style events.

    Arrays with at least one dimension are saved to ``.npy`` files under
    *root* and replaced in the event by a datum id; 0-d (scalar) arrays
    are replaced in place by their plain Python value.
    """
    spec = "npy"

    def __init__(self, root, start, resource_kwargs=None):
        # root: directory under which resource files are written.
        # start: run start document used when composing resources.
        if resource_kwargs is None:
            resource_kwargs = {}
        self.resource_kwargs = resource_kwargs
        self.root = root
        self.datum_kwargs = {}
        self.start = start

    def write(self, event):
        """Yield (name, doc) pairs for *event*, externalizing array data.

        Yields a ``resource`` and ``datum`` document for every non-scalar
        array in ``event['data']`` (marking that key as unfilled), then
        finally the (possibly modified) ``event`` itself.
        """
        for k, v in event["data"].items():
            if isinstance(v, np.ndarray) and v.shape != ():
                resource_path = f'an_data/{event["uid"]}_{k}.npy'
                fpath = os.path.join(self.root, resource_path)
                os.makedirs(os.path.dirname(fpath), exist_ok=True)
                np.save(fpath, v)
                resource, compose_datum, compose_datum_page = compose_resource(
                    start=self.start,
                    spec=self.spec,
                    root=self.root,
                    resource_path=resource_path,
                    resource_kwargs=self.resource_kwargs,
                )
                yield "resource", resource
                datum = compose_datum(datum_kwargs=self.datum_kwargs)
                yield "datum", datum
                event["data"][k] = datum["datum_id"]
                event["filled"][k] = False
            elif isinstance(v, np.ndarray):
                # 0-d array: don't write a file just for a single number.
                # Bug fix: the old guard was `np.isscalar(v)`, which is
                # always False for an ndarray, so this branch never ran
                # and 0-d arrays leaked through unchanged.
                event["data"][k] = v.item()
        yield "event", event
| 36.058824 | 79 | 0.517673 |
7954fee54bcf1d2d6b63f0c043bf6cd818686879 | 1,218 | py | Python | {{cookiecutter.app_name}}/models.py | cericoda/cookiecutter-django-crud | dceb75c927957eb0cee34b926f1856b0597050b2 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.app_name}}/models.py | cericoda/cookiecutter-django-crud | dceb75c927957eb0cee34b926f1856b0597050b2 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.app_name}}/models.py | cericoda/cookiecutter-django-crud | dceb75c927957eb0cee34b926f1856b0597050b2 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from model_utils.models import StatusModel, TimeStampedModel
from model_utils.choices import Choices
class NonArchived{{ cookiecutter.model_name }}Manager(models.Manager):
    # Manager whose default queryset hides archived records.
    # NOTE(review): modern Django spells this hook get_queryset();
    # confirm the targeted Django version supports get_query_set.
    def get_query_set(self):
        return super(
            NonArchived{{ cookiecutter.model_name }}Manager, self
        ).get_query_set().exclude(status={{ cookiecutter.model_name }}.STATUS.archived)
@python_2_unicode_compatible
class {{ cookiecutter.model_name }}(StatusModel, TimeStampedModel):
    # Status choices consumed by StatusModel; "archived" rows are hidden
    # by the NonArchived manager below.
    STATUS = Choices(('active', 'Active'),
                     ('archived', 'Archived'),
                     )
    name = models.CharField(max_length=255)
    all{{ cookiecutter.model_name|lower}} = models.Manager() # Establish this as the default/automatic manager
    objects = NonArchived{{ cookiecutter.model_name }}Manager()
    def __str__(self):
        # Unsaved instances have no primary key yet.
        return '{{ cookiecutter.model_name }} ({})'.format(self.id or 'Unsaved')
    def get_absolute_url(self):
        return reverse('{{ cookiecutter.model_name|lower }}:detail', args=[str(self.id)])
7954ff49c9e9c31a84610d6d3f033b9460688ee2 | 2,819 | py | Python | server/routes/api/v1/discord/tokens/all.py | SpartanPlume/TosurnamentWeb | 6326f51d9cc958b948e0068cbf92dd16294d9d62 | [
"MIT"
] | 1 | 2018-04-23T16:44:50.000Z | 2018-04-23T16:44:50.000Z | server/routes/api/v1/discord/tokens/all.py | SpartanPlume/TosurnamentWeb | 6326f51d9cc958b948e0068cbf92dd16294d9d62 | [
"MIT"
] | null | null | null | server/routes/api/v1/discord/tokens/all.py | SpartanPlume/TosurnamentWeb | 6326f51d9cc958b948e0068cbf92dd16294d9d62 | [
"MIT"
] | null | null | null | """Route to all tokens"""
import requests
import time
import uuid
import json
import constants
from databases.token import Token
from helpers.crypt import hash_str
# Base URL of Discord's REST API, version 6.
API_ENDPOINT = 'https://discordapp.com/api/v6'
def get_user_id(handler, data):
    """Fetch the Discord user id for the bearer token in *data*.

    On an HTTP error the failure is logged, a 500 is sent through
    *handler*, and None is returned.
    """
    auth_headers = {'Authorization': 'Bearer ' + data["access_token"]}
    try:
        response = requests.get(API_ENDPOINT + '/users/@me', headers=auth_headers)
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        handler.logger.exception("Couldn't get the data from Discord API.")
        handler.logger.debug(response.text)
        handler.send_error(500, "Couldn't get the data from Discord API.")
        return None
    return json.loads(response.text)["id"]
def store_token(handler, data):
    """Persist an OAuth token exchange result, returning a session token.

    Reuses the row matching the caller's session token when present,
    otherwise creates a new Token row. Returns None when the Discord
    user id could not be fetched.
    """
    token = None
    session_token = handler.session_token
    if session_token:
        # Look up an existing row by the (hashed) session token.
        token = handler.session.query(Token).where(Token.session_token == hash_str(session_token)).first()
    if not token:
        token = Token()
        token.discord_user_id = get_user_id(handler, data)
        if not token.discord_user_id:
            # get_user_id already reported the error through the handler.
            return None
        session_token = str(uuid.uuid4())
        token.session_token = session_token
        # 2592000 s = 30 days.
        token.expiry_date = str(int(time.time()) + 2592000)
        handler.session.add(token)
    # Refresh the OAuth credentials on both new and existing rows.
    token.access_token = data["access_token"]
    token.token_type = data["token_type"]
    token.access_token_expiry_date = str(int(time.time()) + data["expires_in"])
    token.refresh_token = data["refresh_token"]
    token.scope = data["scope"]
    handler.session.update(token)
    return session_token
def post(handler, parameters, url_parameters, ids_parameters):
    """POST method

    Exchanges the OAuth2 authorization code in *parameters* for tokens
    via Discord, stores them, and replies with a session token as JSON.
    Responds 401 when no code was sent, 500 on a Discord API failure.
    """
    if 'code' in parameters:
        # Standard OAuth2 authorization-code exchange payload.
        data = {
            'client_id': constants.CLIENT_ID,
            'client_secret': constants.CLIENT_SECRET,
            'grant_type': 'authorization_code',
            'code': parameters['code'],
            'redirect_uri': constants.DISCORD_REDIRECT_URI
        }
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        try:
            r = requests.post(constants.DISCORD_OAUTH2_ENDPOINT + '/token', data=data, headers=headers)
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            handler.logger.exception("Couldn't post the data to Discord API.")
            handler.logger.debug(r.text)
            handler.send_error(500, "Couldn't post the data to Discord API.")
            return
        session_token = store_token(handler, r.json())
        if not session_token:
            # store_token already reported the error through the handler.
            return
        data = {
            'session_token': session_token
        }
        handler.send_json(json.dumps(data))
        return
    handler.logger.debug("No code")
    handler.send_error(401, "No code sent.")
7955001d563c27bfcf02d34774564ee9dee2c117 | 1,620 | py | Python | tAPP/1/Slicing.py | ArvinZJC/UofG_PGT_PSD_Python | d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4 | [
"MIT"
] | null | null | null | tAPP/1/Slicing.py | ArvinZJC/UofG_PGT_PSD_Python | d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4 | [
"MIT"
] | null | null | null | tAPP/1/Slicing.py | ArvinZJC/UofG_PGT_PSD_Python | d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4 | [
"MIT"
] | null | null | null | '''
Description: Exercise 1. Slicing
Version: 1.0.3.20210117
Author: Arvin Zhao
Date: 2021-01-12 04:01:07
Last Editors: Arvin Zhao
LastEditTime: 2021-01-17 12:12:28
'''
def get_line() -> str:
    '''
    Get input for the first line of a favourite song.

    Loops until the user enters a non-empty line.

    Returns
    -------
    line : input for the first line of a favourite song
    '''
    while True:
        line = input('Enter the first line of your favourite song: ')
        length = len(line)
        print('Input length:', length)
        if length:
            return line
        print('Please do enter something!')
def get_start() -> int:
    '''
    Get a starting number.

    Blank (whitespace-only) input yields None; otherwise loops until
    the user enters a valid integer.

    Returns
    -------
    start : a starting number, or None for blank input
    '''
    while True:
        raw = input('Enter a starting number: ')
        if not raw.strip():
            return None
        try:
            return int(raw)
        except ValueError:
            print('Error! Integer please!')
def get_end() -> int:
    '''
    Get an ending number.

    Blank (whitespace-only) input yields None; otherwise loops until
    the user enters a valid integer.

    Returns
    -------
    end : an ending number, or None for blank input

    Bug fix: the original broke out of the loop but never returned
    `end`, so callers always received None.
    '''
    while True:
        end = input('Enter an ending number: ')
        if len(end.strip()) == 0:
            end = None
            break
        else:
            try:
                end = int(end)
                break
            except ValueError:
                print('Error! Integer please!')
    return end
if __name__ == '__main__':
    line = get_line()
    # NOTE(review): get_end() as written never returns its parsed value
    # (it falls off the end and returns None), so the slice always runs
    # to the end of the line — see get_end.
    print('Your preferred section:', line[get_start():get_end()])
79550158e3f26c85be88a468b81c05fb33212755 | 1,245 | py | Python | god/twinter/init_stripped.py | makefu/painload | 9bf3a190d158bbda66a4c80e5ac3d818dd1217fa | [
"WTFPL"
] | 9 | 2018-12-03T12:54:40.000Z | 2021-02-25T22:47:24.000Z | god/twinter/init_stripped.py | makefu/painload | 9bf3a190d158bbda66a4c80e5ac3d818dd1217fa | [
"WTFPL"
] | 1 | 2018-12-02T22:47:34.000Z | 2018-12-02T22:47:34.000Z | god/twinter/init_stripped.py | makefu/painload | 9bf3a190d158bbda66a4c80e5ac3d818dd1217fa | [
"WTFPL"
] | 2 | 2016-04-02T17:18:33.000Z | 2017-03-30T12:38:16.000Z | #!/usr/bin/env python
import os.path
import sys
import tweepy
import re
from socket import *
# Twitter API credentials (stripped from this public copy).
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_KEY= ''
ACCESS_SECRET = ''
# Hostname/IP of the target printer (stripped from this public copy).
printer = ""
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
# Only the most recent mention is considered.
mention = api.mentions()[0]
# Strip the bot's own handle, trim whitespace, drop characters outside the
# allowed set, and cap the message at 32 characters (printer display limit,
# presumably — confirm against the printer's PJL spec).
mention_stripped =re.sub(r'[^\w _|@\[\]{}()<>:;!#$%^&+=-]+','',
    mention.text.replace("@shacktwinter","").lstrip().rstrip())[:32]
# msg_file (next to this script) caches the last message that was sent.
f = open(os.path.dirname(os.path.abspath(sys.argv[0]))+"/msg_file","r+")
last = f.read()
if last == mention_stripped:
    print "received old message"
    sys.exit(23)
else:
    print "received new message: %s" % mention_stripped
    # Send a PJL RDYMSG job to the printer's raw port (9100) to update
    # its ready-message display.
    s = socket(AF_INET, SOCK_STREAM)
    send_message = \
    '\x1b%%-12345X@PJL JOB\n@PJL RDYMSG DISPLAY="%s"\n@PJL EOJ\n\x1b%%-12345X' % (mention_stripped, )
    s.connect((printer, 9100))
    s.send(send_message)
    s.close()
    # Overwrite the cache file with the new message.
    f.seek(0)
    f.truncate(0)
    f.write(mention_stripped)
    f.close()
    # Follow the author (if not already) and acknowledge on Twitter.
    if not mention.user.following:
        mention.user.follow()
    api.update_status("@%s i appreciate your message '%s' for twinter! Ready Message updated." %
        (mention.user.screen_name,mention_stripped.upper()),in_reply_to_status_id=mention.id)
| 30.365854 | 101 | 0.684337 |
7955018c2af4555260bb615bd5f744d892807b4d | 238 | py | Python | Python Files/15c.py | kaskrex/pythonexercises | 2afff8495529f9b1fc1d3f40c03d41227c1f253f | [
"MIT"
] | 1 | 2021-08-10T05:40:35.000Z | 2021-08-10T05:40:35.000Z | Python Files/15c.py | kaskrex/pythonexercises | 2afff8495529f9b1fc1d3f40c03d41227c1f253f | [
"MIT"
] | null | null | null | Python Files/15c.py | kaskrex/pythonexercises | 2afff8495529f9b1fc1d3f40c03d41227c1f253f | [
"MIT"
a = "a b c d e fg"
b = ["h", "g", 2, "t"]
# converts string into list
x = a.split(' ')
print(x)
# Keep moving items from b into x until x holds 10 entries.
# Bug fix: the original did `b = b.pop()`, rebinding b to the popped
# element so the second iteration crashed with AttributeError; it also
# joined the original string `a` instead of the built list `x`.
while len(x) != 10:
    item = b.pop()
    print("Add", item)
    x.append(item)
    print("Got %d items now" % len(x))
# str() each element: x contains the int 2, which " ".join cannot take raw.
print(" ".join(str(element) for element in x))
795502273dc48fdf684fe2e0b8c17dbaab75cc3f | 8,530 | pyw | Python | main.pyw | Niyco/Cipher-tool | a0689daf8e8a087571d447efe6e98c206364316f | [
"MIT"
] | null | null | null | main.pyw | Niyco/Cipher-tool | a0689daf8e8a087571d447efe6e98c206364316f | [
"MIT"
] | null | null | null | main.pyw | Niyco/Cipher-tool | a0689daf8e8a087571d447efe6e98c206364316f | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import filedialog
from Solve_stages import *
from Text_stages import *
from Analysis_stages import *
from Output import *
# Main application window.
root = tk.Tk()
root.title("Cipher program")
root.geometry("1500x500")
root.state("zoomed") # maximise on launch; "zoomed" is apparently Windows only
def getOutputText():
    """Run the text through every enabled stage and return the result."""
    encoding = decode_var.get() == 1  # 1 = "Encode" selected, -1 = "Decode"
    text = ""
    for stage in stages:
        if not stage.check_var.get():
            continue  # stage disabled via its checkbox
        text = stage.encode(text) if encoding else stage.decode(text)
    return text
def updateOutputText():
    """Recompute the pipeline output and refresh the right-hand text pane."""
    text = getOutputText()
    # Replace the pane's contents wholesale.
    right_text.delete(1.0, tk.END)
    right_text.insert(tk.END,text)
    # Let each enabled stage decorate the output widget (e.g. highlighting).
    for stage in stages:
        if stage.check_var.get():
            stage.updateOutputWidget(text, right_text)
def updateStageEditor():
    """Clear the editor frame and let the selected stage draw its controls."""
    for child in stage_editor.winfo_children():
        child.grid_forget()
    stages[selected_stage.get()].display()
    # Drop keyboard focus from any stage widget back to the window.
    root.focus_set()
# Left-hand frame where the selected stage draws its controls.
stage_editor = tk.Frame(root, width=10, height=10)#Size is the same as right_text, they will expand equally to fill the space
stage_editor.grid(row=0, column=0, rowspan=4, sticky="NESW")
stage_editor.grid_propagate(0) #stops the contents of the window affecting the size
# Ordered pipeline of stages; stages[0] is always the Input stage.
stages = []
def addStage(stage):
    """Append a stage to the pipeline, select it and refresh the UI."""
    stages.append(stage)
    updateStagesFrame()
    stages[len(stages)-1].button.select() #select the newly added stage
    updateStageEditor()
    updateOutputText()
# Index of the currently selected stage (shared by the stage radiobuttons).
selected_stage = tk.IntVar()
stages_frame = tk.Frame(root)
stages_frame.grid(row=0, column=1, sticky="NS", columnspan=3)
#Radiobuttons to select between encode and decode
decode_var = tk.IntVar()
decodeBox = tk.Radiobutton(root, text="Decode", variable=decode_var,value=-1,command=updateOutputText)
encodeBox = tk.Radiobutton(root, text="Encode", variable=decode_var,value=1,command=updateOutputText)
decode_var.set(-1) #set to decode as default
decodeBox.grid(row=1,column=1,columnspan=3)
encodeBox.grid(row=2,column=1,columnspan=3)
#Up, Delete, and Down buttons
def stageUp():
    """Move the selected stage one slot earlier (the Input stage stays first)."""
    idx = selected_stage.get()
    if len(stages) > 1 and idx > 1:
        moved = stages.pop(idx)
        stages.insert(idx - 1, moved)
        selected_stage.set(idx - 1)
        updateStagesFrame()
        updateOutputText()
def stageDown():
    """Move the selected stage one slot later (never moves the Input stage)."""
    idx = selected_stage.get()
    if len(stages) > 1 and idx != 0 and idx < len(stages) - 1:
        moved = stages.pop(idx)
        stages.insert(idx + 1, moved)
        selected_stage.set(idx + 1)
        updateStagesFrame()
        updateOutputText()
def deleteStage():
    """Remove the selected stage (the Input stage cannot be deleted)."""
    idx = selected_stage.get()
    if len(stages) > 1 and idx != 0:
        stages.pop(idx)
        selected_stage.set(idx - 1)
        updateStagesFrame()
        updateStageEditor()
        updateOutputText()
# Up / delete / down buttons for reordering the pipeline.
stage_up_button = tk.Button(root, text = "↑",command=stageUp,takefocus=0)
stage_delete_button = tk.Button(root, text = "×",command=deleteStage,takefocus=0)
stage_down_button = tk.Button(root, text = "↓",command=stageDown,takefocus=0)
stage_up_button.grid(row=3, column=1, sticky="ESW")
stage_delete_button.grid(row=3,column=2, sticky="ESW")
stage_down_button.grid(row=3, column=3, sticky="ESW")
#Shortcuts for selecting the next and previous stage
def stageSelectUp(event):
    """Keyboard shortcut handler: select the previous stage in the list."""
    idx = selected_stage.get()
    if idx > 0:
        selected_stage.set(idx - 1)
        updateStagesFrame()
        updateStageEditor()
def stageSelectDown(event):
    """Keyboard shortcut handler: select the next stage in the list."""
    idx = selected_stage.get()
    if idx < len(stages) - 1:
        selected_stage.set(idx + 1)
        updateStagesFrame()
        updateStageEditor()
# Keyboard shortcuts for moving the stage selection.
# NOTE(review): Control-Tab is bound to "up" here; confirm that matches
# the intended selection direction for tab cycling.
root.bind("<Control-Tab>", stageSelectUp)
root.bind("<Control-Shift-Tab>", stageSelectDown)
root.bind("<Control-Prior>", stageSelectUp) #Control + page up
root.bind("<Control-Next>", stageSelectDown) #Control + page down
def updateStagesFrame():
    """Rebuild the stage list column: one radiobutton + checkbox per stage.

    NOTE(review): check_var is recreated and reset to True on every
    rebuild, so a stage's enabled/disabled state does not survive
    reordering — confirm whether that is intended.
    """
    for button in stages_frame.winfo_children():
        button.destroy()
    for stage_index in range(len(stages)):
        stage = stages[stage_index]
        # Radiobutton selects the stage; Checkbutton enables/disables it.
        stage.button = tk.Radiobutton(stages_frame, text=stage.name, variable = selected_stage, value = stage_index, command=updateStageEditor,
                          indicatoron = 0, width = 20, takefocus=0)
        stage.check_var = tk.BooleanVar()
        stage.check_var.set(True)
        stage.checkbox = tk.Checkbutton(stages_frame, variable = stage.check_var, command=updateOutputText, takefocus=0)
        if stage_index == 0: #Input cannot be disabled, so its checkbox is greyed out
            stage.checkbox.config(state="disabled")
        stage.button.grid(column=1, row=stage_index)
        stage.checkbox.grid(column=0, row=stage_index)
updateStagesFrame()
# Right-hand pane showing the pipeline output.
right_text = tk.Text(root, takefocus=0, width=10, height=10, font=("Courier", 10))
right_text.grid(row=0, column=4, rowspan=4, sticky="NESW")
right_text.grid_propagate(0)
# Only the outer columns (editor and output) absorb extra width.
tk.Grid.columnconfigure(root, 0, weight=1)
tk.Grid.columnconfigure(root, 1, weight=0)
tk.Grid.columnconfigure(root, 2, weight=0)
tk.Grid.columnconfigure(root, 3, weight=0)
tk.Grid.columnconfigure(root, 4, weight=1)
tk.Grid.rowconfigure(root, 0, weight=1)
tk.Grid.rowconfigure(root, 1, weight=0)
tk.Grid.columnconfigure(stage_editor, 0, weight=1)
tk.Grid.rowconfigure(stage_editor, 0, weight=1)
#==========
def add(menu, StageClass): #Helper function to make adding stages neater
    """Add a menu entry that appends a new StageClass instance to the pipeline."""
    menu.add_command(label= StageClass.name,#Takes the name from the class
                     command=lambda:addStage(StageClass(stage_editor, #passes the stage editor frame to draw to
                                                        updateOutputText))) #and a callback for when things change and the output text needs updating
#Functions for file menu operations:
def openCom():
    """Load a user-chosen file into the Input stage's textbox."""
    try:
        with filedialog.askopenfile() as chosen:
            contents = chosen.read()
        stages[0].textbox.delete(1.0, tk.END)
        stages[0].textbox.insert(tk.END, contents)
    except AttributeError:
        # askopenfile() returns None when the user cancels the dialog.
        pass
def clearCom():
    """Empty the input text and drop every stage except the Input stage."""
    global stages
    input_stage = stages[0]
    input_stage.textbox.delete(1.0, tk.END)
    stages = [input_stage]
    selected_stage.set(0)
    updateStageEditor()
    updateStagesFrame()
def saveCom():
    """Write the current pipeline output to a user-chosen file."""
    text = getOutputText()
    try:
        with filedialog.asksaveasfile() as file:
            file.write(text)
    except AttributeError:
        # asksaveasfile() returns None when the user cancels the dialog.
        pass
def copyCom():
    """Copy the current pipeline output to the system clipboard.

    Bug fix: the old code called stage.process(), which stages do not
    expose (they provide encode()/decode(), see getOutputText), and it
    ignored the per-stage enable checkboxes and the encode/decode mode.
    Reuse getOutputText() so the clipboard matches the output pane,
    consistent with saveCom().
    """
    text = getOutputText()
    root.clipboard_clear()
    root.clipboard_append(text)
    root.update()  # flush so the clipboard content survives the call
# Menu bar: File operations plus one cascade per stage category.
menu = tk.Menu(root)
file_menu = tk.Menu(menu, tearoff=0)
file_menu.add_command(label="Open", command=openCom)
file_menu.add_command(label="Clear", command = clearCom)
file_menu.add_command(label="Save", command=saveCom)
file_menu.add_command(label="Copy output", command=copyCom)
menu.add_cascade(label="File", menu = file_menu)
# Analysis stages (imported from Analysis_stages).
ana_menu = tk.Menu(menu, tearoff=0)
add(ana_menu, Length)
add(ana_menu, PlayfairDetect)
add(ana_menu, FrequencyAnalyse)
add(ana_menu, Doubles)
add(ana_menu, Triples)
add(ana_menu, IoC)
add(ana_menu, WordFinder)
add(ana_menu, VigenereKeyword)
add(ana_menu, ColumnarKeyword)
menu.add_cascade(label="Analyse", menu=ana_menu)
# Text-manipulation stages (imported from Text_stages).
text_menu = tk.Menu(menu, tearoff=0)
add(text_menu, Capitalise)
add(text_menu, Lowercase)
add(text_menu, Swapcase)
add(text_menu, Strip)
add(text_menu, RemoveSpaces)
add(text_menu, Reverse)
add(text_menu, Block)
menu.add_cascade(label="Text stage", menu=text_menu)
# Cipher solving stages (imported from Solve_stages).
solve_menu = tk.Menu(menu, tearoff=0)
add(solve_menu, CaesarShift)
add(solve_menu, Substitution)
add(solve_menu, Affine)
add(solve_menu, Vigenere)
#add(solve_menu, Transposition) #this one doesn't work
add(solve_menu, RailFence)
add(solve_menu, Scytale)
add(solve_menu, Morse)
menu.add_cascade(label="Solve stage", menu=solve_menu)
#Functions for the output menu operations
def changeFontSize(change):
    """Grow or shrink the monospace font in both text widgets by *change* points."""
    new_size = int(right_text.cget("font").split(" ")[1]) + change
    new_font = ("Courier", new_size)
    right_text.config(font=new_font)
    stages[0].textbox.config(font=new_font)
# Output menu: highlight stages plus font-size controls.
output_menu = tk.Menu(menu, tearoff=0)
add(output_menu, OutputHighlight)
add(output_menu, Blank)
output_menu.add_command(label="Increase font size", command=lambda:changeFontSize(1))
output_menu.add_command(label="Decrease font size", command=lambda:changeFontSize(-1))
# Tag used by stages to mark up parts of the output pane.
right_text.tag_configure("highlight", foreground = "red")
menu.add_cascade(label="Output", menu=output_menu)
root.config(menu=menu)
# Seed the pipeline with the mandatory Input stage and start the UI loop.
addStage(Input(stage_editor, updateOutputText))
root.mainloop()
| 36.609442 | 149 | 0.710785 |
7955037236fae4b34edd6fb7dc82e21157d942de | 4,462 | py | Python | castlib3/castor/parsing.py | CrankOne/castlib | 9db26c435be1ba51ef82b138542a02f96cb595b4 | [
"MIT"
] | null | null | null | castlib3/castor/parsing.py | CrankOne/castlib | 9db26c435be1ba51ef82b138542a02f96cb595b4 | [
"MIT"
] | null | null | null | castlib3/castor/parsing.py | CrankOne/castlib | 9db26c435be1ba51ef82b138542a02f96cb595b4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Renat R. Dusaev <crank@qcrypt.org>
# Author: Renat R. Dusaev <crank@qcrypt.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import re
import time
import datetime
# 3 columns: the basename of the HSM name, the fileid in the used castor name
# server, and the current status of the found record
rxsCastorHSMFilename = "(/([^/\s]*))+/?"
rxsCastorFileID = "\d+@\w+"
rxsCastorQueryState = "STAGEIN|STAGED|STAGEOUT|CANBEMIGR|INVALID"
rxsQryResponse = r"^" + r"\s*(?P<hsmFilename>" + rxsCastorHSMFilename + ")" \
+ r"\s+(?P<fileID>" + rxsCastorFileID + ")" \
+ r"\s+(?P<status>" + rxsCastorQueryState + ")" \
+ r"$"
# groups: hsmFilename, fileID, status
rxStagerQueryResponse = re.compile( rxsQryResponse )
# groups: requestID
rxStagerGetRequestResponse = re.compile( '.*' ) # TODO
# groups: requestID
rxStagerPutRequestResponse = re.compile( '.*' ) # TODO
rxsNSLSFileClass = r'^\s*(?P<fileClass>\S+)\s+'
rxsNSLSMode = r'(?P<mode>[dDlmrwx\-st]+)\s+'
rxsNSLSDirNentries = r'(?P<dirNEntries>\w+)\s+'
rxsNSLSOwnerNGroup = r'(?P<ownerName>[\w-]+)\s+(?P<ownerGroupName>[\w-]+)\s+'
rxsNSLSFileSize = r'(?P<fileSize>\d+)\s+'
rxsNSLSTimestamp = r'(?P<modMonth>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+(?P<modDate>\d{1,2})\s+((?P<modYear>\d{4})|(?P<modTime>\d{1,2}:\d{1,2}))'
rxsNSLSChecksum = r'(?P<checksum>(:?\s+AD\s+(?P<adler32>[0-9a-fA-F]+)))?\s+'
# groups: todo
rxsNSLS = rxsNSLSFileClass \
+ rxsNSLSMode \
+ rxsNSLSDirNentries \
+ rxsNSLSOwnerNGroup \
+ rxsNSLSFileSize \
+ rxsNSLSTimestamp \
+ rxsNSLSChecksum \
+ r'(?P<filename>.+)$'
rxNSLS = re.compile( rxsNSLS, re.M )
rxsStagerQueryError = r'(?:Error\s+(?P<errorCode>\d+))\/(?P<errorMessage>.+)'
rxStagerQueryError = re.compile( rxsStagerQueryError )
rxsStagerSubrequesFailure = r'(?:(?P<filename>\/[\w\.\/-]+)\s+' \
r'(?P<subrequestStatus>SUBREQUEST_(?P<subreqStatPart>\w+))(\s+' \
r'((?P<error>(?P<errorCode>\d+)\s+' \
r'(?P<errorMessage>.+))))?)'
rxsStagerRequestID = r'(:?Stager request ID: (?P<StagerRequestUUID>([0-9a-fA-F\-]+)+))'
rxStagerSubrequesFailure = re.compile(
'^' + rxsStagerSubrequesFailure + '|' + rxsStagerRequestID + '$',
re.M )
# Matches one timestamp line of `rfstat` output, e.g.
# "Last access    : Mon Jan  2 03:04:05 2017".
rfstatTimeReXs = \
    r"^Last\s(?P<type>access|modify|stat\.\smod\.)\s+\:\s*(?P<timeString>.*)\s*$"

def obtain_rfstat_timestamps( rfstatOut ):
    """
    Obtains access/modification/metadata modification timestamps from
    rfstat util output.

    Returns a dict mapping any of 'accTimestamp', 'modTimestamp' and
    'statModTimestamp' to integer UNIX timestamps (local time, via
    time.mktime).

    Fixes: uses direct named-group access instead of the Python-2-only
    dict.iteritems(), so this now also runs on Python 3; the pattern
    string above is now a raw string, avoiding invalid-escape warnings.
    """
    res = {}
    rfstatTimeReX = re.compile( rfstatTimeReXs )
    for l in rfstatOut.splitlines():
        m = rfstatTimeReX.match(l)
        if not m:
            continue
        kind = m.group('type')
        if 'access' == kind:
            key = 'accTimestamp'
        elif 'modify' == kind:
            key = 'modTimestamp'
        else:  # the only remaining alternative the regex admits: 'stat. mod.'
            key = 'statModTimestamp'
        # Parse e.g. "Mon Jan  2 03:04:05 2017" into a UNIX timestamp.
        res[key] = int(time.mktime(datetime.datetime.strptime(
            m.group('timeString'), "%a %b %d %H:%M:%S %Y").timetuple()))
    return res
| 42.09434 | 161 | 0.616988 |
7955039792878fe95752ad867f4e98cb408398c1 | 6,770 | py | Python | py61850/types/base.py | arthurazs/py61850 | ba9c5f40ef21bfecd14a8d380e9ff512da9ba5bf | [
"MIT"
] | 3 | 2020-09-21T02:13:58.000Z | 2021-09-18T02:32:56.000Z | py61850/types/base.py | arthurazs/py61850 | ba9c5f40ef21bfecd14a8d380e9ff512da9ba5bf | [
"MIT"
] | null | null | null | py61850/types/base.py | arthurazs/py61850 | ba9c5f40ef21bfecd14a8d380e9ff512da9ba5bf | [
"MIT"
] | 2 | 2020-12-29T15:09:50.000Z | 2022-01-04T16:19:48.000Z | from abc import ABC
from typing import Any, Optional, Tuple, Union
from struct import pack as s_pack
from py61850.utils.errors import raise_type
class Generic(ABC):
    """Shared encode/decode plumbing for IEC data types."""
    def _parse(self, anything: Any) -> Tuple[Optional[bytes], Any]:
        """Return (raw_value, value) for *anything*.

        If *anything* is bytes (or a sequence whose first element is
        bytes), it is treated as the encoded form and decoded;
        otherwise it is treated as the decoded value and encoded.
        Returns (None, None) when either side comes out as None.
        """
        unpacked = anything
        if not isinstance(anything, bytes):
            # Peek at the first element: some callers pass a tuple whose
            # head is the raw bytes. Non-indexable values fall through.
            try:
                unpacked = anything[0]
            except (TypeError, IndexError):
                pass
        if isinstance(unpacked, bytes):
            raw_value = anything
            value = self._decode(raw_value)
        else:
            value = anything
            raw_value = self._encode(value)
        if raw_value is None or value is None:
            return None, None
        return raw_value, value
    @staticmethod
    def _encode(value: Any) -> bytes:
        # Subclasses must provide the value -> bytes conversion.
        raise NotImplementedError  # pragma: no cover
    @staticmethod
    def _decode(raw_value: bytes) -> Any:
        # Subclasses must provide the bytes -> value conversion.
        raise NotImplementedError  # pragma: no cover
class Base(Generic, ABC):
"""This is the base for any IEC data type.
This class does not care for encoding, nor decoding the value field,
which should be handled by the subclass.
Thus, the `Base` expects the already encoded value field, but handles
the encoding/decoding of both tag and length field.
Args:
raw_tag: The encoded tag field.
raw_value: The encoded value field.
Raises:
TypeError: If `raw_tag` type is different from `bytes`.
TypeError: If `raw_value` type is different from `bytes` and `NoneType`.
ValueError: If `raw_tag` length is different from 1.
"""
    def __init__(self, raw_tag: bytes, raw_value: Optional[bytes] = None) -> None:
        # Validates and stores the tag, then the value (which also derives
        # the length field — _set_raw_value is defined elsewhere; confirm).
        self._set_tag(raw_tag)
        self._set_raw_value(raw_value)
        # No parent container until set_parent() is called.
        self._parent = None
def __bytes__(self) -> bytes:
"""Return the encoded data, including all existing fields.
If value field is `None`: return tag + length.
If value field is not `None`: return tag + length + value.
"""
if self._raw_value is None:
return self._raw_tag + self._raw_length
return self._raw_tag + self._raw_length + self._raw_value
def __len__(self) -> int:
"""Return the length of the encoded data, including all existing fields.
If value field is `None`: return tag + length.
If value field is not `None`: return tag + length + value.
Note:
For the length field, use the `length` property.
"""
if self.raw_value is None:
return len(self.raw_tag) + len(self.raw_length)
return len(self.raw_tag) + len(self.raw_length) + len(self.raw_value)
def set_parent(self, parent: 'Base'):
if not isinstance(parent, Base):
raise_type('parent', Base, type(parent))
self._parent = parent
def _update(self, caller: 'Base'):
byte_stream = b''
for value in self._value:
byte_stream += bytes(value)
self._set_raw_value(byte_stream)
def _set_tag(self, raw_tag: bytes) -> None:
# assert `raw_tag` is `bytes` and has length of 1, then set `raw_tag` and `tag`
if not isinstance(raw_tag, bytes):
raise_type('raw_tag', bytes, type(raw_tag))
if len(raw_tag) != 1:
raise ValueError('raw_tag out of supported length')
self._raw_tag = raw_tag
# self._tag = raw_tag.hex()
self._tag = self.__class__.__name__
@staticmethod
def unpack_extra_value(value_a: Union[bytes, Tuple[bytes, Any]],
value_b: Union[Any, Tuple[Any, Any]]) -> Tuple[bytes, Any, Any]:
try:
value_a, value_c = value_a
except ValueError:
value_b, value_c = value_b
if value_c is None:
value_b, value_c = value_b
return value_a, value_b, value_c
@property
def tag(self) -> str:
"""The class name."""
return self._tag
@property
def raw_tag(self) -> bytes:
"""The encoded tag field."""
return self._raw_tag
def _set_length(self, length: int) -> None:
"""Encode length according to ASN.1 BER.
`raw_length` will be of 1 byte long if < 128.
If it's 2+ bytes long, the first byte indicates how many
bytes follows.
Example:
128 == b'\x81\x80', where 0x81 indicates 1 extra byte
for the length, and 0x80 is the length itself.
Args:
length: The length to be encoded.
Raises:
ValueError: If `length` is greater than `0xFFFF`.
"""
# NOTE enable extra_length > 2?
# NOTE indefinite length?
if 0 <= length < 0x80:
self._raw_length = s_pack('!B', length)
elif 0x80 <= length <= 0xFF:
self._raw_length = s_pack('!BB', 0x81, length)
elif 0xFF < length <= 0xFFFF:
self._raw_length = s_pack('!BH', 0x82, length)
else:
raise ValueError(f'data length greater than {0xFFFF}')
self._length = length
@property
def raw_value(self) -> Optional[bytes]:
"""The encoded value field."""
return self._raw_value
def _set_raw_value(self, raw_value: Optional[bytes]) -> None:
"""Set raw value field.
Note:
This method does not encode the value field.
This should be done by the subclass using
the `_encode_value()` method.
Args:
raw_value: The raw value to be set.
Raises:
ValueError: If the length of `raw_value` is greater than `0xFFFF`.
TypeError: If `raw_value` type is different from `bytes` and `NoneType`.
"""
if raw_value is None:
self._raw_value = raw_value
self._set_length(0)
else:
if not isinstance(raw_value, bytes):
raise_type('raw_value', bytes, type(raw_value))
self._raw_value = raw_value
self._set_length(len(raw_value))
try:
self._parent._update(self)
except AttributeError:
pass
@property
def raw_length(self):
"""The encoded length field.
Note:
For the full data length, including the tag and length fields, use the `len` method.
"""
return self._raw_length
@property
def length(self):
"""The decoded length field"""
return self._length
@property
def value(self) -> Any:
"""The decoded value field"""
return self._value
@value.setter
def value(self, value: Any) -> None:
raw_value = self._encode(value)
self._set_raw_value(raw_value)
self._value = value
| 31.488372 | 96 | 0.593058 |
795503d8e8fac7568e6436b1bc5402fd8c93de17 | 5,309 | py | Python | tests/unit/utils/test_http.py | lnattrass/salt | fdf72b9fc9a0531daacb4bc5cda9e5ba980c0852 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/test_http.py | lnattrass/salt | fdf72b9fc9a0531daacb4bc5cda9e5ba980c0852 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/test_http.py | lnattrass/salt | fdf72b9fc9a0531daacb4bc5cda9e5ba980c0852 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:codeauthor: Nicole Thomas <nicole@saltstack.com>
"""
from __future__ import absolute_import, print_function, unicode_literals
import socket
from contextlib import closing
import salt.utils.http as http
from tests.support.helpers import MirrorPostHandler, Webserver, slowTest
from tests.support.unit import TestCase
class HTTPTestCase(TestCase):
    """
    Unit TestCase for the salt.utils.http module.
    """
    @classmethod
    def setUpClass(cls):
        # Spin up one mirroring POST webserver shared by the whole class.
        cls.post_webserver = Webserver(handler=MirrorPostHandler)
        cls.post_webserver.start()
        cls.post_web_root = cls.post_webserver.web_root
    @classmethod
    def tearDownClass(cls):
        cls.post_webserver.stop()
        del cls.post_webserver
    # sanitize_url tests
    def test_sanitize_url_hide_fields_none(self):
        """
        Tests sanitizing a url when the hide_fields kwarg is None.
        """
        mock_url = "https://api.testing.com/?&foo=bar&test=testing"
        ret = http.sanitize_url(mock_url, hide_fields=None)
        self.assertEqual(ret, mock_url)
    def test_sanitize_url_no_elements(self):
        """
        Tests sanitizing a url when no elements should be sanitized.
        """
        mock_url = "https://api.testing.com/?&foo=bar&test=testing"
        ret = http.sanitize_url(mock_url, [""])
        self.assertEqual(ret, mock_url)
    def test_sanitize_url_single_element(self):
        """
        Tests sanitizing a url with only a single element to be sanitized.
        """
        mock_url = (
            "https://api.testing.com/?&keep_it_secret=abcdefghijklmn"
            "&api_action=module.function"
        )
        mock_ret = (
            "https://api.testing.com/?&keep_it_secret=XXXXXXXXXX&"
            "api_action=module.function"
        )
        ret = http.sanitize_url(mock_url, ["keep_it_secret"])
        self.assertEqual(ret, mock_ret)
    def test_sanitize_url_multiple_elements(self):
        """
        Tests sanitizing a url with multiple elements to be sanitized.
        """
        mock_url = (
            "https://api.testing.com/?rootPass=badpassword%21"
            "&skipChecks=True&api_key=abcdefghijklmn"
            "&NodeID=12345&api_action=module.function"
        )
        mock_ret = (
            "https://api.testing.com/?rootPass=XXXXXXXXXX"
            "&skipChecks=True&api_key=XXXXXXXXXX"
            "&NodeID=12345&api_action=module.function"
        )
        ret = http.sanitize_url(mock_url, ["api_key", "rootPass"])
        self.assertEqual(ret, mock_ret)
    # _sanitize_components tests
    def test_sanitize_components_no_elements(self):
        """
        Tests when zero elements need to be sanitized.
        """
        mock_component_list = ["foo=bar", "bar=baz", "hello=world"]
        mock_ret = "foo=bar&bar=baz&hello=world&"
        ret = http._sanitize_url_components(mock_component_list, "api_key")
        self.assertEqual(ret, mock_ret)
    def test_sanitize_components_one_element(self):
        """
        Tests a single component to be sanitized.
        """
        mock_component_list = ["foo=bar", "api_key=abcdefghijklmnop"]
        mock_ret = "foo=bar&api_key=XXXXXXXXXX&"
        ret = http._sanitize_url_components(mock_component_list, "api_key")
        self.assertEqual(ret, mock_ret)
    def test_sanitize_components_multiple_elements(self):
        """
        Tests two components to be sanitized.
        """
        mock_component_list = ["foo=bar", "foo=baz", "api_key=testing"]
        mock_ret = "foo=XXXXXXXXXX&foo=XXXXXXXXXX&api_key=testing&"
        ret = http._sanitize_url_components(mock_component_list, "foo")
        self.assertEqual(ret, mock_ret)
    @slowTest
    def test_query_null_response(self):
        """
        This tests that we get a null response when raise_error=False and the
        host/port cannot be reached.
        """
        host = "127.0.0.1"
        # Find unused port
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.bind((host, 0))
            port = sock.getsockname()[1]
        url = "http://{host}:{port}/".format(host=host, port=port)
        result = http.query(url, raise_error=False)
        assert result == {"body": None}, result
    def test_query_error_handling(self):
        """
        Tests that unreachable ports and unresolvable hosts produce a dict
        containing a string 'error' entry rather than raising.
        """
        ret = http.query("http://127.0.0.1:0")
        self.assertTrue(isinstance(ret, dict))
        self.assertTrue(isinstance(ret.get("error", None), str))
        ret = http.query("http://myfoobardomainthatnotexist")
        self.assertTrue(isinstance(ret, dict))
        self.assertTrue(isinstance(ret.get("error", None), str))
    def test_requests_multipart_formdata_post(self):
        """
        Test handling of a multipart/form-data POST using the requests backend
        """
        # Expected body: the mirrored multipart payload; {0} is the boundary.
        match_this = '{0}\r\nContent-Disposition: form-data; name="fieldname_here"\r\n\r\nmydatahere\r\n{0}--\r\n'
        ret = http.query(
            self.post_web_root,
            method="POST",
            data="mydatahere",
            formdata=True,
            formdata_fieldname="fieldname_here",
            backend="requests",
        )
        body = ret.get("body", "")
        # The boundary is everything up to the first CR of the echoed body.
        boundary = body[: body.find("\r")]
        self.assertEqual(body, match_this.format(boundary))
| 35.15894 | 114 | 0.630062 |
79550456108b442dc25b5b1d93e12cfa8a2540de | 1,840 | py | Python | csi-monitor-gui/CsiDMClient/Main.py | brunosoaresds/csi-monitor | bb89cd50688f48d3f236d9654be7d845b8ba873c | [
"MIT"
] | 38 | 2018-05-11T18:55:42.000Z | 2022-03-02T03:37:13.000Z | csi-monitor-gui/CsiDMClient/Main.py | brunosoaresds/csi-monitor | bb89cd50688f48d3f236d9654be7d845b8ba873c | [
"MIT"
] | 10 | 2018-10-22T13:06:42.000Z | 2021-11-22T12:03:10.000Z | csi-monitor-gui/CsiDMClient/Main.py | brunosoaresds/csi-monitor | bb89cd50688f48d3f236d9654be7d845b8ba873c | [
"MIT"
] | 11 | 2018-10-24T11:54:47.000Z | 2021-12-14T07:07:24.000Z | #!/usr/bin/env python
import getopt
import sys
import glob
import os
from Server import Server
def usage():
    """Print the command-line help text for the CSI DM client."""
    banner = "/*************************************/"
    help_lines = (
        banner,
        "/* CSI Device Manager Client usage: */",
        banner,
        "$ Main.py --port=<server_port> --sender=<sender CsiDM> --receiver=<receiver CsiDM>",
        banner,
        "/******* Parameters: ********/",
        banner,
        "- <port>: The server socket port which the device manager client will uses.",
        "- <sender>: The sender CSI Device Manager address, ip_address:port pattern (Ex: 192.168.0.1:3000)",
        "- <receiver>: The receiver CSI Device Manager address, ip_address:port pattern (Ex: 192.168.0.2:3000)",
        banner,
    )
    for line in help_lines:
        print(line)
def main():
    """Parse CLI options, clear stale output data and run the server.

    Requires --port, --sender and --receiver; prints usage and exits with
    status 2 on a parse error or when any option is missing.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], [], ["port=", "sender=", "receiver="])
    except getopt.GetoptError as err:
        # print help information and exit:
        usage()
        # Parenthesized call form works on both Python 2 and 3; the bare
        # `print str(err)` statement was Python-2-only syntax.
        print(str(err))
        sys.exit(2)
    port = None
    sender = None
    receiver = None
    for o, a in opts:
        if o == "--port":
            port = int(a)
        elif o == "--sender":
            sender = a
        elif o == "--receiver":
            receiver = a
        else:
            # getopt only yields the long options declared above, so this
            # branch is unreachable in practice.
            assert False, "unhandled option"
    if port is None or sender is None or receiver is None:
        # All three options are mandatory.
        usage()
        sys.exit(2)
    # Clear output_data folder so the server starts from a clean slate.
    filesDir = os.path.dirname(os.path.abspath(__file__)) + '/output_data'
    for stale in glob.glob(filesDir + '/*'):
        os.remove(stale)
    server = Server(port, sender, receiver)
    server.runServer()
if __name__ == "__main__":
    main()
| 29.206349 | 114 | 0.515217 |
79550538db50a8a03f70b7e6e3ec0b92a9a02833 | 14,455 | py | Python | lib/spack/spack/patch.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | lib/spack/spack/patch.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | lib/spack/spack/patch.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import hashlib
import os
import os.path
import inspect
import llnl.util.filesystem
import llnl.util.lang
import spack.error
import spack.fetch_strategy as fs
import spack.repo
import spack.stage
import spack.util.spack_json as sjson
import spack
from spack.util.compression import allowed_archive
from spack.util.crypto import checksum, Checker
from spack.util.executable import which
def apply_patch(stage, patch_path, level=1, working_dir='.'):
    """Apply the patch file at ``patch_path`` to the code in ``stage``.

    Args:
        stage (spack.stage.Stage): stage with code that will be patched
        patch_path (str): filesystem location for the patch to apply
        level (int, optional): patch level (default 1)
        working_dir (str): relative path *within* the stage to change to
            (default '.')
    """
    patch_cmd = which("patch", required=True)
    # -s: silent, -p: strip level, -i: input file, -d: directory to patch in.
    patch_args = ['-s', '-p', str(level), '-i', patch_path, '-d', working_dir]
    with llnl.util.filesystem.working_dir(stage.source_path):
        patch_cmd(*patch_args)
class Patch(object):
    """Base class for patches.
    Arguments:
        pkg (str): the package that owns the patch
    The owning package is not necessarily the package to apply the patch
    to -- in the case where a dependent package patches its dependency,
    it is the dependent's fullname.
    """
    def __init__(self, pkg, path_or_url, level, working_dir):
        # validate level (must be an integer >= 0)
        if not isinstance(level, int) or not level >= 0:
            raise ValueError("Patch level needs to be a non-negative integer.")
        # Attributes shared by all patch subclasses
        self.owner = pkg.fullname
        self.path_or_url = path_or_url  # needed for debug output
        self.path = None  # must be set before apply()
        self.level = level
        self.working_dir = working_dir
    def fetch(self, stage):
        """Fetch the patch in case of a UrlPatch
        Args:
            stage: stage for the package that needs to be patched
        """
        # Intentionally a no-op here; UrlPatch overrides this to download.
    def clean(self):
        """Clean up the patch stage in case of a UrlPatch"""
        # Intentionally a no-op here; UrlPatch overrides this.
    def apply(self, stage):
        """Apply a patch to source in a stage.
        Arguments:
            stage (spack.stage.Stage): stage where source code lives
        """
        assert self.path, (
            "Path for patch not set in apply: %s" % self.path_or_url)
        if not os.path.isfile(self.path):
            raise NoSuchPatchError("No such patch: %s" % self.path)
        apply_patch(stage, self.path, self.level, self.working_dir)
    def cache(self):
        # Base patches have no backing stage to cache; UrlPatch overrides.
        return None
    def to_dict(self):
        """Partial dictionary -- subclasses should add to this."""
        # `self.sha256` is supplied by subclasses (FilePatch property /
        # UrlPatch attribute); it is not set on this base class.
        return {
            'owner': self.owner,
            'sha256': self.sha256,
            'level': self.level,
            'working_dir': self.working_dir,
        }
class FilePatch(Patch):
    """Describes a patch that is retrieved from a file in the repository.
    Arguments:
        pkg (str): the class object for the package that owns the patch
        relative_path (str): path to patch, relative to the repository
            directory for a package.
        level (int): level to pass to patch command
        working_dir (str): path within the source directory where patch
            should be applied
    """
    def __init__(self, pkg, relative_path, level, working_dir,
                 ordering_key=None):
        self.relative_path = relative_path
        # patches may be defined by relative paths to parent classes
        # search mro to look for the file
        abs_path = None
        # At different times we call FilePatch on instances and classes
        pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
        for cls in inspect.getmro(pkg_cls):
            if not hasattr(cls, 'module'):
                # We've gone too far up the MRO
                break
            # Cannot use pkg.package_dir because it's a property and we have
            # classes, not instances.
            pkg_dir = os.path.abspath(os.path.dirname(cls.module.__file__))
            path = os.path.join(pkg_dir, self.relative_path)
            if os.path.exists(path):
                abs_path = path
                break
        if abs_path is None:
            msg = 'FilePatch: Patch file %s for ' % relative_path
            msg += 'package %s.%s does not exist.' % (pkg.namespace, pkg.name)
            raise ValueError(msg)
        super(FilePatch, self).__init__(pkg, abs_path, level, working_dir)
        self.path = abs_path
        # Checksum is computed lazily on first access of `sha256`.
        self._sha256 = None
        self.ordering_key = ordering_key
    @property
    def sha256(self):
        """sha256 of the patch file, computed on first access and cached."""
        if self._sha256 is None:
            self._sha256 = checksum(hashlib.sha256, self.path)
        return self._sha256
    def to_dict(self):
        """Base dictionary plus this patch's repo-relative path."""
        return llnl.util.lang.union_dicts(
            super(FilePatch, self).to_dict(),
            {'relative_path': self.relative_path})
class UrlPatch(Patch):
    """Describes a patch that is retrieved from a URL.
    Arguments:
        pkg (str): the package that owns the patch
        url (str): URL where the patch can be fetched
        level (int): level to pass to patch command
        working_dir (str): path within the source directory where patch
            should be applied
    """
    def __init__(self, pkg, url, level=1, working_dir='.', ordering_key=None,
                 **kwargs):
        super(UrlPatch, self).__init__(pkg, url, level, working_dir)
        self.url = url
        self.ordering_key = ordering_key
        # Compressed patches need a digest for the archive itself in
        # addition to the digest of the contained patch file.
        self.archive_sha256 = kwargs.get('archive_sha256')
        if allowed_archive(self.url) and not self.archive_sha256:
            raise PatchDirectiveError(
                "Compressed patches require 'archive_sha256' "
                "and patch 'sha256' attributes: %s" % self.url)
        self.sha256 = kwargs.get('sha256')
        if not self.sha256:
            raise PatchDirectiveError("URL patches require a sha256 checksum")
    # TODO: this function doesn't use the stage arg
    def fetch(self, stage):
        """Retrieve the patch in a temporary stage and compute self.path
        Args:
            stage: stage for the package that needs to be patched
        """
        # use archive digest for compressed archives
        fetch_digest = self.sha256
        if self.archive_sha256:
            fetch_digest = self.archive_sha256
        fetcher = fs.URLFetchStrategy(self.url, fetch_digest,
                                      expand=bool(self.archive_sha256))
        # Mirror path is "<package name>/<patch file name>".
        per_package_ref = os.path.join(
            self.owner.split('.')[-1], os.path.basename(self.url))
        # Reference starting with "spack." is required to avoid cyclic imports
        mirror_ref = spack.mirror.mirror_archive_paths(
            fetcher, per_package_ref)
        self.stage = spack.stage.Stage(fetcher, mirror_paths=mirror_ref)
        self.stage.create()
        self.stage.fetch()
        self.stage.check()
        root = self.stage.path
        if self.archive_sha256:
            self.stage.expand_archive()
            root = self.stage.source_path
        files = os.listdir(root)
        if not files:
            if self.archive_sha256:
                raise NoSuchPatchError(
                    "Archive was empty: %s" % self.url)
            else:
                raise NoSuchPatchError(
                    "Patch failed to download: %s" % self.url)
        # NOTE(review): assumes the stage holds exactly one entry; with
        # several, os.listdir order makes the choice arbitrary — confirm.
        self.path = os.path.join(root, files.pop())
        if not os.path.isfile(self.path):
            raise NoSuchPatchError(
                "Archive %s contains no patch file!" % self.url)
        # for a compressed archive, Need to check the patch sha256 again
        # and the patch is in a directory, not in the same place
        if self.archive_sha256 and spack.config.get('config:checksum'):
            checker = Checker(self.sha256)
            if not checker.check(self.path):
                raise fs.ChecksumError(
                    "sha256 checksum failed for %s" % self.path,
                    "Expected %s but got %s" % (self.sha256, checker.sum))
    def cache(self):
        # The downloaded stage doubles as the cacheable artifact.
        return self.stage
    def clean(self):
        # Remove the temporary download stage created by fetch().
        self.stage.destroy()
    def to_dict(self):
        """Base dictionary plus URL (and archive digest when present)."""
        data = super(UrlPatch, self).to_dict()
        data['url'] = self.url
        if self.archive_sha256:
            data['archive_sha256'] = self.archive_sha256
        return data
def from_dict(dictionary):
    """Create a patch from json dictionary."""
    if 'owner' not in dictionary:
        raise ValueError('Invalid patch dictionary: %s' % dictionary)
    pkg = spack.repo.get(dictionary['owner'])
    if 'url' in dictionary:
        # URL patches carry everything needed to re-fetch and verify later.
        return UrlPatch(
            pkg,
            dictionary['url'],
            dictionary['level'],
            dictionary['working_dir'],
            sha256=dictionary['sha256'],
            archive_sha256=dictionary.get('archive_sha256'))
    if 'relative_path' in dictionary:
        patch = FilePatch(
            pkg,
            dictionary['relative_path'],
            dictionary['level'],
            dictionary['working_dir'])
        # If the patch in the repo changes, we cannot get it back, so we
        # just check it and fail here.
        # TODO: handle this more gracefully.
        expected = dictionary['sha256']
        checker = Checker(expected)
        if not checker.check(patch.path):
            raise fs.ChecksumError(
                "sha256 checksum failed for %s" % patch.path,
                "Expected %s but got %s" % (expected, checker.sum),
                "Patch may have changed since concretization.")
        return patch
    raise ValueError("Invalid patch dictionary: %s" % dictionary)
class PatchCache(object):
    """Index of patches used in a repository, by sha256 hash.
    This allows us to look up patches without loading all packages.  It's
    also needed to properly implement dependency patching, as need a way
    to look up patches that come from packages not in the Spec sub-DAG.
    The patch index is structured like this in a file (this is YAML, but
    we write JSON)::
        patches:
            sha256:
                namespace1.package1:
                    <patch json>
                namespace2.package2:
                    <patch json>
                ... etc. ...
    """
    def __init__(self, data=None):
        # `data` is the parsed top-level document; only its 'patches'
        # mapping is kept. No data means an empty index.
        if data is None:
            self.index = {}
        else:
            if 'patches' not in data:
                raise IndexError('invalid patch index; try `spack clean -m`')
            self.index = data['patches']
    @classmethod
    def from_json(cls, stream):
        """Build a PatchCache from a JSON stream."""
        return PatchCache(sjson.load(stream))
    def to_json(self, stream):
        """Serialize this cache's index to `stream` as JSON."""
        sjson.dump({'patches': self.index}, stream)
    def patch_for_package(self, sha256, pkg):
        """Look up a patch in the index and build a patch object for it.
        Arguments:
            sha256 (str): sha256 hash to look up
            pkg (spack.package.Package): Package object to get patch for.
        We build patch objects lazily because building them requires that
        we have information about the package's location in its repo.
        """
        sha_index = self.index.get(sha256)
        if not sha_index:
            raise NoSuchPatchError(
                "Couldn't find patch with sha256: %s" % sha256)
        patch_dict = sha_index.get(pkg.fullname)
        if not patch_dict:
            raise NoSuchPatchError(
                "Couldn't find patch for package %s with sha256: %s"
                % (pkg.fullname, sha256))
        # add the sha256 back (we take it out on write to save space,
        # because it's the index key)
        patch_dict = dict(patch_dict)
        patch_dict['sha256'] = sha256
        return from_dict(patch_dict)
    def update_package(self, pkg_fullname):
        """Re-index the patches owned by one package."""
        # remove this package from any patch entries that reference it.
        empty = []
        for sha256, package_to_patch in self.index.items():
            remove = []
            for fullname, patch_dict in package_to_patch.items():
                if patch_dict['owner'] == pkg_fullname:
                    remove.append(fullname)
            for fullname in remove:
                package_to_patch.pop(fullname)
            if not package_to_patch:
                empty.append(sha256)
        # remove any entries that are now empty
        for sha256 in empty:
            del self.index[sha256]
        # update the index with per-package patch indexes
        pkg = spack.repo.get(pkg_fullname)
        partial_index = self._index_patches(pkg)
        for sha256, package_to_patch in partial_index.items():
            p2p = self.index.setdefault(sha256, {})
            p2p.update(package_to_patch)
    def update(self, other):
        """Update this cache with the contents of another."""
        for sha256, package_to_patch in other.index.items():
            p2p = self.index.setdefault(sha256, {})
            p2p.update(package_to_patch)
    @staticmethod
    def _index_patches(pkg_class):
        """Return a {sha256: {fullname: patch dict}} index for one package."""
        index = {}
        # Add patches from the class
        for cond, patch_list in pkg_class.patches.items():
            for patch in patch_list:
                patch_dict = patch.to_dict()
                patch_dict.pop('sha256')  # save some space
                index[patch.sha256] = {pkg_class.fullname: patch_dict}
        # and patches on dependencies
        for name, conditions in pkg_class.dependencies.items():
            for cond, dependency in conditions.items():
                for pcond, patch_list in dependency.patches.items():
                    for patch in patch_list:
                        dspec = spack.repo.get(dependency.spec.name)
                        patch_dict = patch.to_dict()
                        patch_dict.pop('sha256')  # save some space
                        index[patch.sha256] = {dspec.fullname: patch_dict}
        return index
class NoSuchPatchError(spack.error.SpackError):
    """Raised when a patch file doesn't exist (on disk or in an index)."""
class PatchDirectiveError(spack.error.SpackError):
    """Raised when the wrong arguments are supplied to the patch directive."""
| 34.58134 | 79 | 0.604773 |
7955065c642fbca718d3d93e7a775f518cc4ed8d | 4,699 | py | Python | polaris/polaris/settings.py | lijamie98/django-polaris | 5cdda7434281988deb761b34f574dfcaf7ae9f5d | [
"Apache-2.0"
] | null | null | null | polaris/polaris/settings.py | lijamie98/django-polaris | 5cdda7434281988deb761b34f574dfcaf7ae9f5d | [
"Apache-2.0"
] | null | null | null | polaris/polaris/settings.py | lijamie98/django-polaris | 5cdda7434281988deb761b34f574dfcaf7ae9f5d | [
"Apache-2.0"
] | null | null | null | """
Polaris-specific settings. This is not django.conf.settings.
"""
import os
import environ
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from stellar_sdk.server import Server
from stellar_sdk.keypair import Keypair
def env_or_settings(variable, required=True, bool=False, list=False, int=False):
    """Read ``variable`` from the environment, falling back to Django settings.

    The ``bool``/``list``/``int`` flags select the typed environ reader.
    On a missing environment variable, ``settings.POLARIS_<variable>`` is
    used when present; otherwise the original error is re-raised when
    ``required``, else ``None`` is returned.
    """
    try:
        if bool:
            return env.bool(variable)
        if list:
            return env.list(variable)
        if int:
            return env.int(variable)
        return env(variable)
    except ImproperlyConfigured as error:
        fallback_name = "POLARIS_" + variable
        if hasattr(settings, fallback_name):
            return getattr(settings, fallback_name)
        if required:
            raise error
        return None
env = environ.Env()
# Load a .env file from BASE_DIR when present, otherwise from the explicit
# POLARIS_ENV_PATH setting (which must exist if given).
env_file = os.path.join(getattr(settings, "BASE_DIR", ""), ".env")
if os.path.exists(env_file):
    env.read_env(env_file)
elif hasattr(settings, "POLARIS_ENV_PATH"):
    if os.path.exists(settings.POLARIS_ENV_PATH):
        env.read_env(settings.POLARIS_ENV_PATH)
    else:
        raise ImproperlyConfigured(
            f"Could not find env file at {settings.POLARIS_ENV_PATH}"
        )
# SEPs this deployment is allowed to activate.
# NOTE(review): "sep-6" appears twice — harmless for the membership check
# below, but likely a typo worth confirming.
accepted_seps = ["sep-1", "sep-6", "sep-6", "sep-10", "sep-12", "sep-24", "sep-31"]
ACTIVE_SEPS = env_or_settings("ACTIVE_SEPS", list=True)
# Validate and normalize every configured SEP to lowercase in place.
for i, sep in enumerate(ACTIVE_SEPS):
    if sep.lower() not in accepted_seps:
        raise ImproperlyConfigured(
            f"Unrecognized value in ACTIVE_SEPS list: {sep}; Accepted values: {accepted_seps}"
        )
    ACTIVE_SEPS[i] = sep.lower()
# SEP-10 requires a Stellar signing keypair; derive the public key eagerly
# so an invalid seed fails at startup.
SIGNING_SEED, SIGNING_KEY = None, None
if "sep-10" in ACTIVE_SEPS:
    SIGNING_SEED = env_or_settings("SIGNING_SEED")
    try:
        SIGNING_KEY = Keypair.from_secret(SIGNING_SEED).public_key
    except ValueError:
        raise ImproperlyConfigured("Invalid SIGNING_SEED")
# JWTs are issued for SEP-10 challenges and SEP-24 interactive flows.
SERVER_JWT_KEY = None
if any(sep in ACTIVE_SEPS for sep in ["sep-10", "sep-24"]):
    SERVER_JWT_KEY = env_or_settings("SERVER_JWT_KEY")
# Stellar network configuration (defaults target the public testnet).
STELLAR_NETWORK_PASSPHRASE = (
    env_or_settings("STELLAR_NETWORK_PASSPHRASE", required=False)
    or "Test SDF Network ; September 2015"
)
HORIZON_URI = (
    env_or_settings("HORIZON_URI", required=False)
    or "https://horizon-testnet.stellar.org"
)
if not HORIZON_URI.startswith("http"):
    raise ImproperlyConfigured("HORIZON_URI must include a protocol (http or https)")
HORIZON_SERVER = Server(horizon_url=HORIZON_URI)
# LOCAL_MODE allows plain-HTTP HOST_URLs for development; otherwise HTTPS
# is mandatory.
LOCAL_MODE = env_or_settings("LOCAL_MODE", bool=True, required=False) or False
HOST_URL = env_or_settings("HOST_URL")
if not HOST_URL.startswith("http"):
    raise ImproperlyConfigured("HOST_URL must include a protocol (http or https)")
elif LOCAL_MODE and HOST_URL.startswith("https"):
    raise ImproperlyConfigured("HOST_URL uses HTTPS but LOCAL_MODE only supports HTTP")
elif not LOCAL_MODE and not HOST_URL.startswith("https"):
    raise ImproperlyConfigured("HOST_URL uses HTTP but LOCAL_MODE is off")
# SEP-10 home domains default to the HOST_URL's hostname and must not
# include a protocol.
SEP10_HOME_DOMAINS = env_or_settings(
    "SEP10_HOME_DOMAINS", list=True, required=False
) or [urlparse(HOST_URL).netloc]
if any(d.startswith("http") for d in SEP10_HOME_DOMAINS):
    raise ImproperlyConfigured("SEP10_HOME_DOMAINS must only be hostnames")
MAX_TRANSACTION_FEE_STROOPS = env_or_settings(
    "MAX_TRANSACTION_FEE_STROOPS", int=True, required=False
)
# Anchor callback settings (timeouts in seconds).
CALLBACK_REQUEST_TIMEOUT = (
    env_or_settings("CALLBACK_REQUEST_TIMEOUT", int=True, required=False) or 3
)
CALLBACK_REQUEST_DOMAIN_DENYLIST = (
    env_or_settings("CALLBACK_REQUEST_DOMAIN_DENYLIST", list=True, required=False) or []
)
SEP6_USE_MORE_INFO_URL = (
    env_or_settings("SEP6_USE_MORE_INFO_URL", bool=True, required=False) or False
)
# SEP-10 client attribution configuration.
SEP10_CLIENT_ATTRIBUTION_REQUIRED = env_or_settings(
    "SEP10_CLIENT_ATTRIBUTION_REQUIRED", bool=True, required=False
)
SEP10_CLIENT_ATTRIBUTION_REQUEST_TIMEOUT = (
    env_or_settings(
        "SEP10_CLIENT_ATTRIBUTION_REQUEST_TIMEOUT", int=True, required=False
    )
    or 3
)
SEP10_CLIENT_ATTRIBUTION_ALLOWLIST = env_or_settings(
    "SEP10_CLIENT_ATTRIBUTION_ALLOWLIST", list=True, required=False
)
SEP10_CLIENT_ATTRIBUTION_DENYLIST = env_or_settings(
    "SEP10_CLIENT_ATTRIBUTION_DENYLIST", list=True, required=False
)
ADDITIVE_FEES_ENABLED = (
    env_or_settings("ADDITIVE_FEES_ENABLED", bool=True, required=False) or False
)
# Constants
OPERATION_DEPOSIT = "deposit"
OPERATION_WITHDRAWAL = "withdraw"
ACCOUNT_STARTING_BALANCE = str(2.01)
# Lifetime (minutes) of the short-lived JWT used for interactive flows.
INTERACTIVE_JWT_EXPIRATION = (
    env_or_settings("INTERACTIVE_JWT_EXPIRATION", int=True, required=False) or 30
)
if INTERACTIVE_JWT_EXPIRATION <= 0:
    raise ImproperlyConfigured("INTERACTIVE_JWT_EXPIRATION must be positive")
7955072cbad765e72de2fa778467d9bc6785de67 | 2,067 | py | Python | setup.py | tnolan8/pyeodhistorical | 0fed17925c479b36f035c17e9aeea0a22882b1e4 | [
"MIT"
] | 1 | 2021-04-20T11:50:56.000Z | 2021-04-20T11:50:56.000Z | setup.py | tnolan8/pyeodhistorical | 0fed17925c479b36f035c17e9aeea0a22882b1e4 | [
"MIT"
] | null | null | null | setup.py | tnolan8/pyeodhistorical | 0fed17925c479b36f035c17e9aeea0a22882b1e4 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
import io
def is_requirement(line):
    """Return True when *line* names an installable requirement.

    Blank lines, comments, pip options (``--...``, ``-r``, ``-e``) and
    direct git URLs (``git+...``) are skipped.
    """
    stripped = line.strip()
    if not stripped:
        return False
    skip_prefixes = ("--", "-r", "#", "-e", "git+")
    return not stripped.startswith(skip_prefixes)
def get_requirements(path):
    """Read the requirements file at *path* and return its requirement lines."""
    with open(path) as req_file:
        return [entry.strip() for entry in req_file if is_requirement(entry)]
setup(
    name='pyeodhistoricaldata',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
    # version='0.0.2',
    version='0.0.3',
    description='End Of Day Historical Data using Python',
    # The project's main homepage.
    url='https://github.com/tnolan8/pyeodhistoricaldata',
    # Author details
    author='Tom Nolan',
    author_email='tomnolan95@gmail.com',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha', # Either "3 - Alpha", "4 - Beta" or "5 - Production/Stable"
        'Intended Audience :: Developers', # The target audience is developers
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License', # Must match the license field above
        'Programming Language :: Python :: 3', # Supported Python versions
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords=['python', 'trading', 'data', 'stock'],
    # Runtime dependencies are read from requirements.txt at build time.
    install_requires=get_requirements("requirements.txt"),
    packages=find_packages(exclude=["contrib", "docs", "tests*"]),
    zip_safe=False,
)
795508010bddfb8e246cbc3a4c561ede0c7439d2 | 6,870 | py | Python | exercises/hello-world-v4/metrics.py | GrahamDumpleton/lab-mod-wsgi-metrics | 6bf8371523992163548573928ae1b6a4bbb010de | [
"Apache-2.0"
] | null | null | null | exercises/hello-world-v4/metrics.py | GrahamDumpleton/lab-mod-wsgi-metrics | 6bf8371523992163548573928ae1b6a4bbb010de | [
"Apache-2.0"
] | null | null | null | exercises/hello-world-v4/metrics.py | GrahamDumpleton/lab-mod-wsgi-metrics | 6bf8371523992163548573928ae1b6a4bbb010de | [
"Apache-2.0"
] | null | null | null | import socket
import threading
import queue
import time
import atexit
import traceback
import os
from threading import Thread
from queue import Queue
import wrapt
import mod_wsgi
from influxdb import InfluxDBClient
from datetime import datetime
# Session namespace is required; the InfluxDB hostname is derived from it.
session_namespace = os.environ["SESSION_NAMESPACE"]
influxdb_hostname = f"{session_namespace}-influxdb"
# Client for the 'wsgi' database (username/password/database all 'wsgi').
client = InfluxDBClient(influxdb_hostname, 8086, 'wsgi', 'wsgi', 'wsgi')
# Seconds between periodic metric reports.
interval = 1.0
# Metrics are tagged with "<hostname>:<pid>" to identify this process.
hostname = socket.gethostname()
pid = os.getpid()
process = f"{hostname}:{pid}"
# Guards `data_points`, the batch of metrics pending upload to InfluxDB.
lock = threading.Lock()
data_points = []
@wrapt.synchronized(lock)
def record_metric(stop_time, duration):
    """Queue one request timing as an InfluxDB point for the next batch.

    Args:
        stop_time: datetime marking the end of the request.
        duration: application time for the request, in seconds.

    The point is appended to the shared ``data_points`` batch; the lock
    decorator serializes access with the reporting thread.
    """
    point = {
        "measurement": "raw-requests",
        "time": stop_time.isoformat(),
        "tags": {"hostname": hostname, "process": process},
        "fields": {"application_time": duration},
    }
    data_points.append(point)
def report_metrics():
    """Flush the accumulated batch of data points to InfluxDB."""
    global data_points
    # Atomically detach the current batch so writers keep appending to a
    # fresh list while we talk to InfluxDB outside the lock.
    with wrapt.synchronized(lock):
        batch, data_points = data_points, []
    if not batch:
        return
    try:
        client.write_points(batch)
    except Exception:
        # A reporting failure must never take down the worker; log and
        # carry on (this batch is dropped).
        traceback.print_exc()
def collector():
    """Background loop flushing metrics every ``interval`` seconds.

    Blocks on the module-level ``queue`` (a Queue instance — note it
    shadows the imported ``queue`` module) with a timeout aligned to the
    next scheduled report time. A timeout means "report now"; an actual
    item on the queue is the shutdown signal, which triggers one final
    flush before the thread exits.
    """

    next_time = time.time() + interval

    while True:
        next_time += interval
        now = time.time()

        try:
            # Waiting for next schedule time to report metrics.

            queue.get(timeout=max(0, next_time-now))

            # If we get to here it means the process is being shutdown
            # so we report any metrics that haven't been sent.

            report_metrics()

            return

        except Exception:
            # Timeout occurred on waiting on queue, which means the next
            # reporting time has arrived. (queue.Empty subclasses
            # Exception, so this is the timeout path.)

            pass

        # Report the current batch of metrics.

        report_metrics()
# Shutdown signal channel and the daemon thread that runs the collector
# loop; the thread is started later by enable_reporting(). Note this
# rebinds the name ``queue``, shadowing the imported module of the same
# name for the rest of this module.
queue = Queue()
thread = Thread(target=collector, daemon=True)

def shutdown_handler(*args, **kwargs):
    """Signal the collector thread to flush and exit, waiting up to
    three seconds for it to finish."""
    queue.put(None)
    thread.join(timeout=3.0)
def enable_reporting():
    """Enable metrics reporting: hook process shutdown and start the
    background collector thread. Call once per process."""

    # Subscribe to shutdown of the application so we can report the last
    # batch of metrics and notify the collector thread to shutdown.

    if hasattr(mod_wsgi, "subscribe_shutdown"):
        mod_wsgi.subscribe_shutdown(shutdown_handler)
    else:
        atexit.register(shutdown_handler)

    # Start collector thread for periodically reporting accumulated metrics.

    thread.start()
class WSGIApplicationIterable(wrapt.ObjectProxy):
    """A wrapper object for the result returned by the WSGI application when
    called. It uses a transparent object proxy to wrap the original response
    such that any operations are passed through to the original. The only
    exception is that the call to the close() method of any iterable by the
    WSGI server is intercepted and used to close out timing for the request
    and report a metric to InfluxDB.
    """

    def __init__(self, wrapped, start_time):
        """:param wrapped: the iterable returned by the WSGI application.
        :param start_time: datetime at which the application was called.
        """
        super().__init__(wrapped)

        # Save away the time the wrapped function was called. The
        # _self_ prefix keeps the attribute on the proxy itself rather
        # than delegating to the wrapped object.

        self._self_start_time = start_time

    def close(self):
        # A close() method on an iterable returned from a WSGI application
        # is required to be called by the WSGI server at the end of a request,
        # whether the request was successful, or an exception was raised.

        # If the original wrapped object returned by the WSGI application
        # provided a close() method we need to ensure it is in turn called.

        try:
            if hasattr(self.__wrapped__, 'close'):
                self.__wrapped__.close()

        finally:
            # Remember the time the close() function was called.

            stop_time = datetime.now()

            # Calculate how long the function took to run.

            duration = (stop_time - self._self_start_time).total_seconds()

            # Record the metric for the function call.

            record_metric(stop_time, duration)
@wrapt.decorator
def wsgi_application(wrapped, instance, args, kwargs):
    """Reports a metric to InfluxDB for each HTTP request handled
    by the wrapped WSGI application.

    :param wrapped: the WSGI application callable being decorated.
    :param instance: bound instance (supplied by wrapt; unused here).
    :param args: positional arguments of the WSGI call.
    :param kwargs: keyword arguments of the WSGI call.
    """

    # Remember time the wrapped function was called.

    start_time = datetime.now()

    try:
        # Call the wrapped function. The result can be any iterable, but may
        # specifically be a generator. In any case, the WSGI server would
        # iterate over the result and consume the yielded response. Any code
        # implementing the WSGI application may not be executed until the WSGI
        # server starts to consume the response. This is the case for a
        # generator, but could also be the case for custom iterable responses.
        # It is only for the simple case of the iterable being a list of
        # strings where no further code execution to generate the content will
        # occur after this point.

        result = wrapped(*args, **kwargs)

        # Rather than return the result, we wrap the result in a transparent
        # object proxy and return that instead. Because a transparent object
        # proxy is used, any actions to consume the iterable get transferred
        # through to the result object wrapped by the proxy. As such the
        # wrapper object doesn't need to implement methods for an iterable and
        # make the calls to the wrapped object itself. The wrapper object only
        # provides and intercepts a call to the close() method of any
        # iterable.

        return WSGIApplicationIterable(result, start_time)

    except:
        # Bare except is deliberate: the exception is always re-raised
        # below; this block only records the timing metric on the way out.

        # This case handles where the calling of the wrapped function resulted
        # in an exception. This could occur where the wrapped function is not
        # a generator. We need to record a metric still even when it fails. So
        # remember the time the wrapped function returned.

        stop_time = datetime.now()

        # Calculate how long the function took to run.

        duration = (stop_time - start_time).total_seconds()

        # Record the metric for the function call.

        record_metric(stop_time, duration)

        # Raise the original exception so that the WSGI server still sees
        # it and logs the details.

        raise
| 30 | 78 | 0.664192 |
7955086afad6cc87f9c7dd03f2519d6069369260 | 7,554 | py | Python | contrib/bitrpc/bitrpc.py | Whitecoin-XWC/XWCC_Wallet | a568af3bdcbe473a791025e9d6c01626781f5967 | [
"MIT"
] | 24 | 2017-06-29T03:12:13.000Z | 2021-03-13T17:16:20.000Z | contrib/bitrpc/bitrpc.py | Whitecoin-XWC/XWCC_Wallet | a568af3bdcbe473a791025e9d6c01626781f5967 | [
"MIT"
] | 1 | 2020-12-03T10:06:32.000Z | 2020-12-03T10:06:32.000Z | contrib/bitrpc/bitrpc.py | Whitecoin-XWC/XWCC_Wallet | a568af3bdcbe473a791025e9d6c01626781f5967 | [
"MIT"
] | 17 | 2017-05-15T06:40:32.000Z | 2019-11-30T05:50:35.000Z | from jsonrpc import ServiceProxy
import sys
import string

# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Connect to the local wallet's JSON-RPC port; credentials are included
# in the URL only when a password has been configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:15815")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:15815")

# First CLI argument selects the RPC command (matched case-insensitively
# by the dispatch chain below).
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Whitecoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Whitecoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.057325 | 79 | 0.668388 |
795508a13b5ee83e355ff5e56f343dba927764f0 | 5,370 | py | Python | src/processing/IJM/lipidomics/4.0-lipidomics_processing.py | coongroup/MITOMICS | 6515c23017288ee91984ba052ce6b6cc74ade60a | [
"MIT"
] | null | null | null | src/processing/IJM/lipidomics/4.0-lipidomics_processing.py | coongroup/MITOMICS | 6515c23017288ee91984ba052ce6b6cc74ade60a | [
"MIT"
] | null | null | null | src/processing/IJM/lipidomics/4.0-lipidomics_processing.py | coongroup/MITOMICS | 6515c23017288ee91984ba052ce6b6cc74ade60a | [
"MIT"
] | null | null | null | # load modules
import pandas as pd
import re
import numpy as np
from collections import Counter
# load lipidomics data (filtered, tab-separated)
lipidomics_path = "combined_lipidomics_data_filtered.tsv"
lipidomics_df = pd.read_csv(lipidomics_path, sep="\t", index_col=0)

# drop batch column and transpose (features become rows, samples columns)
lipidomics_df = lipidomics_df.drop(['batch'], axis=1).T

# load in lipidomics key from master list
lipidomics_key_path = "../proteomics/H3K_Project_Master_Lists.xlsx"
lipidomics_key_df = pd.read_excel(lipidomics_key_path, sheet_name='Lipid Samples')

# first need to create a list of cell lines with duplicate KOs (i.e., with "*-2")
duplicate_ko_list = []
for ko in list(set(lipidomics_key_df['H3K Cell Line Name'])):
    # skip BLANK
    if not ko == "BLANK":
        # check for "-2" suffix marking the second KO clone
        if ko.split("-")[1] == "2":
            # add the prefix (gene name) to the list
            duplicate_ko_list.append(ko.split("-")[0])
# populate a dict to map sample id to cell line name (in proper format: ADCK2-KO1 vs ADCK2-KO2)
id_to_cell_line_dict = {}

# iterate through lipidomics key (comment previously said "proteomics")
for index, row in lipidomics_key_df.iterrows():
    # get cell line
    cell_line = row['H3K Cell Line Name']
    # skip BLANK
    if not cell_line == "BLANK":
        # get sample id
        sample_id = row['Sample ID']
        # pull out sample as int / string (strips leading zeros)
        sample_pattern = "[A-Z](\d+)L"
        sample_id = re.search(sample_pattern, sample_id).group(1)
        sample_id = str(int(sample_id))
        # rename if not WT
        if not re.match("^WT-", cell_line):
            # standardize name (i.e., 'ADCK2-2' -> 'ADCK2-KO2')
            cell_line_split = cell_line.split("-")
            if cell_line_split[0] in duplicate_ko_list:
                cell_line = cell_line_split[0] + "-KO" + cell_line_split[1]  # ADCK2 + -KO + 2
            # else just add "-KO"
            else:
                cell_line = cell_line_split[0] + "-KO"
        else:
            cell_line = "WT"
        # populate dictionary
        # check that it's consistent if repeated
        if not sample_id in id_to_cell_line_dict:
            id_to_cell_line_dict[sample_id] = cell_line
        else:
            # will drop all WT samples anyway, so preserving precise mapping should not matter here
            if not cell_line == "WT":
                assert(id_to_cell_line_dict[sample_id] == cell_line)
# pWT samples (process WT controls, one set per batch)
pWT_cols = [col for col in lipidomics_df.columns if "PWT" in col]

# Build the renamed column list once. NOTE: the original script ran this
# identical loop twice back to back, re-initializing updated_colnames and
# recomputing the same list; the verbatim duplicate has been removed.
# Regex patterns are raw strings to avoid invalid-escape warnings.
updated_colnames = []

# iterate through colnames
for column in lipidomics_df.columns:
    # skip pWT samples (handled in the else branch below)
    if not "PWT" in column:
        # parse out sample id, e.g. "A136L Quant Values" -> "136"
        sample_id_pattern = r"[A-Z](\d+)L"
        sample_id = str(int(re.search(sample_id_pattern, column).group(1)))
        # parse out replicate id, e.g. "A136L Quant Values" -> "A"
        replicate_pattern = r"([ABCDE])\d+L"
        replicate = re.search(replicate_pattern, column).group(1)
        # get updated cell line name for this sample
        cell_line = id_to_cell_line_dict[sample_id]
        # add new colname, e.g. "ADCK2-KO1-A"
        updated_colname = cell_line + "-" + replicate
        updated_colnames.append(updated_colname)
    else:
        # get batch number from "PWT-<batch>-<replicate>"
        batch_pattern = r"PWT-(\d+)-\d+"
        batch = re.search(batch_pattern, column).group(1)
        # get replicate number
        replicate_pattern = r"PWT-\d+-(\d+)"
        replicate = re.search(replicate_pattern, column).group(1)
        # rename to "PWT-BATCH<batch>-<replicate>"
        updated_column = "PWT-BATCH{}-{}".format(batch, replicate)
        updated_colnames.append(updated_column)

# NOTE(review): updated_colnames is never assigned back onto
# lipidomics_df before the file is written out below — presumably the
# columns were meant to be renamed; confirm against downstream consumers.
# now drop growth WT cols (columns whose name starts with "WT-")
gWT_cols = [col for col in lipidomics_df.columns if re.match("^WT-", col)]
lipidomics_subset_df = lipidomics_df.drop(gWT_cols, axis=1)

# write out data as comma-separated tier-2 table
outpath = "lipidomics_tier2.csv"
lipidomics_subset_df.to_csv(outpath, sep=",")
| 31.403509 | 99 | 0.622346 |
79550a37893f506c6e1c27e9cb55a76db0222c8b | 11,013 | py | Python | src/python/interop/interop/methods.py | iMilind/grpc | f5b20ce8ec0c1dde684840f6ea8dcf80822bbb1d | [
"BSD-3-Clause"
] | 1 | 2015-09-27T23:20:05.000Z | 2015-09-27T23:20:05.000Z | src/python/interop/interop/methods.py | iMilind/grpc | f5b20ce8ec0c1dde684840f6ea8dcf80822bbb1d | [
"BSD-3-Clause"
] | 3 | 2020-12-31T09:08:34.000Z | 2021-09-28T05:42:02.000Z | third_party/grpc/src/python/interop/interop/methods.py | acidburn0zzz/kythe | 6cd4e9c81a1158de43ec783607a4d7edd9b7e4a0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementations of interoperability test methods."""
import enum
import threading
from grpc.framework.alpha import utilities
from interop import empty_pb2
from interop import messages_pb2
_TIMEOUT = 7
def _empty_call(request, unused_context):
    # Server handler for EmptyCall: always returns an empty message.
    return empty_pb2.Empty()

# Client invocation and server service descriptions for EmptyCall.
_CLIENT_EMPTY_CALL = utilities.unary_unary_invocation_description(
    empty_pb2.Empty.SerializeToString, empty_pb2.Empty.FromString)
_SERVER_EMPTY_CALL = utilities.unary_unary_service_description(
    _empty_call, empty_pb2.Empty.FromString,
    empty_pb2.Empty.SerializeToString)
def _unary_call(request, unused_context):
    # Server handler for UnaryCall: respond with a zero-filled
    # COMPRESSABLE payload of the requested size.
    return messages_pb2.SimpleResponse(
        payload=messages_pb2.Payload(
            type=messages_pb2.COMPRESSABLE,
            body=b'\x00' * request.response_size))

# Client invocation and server service descriptions for UnaryCall.
_CLIENT_UNARY_CALL = utilities.unary_unary_invocation_description(
    messages_pb2.SimpleRequest.SerializeToString,
    messages_pb2.SimpleResponse.FromString)
_SERVER_UNARY_CALL = utilities.unary_unary_service_description(
    _unary_call, messages_pb2.SimpleRequest.FromString,
    messages_pb2.SimpleResponse.SerializeToString)
def _streaming_output_call(request, unused_context):
    # Server handler for StreamingOutputCall: yield one zero-filled
    # response per entry in the request's response_parameters.
    for response_parameters in request.response_parameters:
        yield messages_pb2.StreamingOutputCallResponse(
            payload=messages_pb2.Payload(
                type=request.response_type,
                body=b'\x00' * response_parameters.size))

# Client invocation and server service descriptions for StreamingOutputCall.
_CLIENT_STREAMING_OUTPUT_CALL = utilities.unary_stream_invocation_description(
    messages_pb2.StreamingOutputCallRequest.SerializeToString,
    messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_STREAMING_OUTPUT_CALL = utilities.unary_stream_service_description(
    _streaming_output_call,
    messages_pb2.StreamingOutputCallRequest.FromString,
    messages_pb2.StreamingOutputCallResponse.SerializeToString)
def _streaming_input_call(request_iterator, unused_context):
    # Server handler for StreamingInputCall: sum payload sizes across
    # the whole request stream and report the total.
    aggregate_size = 0
    for request in request_iterator:
        if request.payload and request.payload.body:
            aggregate_size += len(request.payload.body)
    return messages_pb2.StreamingInputCallResponse(
        aggregated_payload_size=aggregate_size)

# Client invocation and server service descriptions for StreamingInputCall.
_CLIENT_STREAMING_INPUT_CALL = utilities.stream_unary_invocation_description(
    messages_pb2.StreamingInputCallRequest.SerializeToString,
    messages_pb2.StreamingInputCallResponse.FromString)
_SERVER_STREAMING_INPUT_CALL = utilities.stream_unary_service_description(
    _streaming_input_call,
    messages_pb2.StreamingInputCallRequest.FromString,
    messages_pb2.StreamingInputCallResponse.SerializeToString)
def _full_duplex_call(request_iterator, unused_context):
    # Server handler for FullDuplexCall (and HalfDuplexCall): echo one
    # zero-filled response per request, sized by the first
    # response_parameters entry of that request.
    for request in request_iterator:
        yield messages_pb2.StreamingOutputCallResponse(
            payload=messages_pb2.Payload(
                type=request.payload.type,
                body=b'\x00' * request.response_parameters[0].size))

# Client invocation and server service descriptions for FullDuplexCall.
_CLIENT_FULL_DUPLEX_CALL = utilities.stream_stream_invocation_description(
    messages_pb2.StreamingOutputCallRequest.SerializeToString,
    messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_FULL_DUPLEX_CALL = utilities.stream_stream_service_description(
    _full_duplex_call,
    messages_pb2.StreamingOutputCallRequest.FromString,
    messages_pb2.StreamingOutputCallResponse.SerializeToString)

# NOTE(nathaniel): Apparently this is the same as the full-duplex call?
_CLIENT_HALF_DUPLEX_CALL = utilities.stream_stream_invocation_description(
    messages_pb2.StreamingOutputCallRequest.SerializeToString,
    messages_pb2.StreamingOutputCallResponse.FromString)
_SERVER_HALF_DUPLEX_CALL = utilities.stream_stream_service_description(
    _full_duplex_call,
    messages_pb2.StreamingOutputCallRequest.FromString,
    messages_pb2.StreamingOutputCallResponse.SerializeToString)
SERVICE_NAME = 'grpc.testing.TestService'

# Method names as defined by the gRPC interop TestService.
_EMPTY_CALL_METHOD_NAME = 'EmptyCall'
_UNARY_CALL_METHOD_NAME = 'UnaryCall'
_STREAMING_OUTPUT_CALL_METHOD_NAME = 'StreamingOutputCall'
_STREAMING_INPUT_CALL_METHOD_NAME = 'StreamingInputCall'
_FULL_DUPLEX_CALL_METHOD_NAME = 'FullDuplexCall'
_HALF_DUPLEX_CALL_METHOD_NAME = 'HalfDuplexCall'

# Method name -> client-side invocation description.
CLIENT_METHODS = {
    _EMPTY_CALL_METHOD_NAME: _CLIENT_EMPTY_CALL,
    _UNARY_CALL_METHOD_NAME: _CLIENT_UNARY_CALL,
    _STREAMING_OUTPUT_CALL_METHOD_NAME: _CLIENT_STREAMING_OUTPUT_CALL,
    _STREAMING_INPUT_CALL_METHOD_NAME: _CLIENT_STREAMING_INPUT_CALL,
    _FULL_DUPLEX_CALL_METHOD_NAME: _CLIENT_FULL_DUPLEX_CALL,
    _HALF_DUPLEX_CALL_METHOD_NAME: _CLIENT_HALF_DUPLEX_CALL,
}

# Method name -> server-side service description.
SERVER_METHODS = {
    _EMPTY_CALL_METHOD_NAME: _SERVER_EMPTY_CALL,
    _UNARY_CALL_METHOD_NAME: _SERVER_UNARY_CALL,
    _STREAMING_OUTPUT_CALL_METHOD_NAME: _SERVER_STREAMING_OUTPUT_CALL,
    _STREAMING_INPUT_CALL_METHOD_NAME: _SERVER_STREAMING_INPUT_CALL,
    _FULL_DUPLEX_CALL_METHOD_NAME: _SERVER_FULL_DUPLEX_CALL,
    _HALF_DUPLEX_CALL_METHOD_NAME: _SERVER_HALF_DUPLEX_CALL,
}
def _empty_unary(stub):
    """Interop test case: EmptyCall must return an empty_pb2.Empty.

    :param stub: connected TestService stub (used as a context manager).
    :raises TypeError: if the response is not an empty_pb2.Empty.
    """
    with stub:
        response = stub.EmptyCall(empty_pb2.Empty(), _TIMEOUT)
        if not isinstance(response, empty_pb2.Empty):
            # The original passed the format string and the value as two
            # separate TypeError arguments, so "%s" was never
            # interpolated; format explicitly instead.
            raise TypeError(
                'response is of type "%s", not empty_pb2.Empty!' % type(response))
def _large_unary(stub):
    """Interop test case: UnaryCall with a 271828-byte request payload
    expecting a 314159-byte COMPRESSABLE response.

    NOTE(review): ``stub.UnaryCall.async`` parses only under Python 2 —
    ``async`` became a reserved word in Python 3.7 — consistent with the
    rest of this Python 2 module.
    """
    with stub:
        request = messages_pb2.SimpleRequest(
            response_type=messages_pb2.COMPRESSABLE, response_size=314159,
            payload=messages_pb2.Payload(body=b'\x00' * 271828))
        # Issue the call asynchronously, then block on the future.
        response_future = stub.UnaryCall.async(request, _TIMEOUT)
        response = response_future.result()
        if response.payload.type is not messages_pb2.COMPRESSABLE:
            raise ValueError(
                'response payload type is "%s"!' % type(response.payload.type))
        if len(response.payload.body) != 314159:
            raise ValueError(
                'response body of incorrect size %d!' % len(response.payload.body))
def _client_streaming(stub):
    """Interop test case: StreamingInputCall must report the aggregated
    size of all streamed payloads (27182 + 8 + 1828 + 45904 == 74922)."""
    with stub:
        payload_body_sizes = (27182, 8, 1828, 45904)
        # Lazily build the request stream with nested generators.
        payloads = (
            messages_pb2.Payload(body=b'\x00' * size)
            for size in payload_body_sizes)
        requests = (
            messages_pb2.StreamingInputCallRequest(payload=payload)
            for payload in payloads)
        response = stub.StreamingInputCall(requests, _TIMEOUT)
        if response.aggregated_payload_size != 74922:
            raise ValueError(
                'incorrect size %d!' % response.aggregated_payload_size)
def _server_streaming(stub):
    """Interop test case: StreamingOutputCall must yield one COMPRESSABLE
    response per requested size, in order."""
    sizes = (31415, 9, 2653, 58979)

    with stub:
        request = messages_pb2.StreamingOutputCallRequest(
            response_type=messages_pb2.COMPRESSABLE,
            response_parameters=(
                messages_pb2.ResponseParameters(size=sizes[0]),
                messages_pb2.ResponseParameters(size=sizes[1]),
                messages_pb2.ResponseParameters(size=sizes[2]),
                messages_pb2.ResponseParameters(size=sizes[3]),
            ))
        response_iterator = stub.StreamingOutputCall(request, _TIMEOUT)
        # Validate each streamed response against the requested size.
        for index, response in enumerate(response_iterator):
            if response.payload.type != messages_pb2.COMPRESSABLE:
                raise ValueError(
                    'response body of invalid type %s!' % response.payload.type)
            if len(response.payload.body) != sizes[index]:
                raise ValueError(
                    'response body of invalid size %d!' % len(response.payload.body))
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def next(self):
with self._condition:
while not self._values and self._open:
self._condition.wait()
if self._values:
return self._values.pop(0)
else:
raise StopIteration()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
def _ping_pong(stub):
    """Interop test case: FullDuplexCall, ping-pong style — send one
    request, validate the matching response, repeat for each size pair.

    :param stub: connected TestService stub (used as a context manager).
    :raises ValueError: on a response of the wrong type or size.
    """
    request_response_sizes = (31415, 9, 2653, 58979)
    request_payload_sizes = (27182, 8, 1828, 45904)

    with stub:
        pipe = _Pipe()
        response_iterator = stub.FullDuplexCall(pipe, _TIMEOUT)
        # Parenthesized call form so the statement parses under both
        # Python 2 and Python 3 (the original used a print statement).
        print('Starting ping-pong with response iterator %s' % response_iterator)

        for response_size, payload_size in zip(
                request_response_sizes, request_payload_sizes):
            request = messages_pb2.StreamingOutputCallRequest(
                response_type=messages_pb2.COMPRESSABLE,
                response_parameters=(messages_pb2.ResponseParameters(
                    size=response_size),),
                payload=messages_pb2.Payload(body=b'\x00' * payload_size))
            # Feed one request, then block for its echoed response.
            pipe.add(request)
            response = next(response_iterator)
            if response.payload.type != messages_pb2.COMPRESSABLE:
                raise ValueError(
                    'response body of invalid type %s!' % response.payload.type)
            if len(response.payload.body) != response_size:
                raise ValueError(
                    'response body of invalid size %d!' % len(response.payload.body))
        pipe.close()
@enum.unique
class TestCase(enum.Enum):
    """Enumeration of the supported interop test cases; the values are
    the names used to select a case from the command line."""

    EMPTY_UNARY = 'empty_unary'
    LARGE_UNARY = 'large_unary'
    SERVER_STREAMING = 'server_streaming'
    CLIENT_STREAMING = 'client_streaming'
    PING_PONG = 'ping_pong'

    def test_interoperability(self, stub):
        # Dispatch to the module-level implementation for this case.
        if self is TestCase.EMPTY_UNARY:
            _empty_unary(stub)
        elif self is TestCase.LARGE_UNARY:
            _large_unary(stub)
        elif self is TestCase.SERVER_STREAMING:
            _server_streaming(stub)
        elif self is TestCase.CLIENT_STREAMING:
            _client_streaming(stub)
        elif self is TestCase.PING_PONG:
            _ping_pong(stub)
        else:
            raise NotImplementedError('Test case "%s" not implemented!' % self.name)
| 37.975862 | 78 | 0.764006 |
79550c496c01cc593c5a60bb791235d515c0b59e | 1,352 | py | Python | gamma_limits_sensitivity/tests/test_command_predict.py | mahnen/gamma-limits-sensitivity | f105d7e2ba5bdc5cabd48594151968304850a499 | [
"MIT"
] | 4 | 2021-06-11T03:37:38.000Z | 2021-06-11T10:42:24.000Z | tests/test_command_predict.py | JohannesBuchner/poissonregime | 8047914428bb9e4f0f6fc366111d7a9d42701502 | [
"MIT"
] | 4 | 2016-11-14T11:58:48.000Z | 2018-03-20T11:42:11.000Z | tests/test_command_predict.py | JohannesBuchner/poissonregime | 8047914428bb9e4f0f6fc366111d7a9d42701502 | [
"MIT"
] | null | null | null | '''
[TODO] Explain logic behind command: 'predict'
'''
import gamma_limits_sensitivity as gls
import numpy as np
import matplotlib
from helper_functions_for_tests import get_effective_area_list
def test_get_predict_phasespace_figure():
    '''
    A simple test for the get_phasespace_figure() function:
    it should return a matplotlib Figure for sensible inputs.
    '''
    a_eff_list = get_effective_area_list()
    chosen_index = 1  # use the second instrument's effective area

    predict_phasespace_figure = gls.get_predict_phasespace_figure(
        sigma_bg=7./3600.,  # background rate: 7 events per hour, in 1/s
        alpha=0.2,
        f_0=1e-12,
        df_0=1e-13,
        gamma=-2.6,
        dgamma=0.1,
        e_0=1.,
        a_eff_interpol=a_eff_list[chosen_index],
        pixels_per_line=2  # tiny resolution keeps the test fast
    )

    assert isinstance(predict_phasespace_figure, matplotlib.figure.Figure)
def test_get_predict_spectrum_figure():
    '''
    A simple test for the get_spectrum_figure() function:
    it should return a matplotlib Figure for sensible inputs.
    '''
    a_eff_list = get_effective_area_list()
    chosen_index = 0  # use the first instrument's effective area

    # Only the figure is checked here; the other two return values
    # are ignored.
    predict_spectrum_figure, __a, __b = gls.get_predict_spectrum_figure(
        sigma_bg=7./3600.,  # background rate: 7 events per hour, in 1/s
        alpha=0.2,
        t_obs_est=[1.*3600., 2.*3600., 3.*3600.],  # 1, 2, 3 hours in s
        f_0=1e-12,
        df_0=1e-13,
        gamma=-2.6,
        dgamma=0.1,
        e_0=1.,
        a_eff_interpol=a_eff_list[chosen_index],
        n_points_to_plot=2  # minimal sampling keeps the test fast
    )

    assert isinstance(predict_spectrum_figure, matplotlib.figure.Figure)
| 25.037037 | 74 | 0.659763 |
79550d7a522ccab167495b644b137e72bde01074 | 1,014 | py | Python | gsse_python_client/testSet.py | ogdans3/gsse-python-client | 4ee6409aa3f3c558fbd4d43ccb53fdd91a7923f5 | [
"MIT"
] | null | null | null | gsse_python_client/testSet.py | ogdans3/gsse-python-client | 4ee6409aa3f3c558fbd4d43ccb53fdd91a7923f5 | [
"MIT"
] | null | null | null | gsse_python_client/testSet.py | ogdans3/gsse-python-client | 4ee6409aa3f3c558fbd4d43ccb53fdd91a7923f5 | [
"MIT"
] | null | null | null |
class TestSet:
    """Describes a stock test set: a list of tickers plus a time window
    and an exchange."""

    def __init__(self, stocks=None, fromTime="", toTime="", exchange=""):
        # Use a fresh list per instance. The original declared stocks=[]
        # — a mutable default argument shared by every TestSet created
        # without an explicit list — and aliased it directly into
        # self.tickers, so addStock() on one instance leaked into all
        # others. Copying also insulates us from later mutation of the
        # caller's list.
        self.tickers = list(stocks) if stocks is not None else []
        self.fromTime = fromTime
        self.toTime = toTime
        self.exchange = exchange

    def addStock(self, ticker):
        """Append a ticker symbol to the set."""
        self.tickers.append(ticker)

    def removeStock(self, ticker):
        """Remove the first occurrence of ticker.

        Raises ValueError if the ticker is not present (list.remove
        semantics, unchanged from the original).
        """
        self.tickers.remove(ticker)

    def getStocks(self):
        return self.tickers

    def setFromTime(self, fromTime):
        self.fromTime = fromTime

    def setToTime(self, toTime):
        self.toTime = toTime

    def getFromTime(self):
        return self.fromTime

    def getToTime(self):
        return self.toTime

    def getExchange(self):
        return self.exchange

    def setExchange(self, exchange):
        self.exchange = exchange

    def toJSON(self):
        """Return a plain-dict representation suitable for JSON encoding."""
        obj = {}
        obj["tickers"] = self.getStocks()
        obj["fromTime"] = self.getFromTime()
        obj["toTime"] = self.getToTime()
        obj["exchange"] = self.getExchange()
        return obj
| 22.533333 | 71 | 0.599606 |
79550f14826e411fd7827f251a7b9d5f85be8f7b | 1,721 | py | Python | localization/launch/particle_filter_localizer.launch.py | pr05/software-training | b38036bf0d2231b0c8a058b9b7bcb27883bd4bba | [
"MIT"
] | null | null | null | localization/launch/particle_filter_localizer.launch.py | pr05/software-training | b38036bf0d2231b0c8a058b9b7bcb27883bd4bba | [
"MIT"
] | null | null | null | localization/launch/particle_filter_localizer.launch.py | pr05/software-training | b38036bf0d2231b0c8a058b9b7bcb27883bd4bba | [
"MIT"
] | null | null | null | # Copyright 2021 RoboJackets
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
    """Build the ROS 2 launch description for the localization node.

    This is a training scaffold: the STUDENT CODE sections are meant to be
    filled in (e.g. parameter lookup via get_package_share_directory, which is
    imported above for that purpose).
    """
    # BEGIN STUDENT CODE
    # END STUDENT CODE
    return LaunchDescription([
        Node(
            package='localization',
            executable='localization_node',
            output='screen',
            # BEGIN STUDENT CODE
            # END STUDENT CODE
            remappings=[
                # Consume tag detections that were already transformed upstream.
                ('/tags', '/coordinate_transformer/tags_transformed')
            ]
        )
    ])
| 39.113636 | 79 | 0.732132 |
79550fd3f0c4322212c8ceb81c26ee585df680a8 | 56,382 | py | Python | Integrations/Slack/Slack.py | JustinJohnWilliams/content | 13e26fe338d7f90cc6e95a673316d1445033d31d | [
"MIT"
] | null | null | null | Integrations/Slack/Slack.py | JustinJohnWilliams/content | 13e26fe338d7f90cc6e95a673316d1445033d31d | [
"MIT"
] | null | null | null | Integrations/Slack/Slack.py | JustinJohnWilliams/content | 13e26fe338d7f90cc6e95a673316d1445033d31d | [
"MIT"
] | null | null | null | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import slack
from slack.errors import SlackApiError
from distutils.util import strtobool
import asyncio
import concurrent
import requests
from typing import Tuple
# disable unsecure warnings
requests.packages.urllib3.disable_warnings()

''' CONSTANTS '''

# Maps Demisto severity names to the numeric values compared against SEVERITY_THRESHOLD.
SEVERITY_DICT = {
    'Low': 1,
    'Medium': 2,
    'High': 3,
    'Critical': 4
}

# Slack message markup: <@USERID> mentions, <#CHANNELID> references and <url|label> links.
USER_TAG_EXPRESSION = '<@(.*?)>'
CHANNEL_TAG_EXPRESSION = '<#(.*?)>'
URL_EXPRESSION = r'<(https?://.+?)(?:\|.+)?>'
# Entitlement strings look like <guid>@<incident id>(|<task id>).
GUID_REGEX = r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
ENTITLEMENT_REGEX = r'{}@(({})|\d+)(\|\S+)?\b'.format(GUID_REGEX, GUID_REGEX)
# Footer appended to mirrored entries; also used to detect (and break) message loops.
MESSAGE_FOOTER = '\n**From Slack**'
MIRROR_TYPE = 'mirrorEntry'
INCIDENT_OPENED = 'incidentOpened'
INCIDENT_NOTIFICATION_CHANNEL = 'incidentNotificationChannel'
PLAYGROUND_INVESTIGATION_TYPE = 9
WARNING_ENTRY_TYPE = 11
# External endpoint polled for answers to interactive questions.
ENDPOINT_URL = 'https://oproxy.demisto.ninja/slack-poll'
POLL_INTERVAL_MINUTES = 1
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'

''' GLOBALS '''

# Module-level configuration; presumably populated from integration parameters
# during initialization (outside this view) — TODO confirm. Annotations only.
BOT_TOKEN: str
ACCESS_TOKEN: str
PROXY: str
DEDICATED_CHANNEL: str
CLIENT: slack.WebClient
CHANNEL_CLIENT: slack.WebClient
ALLOW_INCIDENTS: bool
NOTIFY_INCIDENTS: bool
INCIDENT_TYPE: str
SEVERITY_THRESHOLD: int
VERIFY_CERT: bool
QUESTION_LIFETIME: int

''' HELPER FUNCTIONS '''
def get_bot_id() -> str:
    """Return the Slack user ID bound to the bot token.

    auth.test echoes back the identity of the calling token's app.
    """
    return CLIENT.auth_test().get('user_id')
def test_module():
    """
    Sends a test message to the dedicated slack channel.
    """
    # Fail fast on missing or unresolvable configuration.
    if not DEDICATED_CHANNEL:
        return_error('A dedicated slack channel must be provided.')
    channel = get_conversation_by_name(DEDICATED_CHANNEL)
    if not channel:
        return_error('Dedicated channel not found.')
    CLIENT.chat_postMessage(channel=channel.get('id'), text='Hi there! This is a test message.')

    demisto.results('ok')
def get_user_by_name(user_to_search: str) -> dict:
    """
    Gets a slack user by a user name
    :param user_to_search: The user name or email
    :return: A slack user object (empty dict if not found)
    """
    user: dict = {}
    users: list = []
    integration_context = demisto.getIntegrationContext()

    # Matching is case-insensitive against name, profile email and real name.
    user_to_search = user_to_search.lower()
    # First try the cached user list stored in the integration context.
    if integration_context.get('users'):
        users = json.loads(integration_context['users'])
        users_filter = list(filter(lambda u: u.get('name', '').lower() == user_to_search
                                             or u.get('profile', {}).get('email', '').lower() == user_to_search
                                             or u.get('real_name', '').lower() == user_to_search, users))
        if users_filter:
            user = users_filter[0]
    # Cache miss: page through the workspace member list, 200 users at a time.
    if not user:
        response = CLIENT.users_list(limit=200)
        while True:
            workspace_users = response['members'] if response and response.get('members', []) else []
            cursor = response.get('response_metadata', {}).get('next_cursor')
            users_filter = list(filter(lambda u: u.get('name', '').lower() == user_to_search
                                                 or u.get('profile', {}).get('email', '').lower() == user_to_search
                                                 or u.get('real_name', '').lower() == user_to_search, workspace_users))
            if users_filter:
                break
            if not cursor:
                # Exhausted all pages without a match.
                break
            response = CLIENT.users_list(limit=200, cursor=cursor)
        if users_filter:
            user = users_filter[0]
            # Persist the hit so future lookups are served from the cache.
            users.append(user)
            set_to_latest_integration_context('users', users)
        else:
            return {}

    return user
def search_slack_users(users) -> list:
    """
    Resolve the given users in Slack, emitting a warning entry for any miss.
    :param users: A single user or a list of users (names/emails) to find.
    :return: The matching Slack user objects.
    """
    if not isinstance(users, list):
        users = [users]

    found = []
    for candidate in users:
        slack_user = get_user_by_name(candidate)
        if slack_user:
            found.append(slack_user)
        else:
            # Surface the miss as a warning entry rather than failing outright.
            demisto.results({
                'Type': WARNING_ENTRY_TYPE,
                'Contents': 'User {} not found in Slack'.format(candidate),
                'ContentsFormat': formats['text']
            })
    return found
def find_mirror_by_investigation() -> dict:
    """Return the mirror record for the current investigation, or {} if none exists."""
    investigation = demisto.investigation()
    if not investigation:
        return {}

    integration_context = demisto.getIntegrationContext()
    mirrors_json = integration_context.get('mirrors')
    if not mirrors_json:
        return {}

    # First mirror whose investigation ID matches the active investigation.
    for candidate in json.loads(mirrors_json):
        if investigation.get('id') == candidate['investigation_id']:
            return candidate
    return {}
def set_to_latest_integration_context(key: str, value, wait: bool = False):
    """Store *value* (JSON-serialized) under *key* in a fresh integration context.

    :param key: The context key to update.
    :param value: Any JSON-serializable object.
    :param wait: When True, pause briefly first so concurrent writers settle.
    """
    if wait:
        time.sleep(5)

    context = demisto.getIntegrationContext()
    context[key] = json.dumps(value)
    demisto.setIntegrationContext(context)
''' MIRRORING '''
async def get_slack_name(slack_id: str, client) -> str:
    """
    Get the slack name of a provided user or channel by its ID
    :param client: The slack client
    :param slack_id: The slack user or channel ID
    :return: The slack user or channel name ('' when unresolvable)
    """
    if not slack_id:
        return ''

    integration_context = demisto.getIntegrationContext()
    # The first character of a Slack ID encodes its kind:
    # C = channel, D = direct message, G = group, U = user.
    prefix = slack_id[0]
    slack_name = ''

    if prefix in ['C', 'D', 'G']:
        # Conversation IDs may carry a '|label' suffix in message markup.
        slack_id = slack_id.split('|')[0]
        conversation: dict = {}
        # Prefer the cached conversation list before hitting the API.
        if integration_context.get('conversations'):
            conversations = list(filter(lambda c: c['id'] == slack_id,
                                        json.loads(integration_context['conversations'])))
            if conversations:
                conversation = conversations[0]
        if not conversation:
            conversation = (await client.conversations_info(channel=slack_id)).get('channel', {})
        slack_name = conversation.get('name', '')
    elif prefix == 'U':
        user: dict = {}
        # Prefer the cached user list before hitting the API.
        if integration_context.get('users'):
            users = list(filter(lambda u: u['id'] == slack_id, json.loads(integration_context['users'])))
            if users:
                user = users[0]
        if not user:
            user = (await client.users_info(user=slack_id)).get('user', {})

        slack_name = user.get('name', '')

    return slack_name
async def clean_message(message: str, client: slack.WebClient) -> str:
    """
    Prettifies a slack message - replaces tags and URLs with clean expressions
    :param message: The slack message
    :param client: The slack client
    :return: The clean slack message
    """
    # Collect the raw user/channel IDs before stripping their markup.
    matches = re.findall(USER_TAG_EXPRESSION, message)
    matches += re.findall(CHANNEL_TAG_EXPRESSION, message)
    # Drop the <@...>/<#...> wrappers, leaving bare IDs in the text.
    message = re.sub(USER_TAG_EXPRESSION, r'\1', message)
    message = re.sub(CHANNEL_TAG_EXPRESSION, r'\1', message)
    # Replace each bare ID with its human-readable Slack name.
    for match in matches:
        slack_name = await get_slack_name(match, client)
        message = message.replace(match, slack_name)

    # Unwrap <http://...|label> style links down to the plain URL.
    resolved_message = re.sub(URL_EXPRESSION, r'\1', message)

    return resolved_message
def invite_users_to_conversation(conversation_id: str, users_to_invite: list):
    """
    Invites users to a provided conversation using a provided slack client with a channel token.
    :param conversation_id: The slack conversation ID to invite the users to.
    :param users_to_invite: The user slack IDs to invite.
    """
    for user_id in users_to_invite:
        try:
            CHANNEL_CLIENT.conversations_invite(channel=conversation_id, users=user_id)
        except SlackApiError as api_error:
            # Inviting the token's own identity is harmless; anything else is fatal.
            if 'cant_invite_self' not in str(api_error):
                raise
def kick_users_from_conversation(conversation_id: str, users_to_kick: list):
    """
    Kicks users from a provided conversation using a provided slack client with a channel token.
    :param conversation_id: The slack conversation ID to kick the users from.
    :param users_to_kick: The user slack IDs to kick.
    """
    for user in users_to_kick:
        try:
            CHANNEL_CLIENT.conversations_kick(channel=conversation_id, user=user)
        except SlackApiError as e:
            message = str(e)
            # Bug fix: conversations.kick reports 'cant_kick_self', not
            # 'cant_invite_self' (copy-paste from the invite helper), so the
            # self-kick case previously re-raised. Tolerate both for safety.
            if message.find('cant_kick_self') == -1 and message.find('cant_invite_self') == -1:
                raise
def mirror_investigation():
    """
    Updates the integration context with a new or existing mirror.

    Reads command args (type, autoclose, direction, mirrorTo, channelName,
    channelTopic, kickAdmin), creates or updates the Slack conversation that
    mirrors the current investigation, invites the relevant users and the bot,
    and persists the mirror/conversation records to the integration context.
    """
    mirror_type = demisto.args().get('type', 'all')
    auto_close = demisto.args().get('autoclose', 'true')
    mirror_direction = demisto.args().get('direction', 'both')
    mirror_to = demisto.args().get('mirrorTo', 'group')
    channel_name = demisto.args().get('channelName', '')
    channel_topic = demisto.args().get('channelTopic', '')
    kick_admin = bool(strtobool(demisto.args().get('kickAdmin', 'false')))

    investigation = demisto.investigation()

    if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
        return_error('Can not perform this action in playground.')

    integration_context = demisto.getIntegrationContext()

    # Load (or initialize) the persisted mirror and conversation lists.
    if not integration_context or not integration_context.get('mirrors', []):
        mirrors: list = []
    else:
        mirrors = json.loads(integration_context['mirrors'])
    if not integration_context or not integration_context.get('conversations', []):
        conversations: list = []
    else:
        conversations = json.loads(integration_context['conversations'])

    investigation_id = investigation.get('id')
    users = investigation.get('users')
    slack_users = search_slack_users(users)
    send_first_message = False
    users_to_invite = list(map(lambda u: u.get('id'), slack_users))
    current_mirror = list(filter(lambda m: m['investigation_id'] == investigation_id, mirrors))
    channel_filter: list = []
    if channel_name:
        channel_filter = list(filter(lambda m: m['channel_name'] == channel_name, mirrors))

    if not current_mirror:
        # No mirror yet for this investigation: create (or reuse) a conversation.
        channel_name = channel_name or 'incident-{}'.format(investigation_id)
        if not channel_filter:
            if mirror_to == 'channel':
                conversation = CHANNEL_CLIENT.channels_create(name=channel_name).get('channel', {})
            else:
                conversation = CHANNEL_CLIENT.groups_create(name=channel_name).get('group', {})
            conversation_name = conversation.get('name')
            conversation_id = conversation.get('id')
            conversations.append(conversation)
            send_first_message = True
        else:
            # Another investigation already mirrors to this channel — share it.
            mirrored_channel = channel_filter[0]
            conversation_id = mirrored_channel['channel_id']
            conversation_name = mirrored_channel['channel_name']

        mirror = {
            'channel_id': conversation_id,
            'channel_name': conversation_name,
            'investigation_id': investigation.get('id'),
            'mirror_type': mirror_type,
            'mirror_direction': mirror_direction,
            'mirror_to': mirror_to,
            'auto_close': bool(strtobool(auto_close)),
            'mirrored': False
        }
    else:
        # Existing mirror: update its settings (channel identity is immutable).
        mirror = mirrors.pop(mirrors.index(current_mirror[0]))
        conversation_id = mirror['channel_id']
        if mirror_type:
            mirror['mirror_type'] = mirror_type
        if auto_close:
            mirror['auto_close'] = bool(strtobool(auto_close))
        if mirror_direction:
            mirror['mirror_direction'] = mirror_direction
        if mirror_to and mirror['mirror_to'] != mirror_to:
            return_error('Cannot change the Slack channel type from Demisto.')
        if channel_name:
            return_error('Cannot change the Slack channel name.')
        if channel_topic:
            return_error('Cannot change the Slack channel topic.')
        conversation_name = mirror['channel_name']
        # Force re-registration of the mirror by the long-running loop.
        mirror['mirrored'] = False

    set_topic = False
    if channel_topic:
        set_topic = True
    else:
        # Auto-derive a topic listing every incident mirrored to this channel,
        # but only overwrite topics that are empty or were auto-derived before.
        mirror_name = 'incident-{}'.format(investigation_id)
        channel_filter = list(filter(lambda m: m['channel_name'] == conversation_name, mirrors))
        if 'channel_topic' in mirror:
            channel_topic = mirror['channel_topic']
        elif channel_filter:
            channel_mirror = channel_filter[0]
            channel_topic = channel_mirror['channel_topic']
        else:
            channel_topic = ''
        mirrored_investigations_ids = list(map(lambda m: 'incident-{}'
                                               .format(m['investigation_id']), channel_filter))
        if not channel_topic or channel_topic.find('incident-') != -1:
            new_topic = ', '.join(mirrored_investigations_ids + [mirror_name])
            if channel_topic != new_topic:
                channel_topic = new_topic
                set_topic = True

    if set_topic:
        CHANNEL_CLIENT.conversations_setTopic(channel=conversation_id, topic=channel_topic)
    mirror['channel_topic'] = channel_topic

    if mirror_type != 'none':
        # The bot must be in the conversation to relay messages.
        if integration_context.get('bot_id'):
            bot_id = integration_context['bot_id']
        else:
            bot_id = get_bot_id()
        users_to_invite += [bot_id]
        invite_users_to_conversation(conversation_id, users_to_invite)

        integration_context['bot_id'] = bot_id

    mirrors.append(mirror)

    set_to_latest_integration_context('mirrors', mirrors)
    set_to_latest_integration_context('conversations', conversations)

    if kick_admin:
        CHANNEL_CLIENT.conversations_leave(channel=conversation_id)
    if send_first_message:
        server_links = demisto.demistoUrls()
        server_link = server_links.get('server')
        CLIENT.chat_postMessage(channel=conversation_id,
                                text='This channel was created to mirror incident {}. \n View it on: {}#/WarRoom/{}'
                                .format(investigation_id, server_link, investigation_id))

    demisto.results('Investigation mirrored successfully, channel: {}'.format(conversation_name))
def long_running_loop():
    """
    Runs in a long running container - checking for newly mirrored investigations and answered questions.
    """
    while True:
        error = ''
        try:
            check_for_mirrors()
            check_for_answers(datetime.utcnow())
        except requests.exceptions.ConnectionError as exc:
            error = f'Could not connect to the Slack endpoint: {str(exc)}'
        except Exception as exc:
            error = f'An error occurred: {str(exc)}'
            demisto.error(error)
        finally:
            # Always surface the failure (if any) and pace the loop.
            if error:
                demisto.updateModuleHealth(error)
            time.sleep(5)
def check_for_answers(now: datetime):
    """
    Checks for answered questions
    :param now: The current date.

    Polls the external endpoint for each pending question that is due,
    processes any received answer, and prunes answered/expired questions
    from the integration context.
    """
    integration_context = demisto.getIntegrationContext()
    questions = integration_context.get('questions', [])
    users = integration_context.get('users', [])
    if questions:
        questions = json.loads(questions)
    if users:
        users = json.loads(users)
    now_string = datetime.strftime(now, DATE_FORMAT)

    for question in questions:
        if question.get('last_poll_time'):
            if question.get('expiry'):
                # Check if the question expired - if it did, answer it with the default response and remove it
                expiry = datetime.strptime(question['expiry'], DATE_FORMAT)
                if expiry < now:
                    answer_question(question.get('default_response'), question, questions)
                    continue
            # Check if it has been enough time(determined by the POLL_INTERVAL_MINUTES parameter)
            # since the last polling time. if not, continue to the next question until it has.
            last_poll_time = datetime.strptime(question['last_poll_time'], DATE_FORMAT)
            delta = now - last_poll_time
            minutes = delta.total_seconds() / 60
            if minutes < POLL_INTERVAL_MINUTES:
                continue
        demisto.info('Slack - polling for an answer for entitlement {}'.format(question.get('entitlement')))
        question['last_poll_time'] = now_string

        headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
        body = {
            'token': BOT_TOKEN,
            'entitlement': question.get('entitlement')
        }
        res = requests.post(ENDPOINT_URL, data=json.dumps(body), headers=headers, verify=VERIFY_CERT)
        if res.status_code != 200:
            demisto.error('Slack - failed to poll for answers: {}, status code: {}'
                          .format(res.content, res.status_code))
            continue
        answer: dict = {}
        try:
            answer = res.json()
        except Exception:
            # A non-JSON body means no answer yet; keep the question pending.
            demisto.info('Slack - Could not parse response for entitlement {}: {}'
                         .format(question.get('entitlement'), res.content))
            pass
        if not answer:
            continue
        payload_json: str = answer.get('payload', '')
        if not payload_json:
            continue
        payload = json.loads(payload_json)

        actions = payload.get('actions', [])
        if actions:
            demisto.info('Slack - received answer from user for entitlement {}.'.format(question.get('entitlement')))
            user_id = payload.get('user', {}).get('id')
            # Resolve the answering user from cache, falling back to the API.
            user_filter = list(filter(lambda u: u['id'] == user_id, users))
            if user_filter:
                user = user_filter[0]
            else:
                user = CLIENT.users_info(user=user_id).get('user', {})
                users.append(user)
                set_to_latest_integration_context('users', users)

            answer_question(actions[0].get('text', {}).get('text'), question, questions,
                            user.get('profile', {}).get('email'))

    # Drop questions that were marked for removal by answer_question.
    questions = list(filter(lambda q: q.get('remove', False) is False, questions))
    set_to_latest_integration_context('questions', questions)
def answer_question(text: str, question: dict, questions: list, email: str = ''):
    """Forward a user's answer for a question's entitlement to the server.

    The question is marked for removal regardless of whether the server call
    succeeds, and the updated question list is persisted.
    """
    entitlement_text = question.get('entitlement', '')
    content, guid, incident_id, task_id = extract_entitlement(entitlement_text, text)
    try:
        demisto.handleEntitlementForUser(incident_id, guid, email, content, task_id)
    except Exception as err:
        demisto.error('Failed handling entitlement {}: {}'.format(question.get('entitlement'), str(err)))

    question['remove'] = True
    set_to_latest_integration_context('questions', questions)
def check_for_mirrors():
    """
    Checks for newly created mirrors and updates the server accordingly.

    For every mirror record not yet registered with the server, calls
    demisto.mirrorInvestigation and flags the record as mirrored, persisting
    the updated list after each change.
    """
    integration_context = demisto.getIntegrationContext()
    if integration_context.get('mirrors'):
        mirrors = json.loads(integration_context['mirrors'])
        # Bug fix: iterate over a snapshot — the original popped/appended the
        # same list it was iterating, which can silently skip entries.
        for mirror in list(mirrors):
            if not mirror['mirrored']:
                demisto.info('Mirroring: {}'.format(mirror['investigation_id']))
                mirror = mirrors.pop(mirrors.index(mirror))
                if mirror['mirror_to'] and mirror['mirror_direction'] and mirror['mirror_type']:
                    investigation_id = mirror['investigation_id']
                    mirror_type = mirror['mirror_type']
                    auto_close = mirror['auto_close']
                    direction = mirror['mirror_direction']
                    # auto_close may be persisted as a string — normalize it.
                    if isinstance(auto_close, str):
                        auto_close = bool(strtobool(auto_close))
                    demisto.mirrorInvestigation(investigation_id, '{}:{}'.format(mirror_type, direction), auto_close)
                    mirror['mirrored'] = True
                    mirrors.append(mirror)
                else:
                    demisto.info('Could not mirror {}'.format(mirror['investigation_id']))
                set_to_latest_integration_context('mirrors', mirrors)
def extract_entitlement(entitlement: str, text: str) -> Tuple[str, str, str, str]:
    """
    Extracts entitlement components from an entitlement string
    :param entitlement: The entitlement itself (<guid>@<incident id>(|<task id>))
    :param text: The actual reply text
    :return: (content, guid, incident_id, task_id) — task_id is '' when absent
    """
    guid_and_rest = entitlement.split('@')
    guid = guid_and_rest[0]
    task_parts = guid_and_rest[1].split('|')
    incident_id = task_parts[0]
    task_id = task_parts[1] if len(task_parts) > 1 else ''
    # Strip the entitlement marker from the reply, keeping only the answer text.
    content = text.replace(entitlement, '', 1)
    return content, guid, incident_id, task_id
async def slack_loop():
    """
    Starts a Slack RTM client while checking the connection.

    Runs forever: (re)creates the RTM client, then watches its websocket every
    10 seconds and restarts the client when the connection drops or errors.
    """
    while True:
        loop = asyncio.get_running_loop()
        rtm_client = None
        try:
            rtm_client = slack.RTMClient(
                token=BOT_TOKEN,
                run_async=True,
                loop=loop,
                auto_reconnect=False
            )
            client_future = rtm_client.start()
            while True:
                await asyncio.sleep(10)
                # NOTE(review): reaches into the RTMClient's private _websocket
                # attribute to detect a dead connection, since auto_reconnect
                # is disabled — fragile across slackclient versions.
                if rtm_client._websocket is None or rtm_client._websocket.closed or client_future.done():
                    ex = client_future.exception()
                    if ex:
                        demisto.error('Slack client raised an exception: {}'.format(ex))
                    demisto.info('Slack - websocket is closed or done')
                    break
        except Exception as e:
            error = 'Slack client raised an exception: {}'.format(e)
            await handle_listen_error(error)
        finally:
            # If we got here, the websocket is closed or the client can't connect. Will try to connect every 5 seconds.
            if rtm_client and not rtm_client._stopped:
                rtm_client.stop()
            await asyncio.sleep(5)
async def handle_listen_error(error: str):
    """
    Logs an error and updates the module health accordingly.
    :param error: The error string.
    """
    # Surface the failure both in the server log and on the integration's
    # health indicator so it is visible in the UI.
    demisto.error(error)
    demisto.updateModuleHealth(error)
async def start_listening():
    """
    Starts a Slack RTM client and checks for mirrored incidents.
    """
    # Run the blocking polling loop on a worker thread so it doesn't stall
    # the asyncio event loop hosting the RTM websocket client.
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    loop = asyncio.get_running_loop()
    loop.run_in_executor(executor, long_running_loop)
    await slack_loop()
async def handle_dm(user: dict, text: str, client: slack.WebClient):
    """
    Handles a direct message sent to the bot
    :param user: The user who sent the message
    :param text: The message text
    :param client: The Slack client
    :return: Text to return to the user
    """
    demisto.info('Slack - handling direct message.')
    message: str = text.lower()
    # Messages that mention creating/opening a new incident go through the
    # incident-creation flow; everything else is forwarded as a direct message.
    if message.find('incident') != -1 and (message.find('create') != -1
                                           or message.find('open') != -1
                                           or message.find('new') != -1):
        user_email = user.get('profile', {}).get('email')
        if user_email:
            demisto_user = demisto.findUser(email=user_email)
        else:
            demisto_user = demisto.findUser(username=user.get('name'))

        if not demisto_user and not ALLOW_INCIDENTS:
            data = 'You are not allowed to create incidents.'
        else:
            data = await translate_create(demisto_user, text)
    else:
        try:
            data = demisto.directMessage(text, user.get('name'), user.get('profile', {}).get('email'), ALLOW_INCIDENTS)
        except Exception as e:
            # Relay the failure reason back to the user instead of crashing.
            data = str(e)

    if not data:
        data = 'Sorry, I could not perform the selected operation.'
    # Reply in (or open) the DM channel with the requesting user.
    im = await client.im_open(user=user.get('id'))
    channel = im.get('channel', {}).get('id')
    await client.chat_postMessage(channel=channel, text=data)
async def translate_create(demisto_user: dict, message: str) -> str:
    """
    Processes an incident creation message
    :param demisto_user: The Demisto user associated with the message (if exists)
    :param message: The creation message — either 'json=<json>' or
        'name=<name> type=[type]' style arguments.
    :return: Creation result
    """
    json_pattern = r'(?<=json=).*'
    name_pattern = r'(?<=name=).*'
    type_pattern = r'(?<=type=).*'
    # Strip newlines and Slack code-block backticks before parsing arguments.
    message = message.replace("\n", '').replace('`', '').replace('```', '')
    json_match = re.search(json_pattern, message)
    created_incident = None
    data = ''
    if json_match:
        # json= is exclusive — it may not be combined with name=/type=.
        if re.search(name_pattern, message) or re.search(type_pattern, message):
            data = 'No other properties other than json should be specified.'
        else:
            incidents_json = json_match.group()
            # Normalize smart quotes (as typed by some Slack clients) to ASCII.
            incidents = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
            if not isinstance(incidents, list):
                incidents = [incidents]
            created_incident = await create_incidents(demisto_user, incidents)

            if not created_incident:
                data = 'Failed creating incidents.'
    else:
        name_match = re.search(name_pattern, message)
        if not name_match:
            data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
        else:
            # name= and type= may appear in either order; strip the other key.
            incident_name = re.sub('type=.*', '', name_match.group()).strip()
            incident_type = ''

            type_match = re.search(type_pattern, message)
            if type_match:
                incident_type = re.sub('name=.*', '', type_match.group()).strip()

            incident = {'name': incident_name}

            # Fall back to the configured default incident type.
            incident_type = incident_type or INCIDENT_TYPE
            if incident_type:
                incident['type'] = incident_type

            created_incident = await create_incidents(demisto_user, [incident])
            if not created_incident:
                data = 'Failed creating incidents.'

    if created_incident:
        if isinstance(created_incident, list):
            created_incident = created_incident[0]
        server_links = demisto.demistoUrls()
        server_link = server_links.get('server')
        data = ('Successfully created incident {}.\n View it on: {}#/WarRoom/{}'
                .format(created_incident['name'], server_link, created_incident['id']))

    return data
async def create_incidents(demisto_user: dict, incidents: list) -> dict:
    """
    Creates incidents according to a provided JSON object
    :param demisto_user: The demisto user associated with the request (if exists)
    :param incidents: The incidents JSON
    :return: The creation result
    """
    # Attribute the incidents to the requesting user when one is known.
    if not demisto_user:
        return demisto.createIncidents(incidents)
    return demisto.createIncidents(incidents, userID=demisto_user['id'])
@slack.RTMClient.run_on(event='message')
async def listen(**payload):
    """
    Listens to Slack RTM messages
    :param payload: The message payload

    Dispatches each incoming message: entitlement replies are handled and
    acknowledged, DMs go to the bot's DM handler, and messages in mirrored
    channels are added to their investigation's War Room.
    """
    data: dict = payload.get('data', {})
    data_type: str = payload.get('type', '')
    client: slack.WebClient = payload.get('web_client')

    if data_type == 'error':
        error = payload.get('error', {})
        await handle_listen_error('Slack API has thrown an error. Code: {}, Message: {}.'
                                  .format(error.get('code'), error.get('msg')))
        return
    try:
        subtype = data.get('subtype', '')
        text = data.get('text', '')
        user_id = data.get('user', '')
        channel = data.get('channel', '')
        message_bot_id = data.get('bot_id', '')
        thread = data.get('thread_ts', '')
        message = data.get('message', {})

        # Ignore anything posted by a bot (including our own messages).
        if subtype == 'bot_message' or message_bot_id or message.get('subtype') == 'bot_message':
            return

        integration_context = demisto.getIntegrationContext()
        user = await get_user_by_id_async(client, integration_context, user_id)
        entitlement_reply = await check_and_handle_entitlement(text, user, thread)
        if entitlement_reply:
            await client.chat_postMessage(channel=channel, text=entitlement_reply, thread_ts=thread)
        elif channel and channel[0] == 'D':
            # DM
            await handle_dm(user, text, client)
        else:
            if not integration_context or 'mirrors' not in integration_context:
                return

            channel_id = data.get('channel')
            mirrors = json.loads(integration_context['mirrors'])
            mirror_filter = list(filter(lambda m: m['channel_id'] == channel_id, mirrors))
            if not mirror_filter:
                return

            for mirror in mirror_filter:
                # Skip channels that only mirror Demisto -> Slack (or nothing).
                if mirror['mirror_direction'] == 'FromDemisto' or mirror['mirror_type'] == 'none':
                    return

                if not mirror['mirrored']:
                    # In case the investigation is not mirrored yet
                    mirror = mirrors.pop(mirrors.index(mirror))
                    if mirror['mirror_to'] and mirror['mirror_direction'] and mirror['mirror_type']:
                        investigation_id = mirror['investigation_id']
                        mirror_type = mirror['mirror_type']
                        auto_close = mirror['auto_close']
                        direction = mirror['mirror_direction']
                        if isinstance(auto_close, str):
                            auto_close = bool(strtobool(auto_close))
                        demisto.info('Mirroring: {}'.format(investigation_id))
                        demisto.mirrorInvestigation(investigation_id, '{}:{}'.format(mirror_type, direction),
                                                    auto_close)
                        mirror['mirrored'] = True
                        mirrors.append(mirror)
                        set_to_latest_integration_context('mirrors', mirrors)

                investigation_id = mirror['investigation_id']
                await handle_text(client, investigation_id, text, user)
        # Reset module health
        demisto.updateModuleHealth("")
    except Exception as e:
        await handle_listen_error('Error occurred while listening to Slack: {}'.format(str(e)))
async def get_user_by_id_async(client, integration_context, user_id):
    """Resolve a Slack user by ID, preferring the cached integration context.

    On a cache miss the user is fetched via users.info and the cache is updated.
    """
    cached_users: list = []
    if integration_context.get('users'):
        cached_users = json.loads(integration_context['users'])
        matches = [u for u in cached_users if u['id'] == user_id]
        if matches:
            return matches[0]

    fetched = (await client.users_info(user=user_id)).get('user', {})
    cached_users.append(fetched)
    set_to_latest_integration_context('users', cached_users)
    return fetched
async def handle_text(client: slack.WebClient, investigation_id: str, text: str, user: dict):
    """
    Handles text received in the Slack workspace (not DM)
    :param client: The Slack client
    :param investigation_id: The mirrored investigation ID
    :param text: The received text
    :param user: The sender
    """
    demisto.info('Slack - adding entry to incident {}'.format(investigation_id))
    if not text:
        return
    # Strip Slack markup before mirroring the message into the War Room.
    cleaned_text = await clean_message(text, client)
    demisto.addEntry(id=investigation_id,
                     entry=cleaned_text,
                     username=user.get('name', ''),
                     email=user.get('profile', {}).get('email', ''),
                     footer=MESSAGE_FOOTER
                     )
async def check_and_handle_entitlement(text: str, user: dict, thread_id: str) -> str:
    """
    Handles an entitlement message (a reply to a question)
    :param text: The message text
    :param user: The user who sent the reply
    :param thread_id: The thread ID
    :return: If the message contains entitlement, return a reply. '' otherwise.
    """
    # Case 1: the entitlement string is embedded in the message itself.
    entitlement_match = re.search(ENTITLEMENT_REGEX, text)
    if entitlement_match:
        demisto.info('Slack - handling entitlement in message.')
        content, guid, incident_id, task_id = extract_entitlement(entitlement_match.group(), text)
        demisto.handleEntitlementForUser(incident_id, guid, user.get('profile', {}).get('email'), content, task_id)

        return 'Thank you for your response.'
    else:
        # Case 2: the message is a threaded reply to a tracked question —
        # look up the entitlement by the thread ID.
        integration_context = demisto.getIntegrationContext()
        questions = integration_context.get('questions', [])
        if questions and thread_id:
            questions = json.loads(questions)
            question_filter = list(filter(lambda q: q.get('thread') == thread_id, questions))
            if question_filter:
                demisto.info('Slack - handling entitlement in thread.')
                entitlement = question_filter[0].get('entitlement')
                reply = question_filter[0].get('reply', 'Thank you for your response.')
                content, guid, incident_id, task_id = extract_entitlement(entitlement, text)
                demisto.handleEntitlementForUser(incident_id, guid, user.get('profile', {}).get('email'), content,
                                                 task_id)
                # The question is answered — drop it from the pending list.
                questions.remove(question_filter[0])
                set_to_latest_integration_context('questions', questions)

                return reply

    return ''
''' SEND '''
def get_conversation_by_name(conversation_name: str) -> dict:
    """
    Get a slack conversation by its name
    :param conversation_name: The conversation name
    :return: The slack conversation (empty dict if not found)
    """
    response = CLIENT.conversations_list(types='private_channel,public_channel', limit=200)
    conversation: dict = {}
    while True:
        # Page through all public/private channels (200 per request) until the
        # name matches or the cursor runs out.
        conversations = response['channels'] if response and response.get('channels') else []
        cursor = response.get('response_metadata', {}).get('next_cursor')
        conversation_filter = list(filter(lambda c: c.get('name') == conversation_name, conversations))
        if conversation_filter:
            break
        if not cursor:
            break
        response = CLIENT.conversations_list(types='private_channel,public_channel', limit=200, cursor=cursor)

    if conversation_filter:
        conversation = conversation_filter[0]

    return conversation
def slack_send():
    """
    Sends a message to slack

    Resolves the destination (user/group/channel, with special handling for
    the dedicated notifications channel and severity threshold), unpacks any
    embedded entitlement question, sends the message/blocks and returns the
    resulting thread ID to the context.
    """
    message = demisto.args().get('message', '')
    to = demisto.args().get('to')
    channel = demisto.args().get('channel')
    group = demisto.args().get('group')
    message_type = demisto.args().get('messageType', '')  # From server
    original_message = demisto.args().get('originalMessage', '')  # From server
    entry = demisto.args().get('entry')
    ignore_add_url = demisto.args().get('ignoreAddURL', False) or demisto.args().get('IgnoreAddURL', False)
    thread_id = demisto.args().get('threadID', '')
    severity = demisto.args().get('severity')  # From server
    blocks = demisto.args().get('blocks')
    entitlement = ''

    if message_type == MIRROR_TYPE and original_message.find(MESSAGE_FOOTER) != -1:
        # return so there will not be a loop of messages
        return

    if (to and group) or (to and channel) or (to and channel and group):
        return_error('Only one destination can be provided.')

    if severity:
        try:
            severity = int(severity)
        except Exception:
            severity = None
            pass

    # Incident-opened notifications go to the dedicated channel by default.
    if channel == INCIDENT_NOTIFICATION_CHANNEL or (not channel and message_type == INCIDENT_OPENED):
        channel = DEDICATED_CHANNEL

    # Suppress dedicated-channel notifications below the severity threshold
    # or when notifications are disabled entirely.
    if channel == DEDICATED_CHANNEL and ((severity is not None and severity < SEVERITY_THRESHOLD)
                                         or not NOTIFY_INCIDENTS):
        channel = None

    if not (to or group or channel):
        return_error('Either a user, group or channel must be provided.')

    reply = ''
    expiry = ''
    default_response = ''
    # Messages carrying an entitlement arrive as a JSON envelope — unpack it.
    if blocks:
        entitlement_match = re.search(ENTITLEMENT_REGEX, blocks)
        if entitlement_match:
            try:
                parsed_message = json.loads(blocks)
                entitlement = parsed_message.get('entitlement')
                blocks = parsed_message.get('blocks')
                reply = parsed_message.get('reply')
                expiry = parsed_message.get('expiry')
                default_response = parsed_message.get('default_response')
            except Exception:
                demisto.info('Slack - could not parse JSON from entitlement blocks.')
                pass
    elif message:
        entitlement_match = re.search(ENTITLEMENT_REGEX, message)
        if entitlement_match:
            try:
                parsed_message = json.loads(message)
                entitlement = parsed_message.get('entitlement')
                message = parsed_message.get('message')
                reply = parsed_message.get('reply')
                expiry = parsed_message.get('expiry')
                default_response = parsed_message.get('default_response')
            except Exception:
                demisto.info('Slack - could not parse JSON from entitlement message.')
                pass

    response = slack_send_request(to, channel, group, entry, ignore_add_url, thread_id, message=message, blocks=blocks)

    if response:
        thread = response.get('ts')
        if entitlement:
            # Track the question so the polling loop can collect the answer.
            save_entitlement(entitlement, thread, reply, expiry, default_response)

        demisto.results({
            'Type': entryTypes['note'],
            'Contents': 'Message sent to Slack successfully.\nThread ID is: {}'.format(thread),
            'ContentsFormat': formats['text'],
            'EntryContext': {
                'Slack.Thread(val.ID===obj.ID)': {
                    'ID': thread
                },
            }
        })
    else:
        demisto.results('Could not send the message to Slack.')
def save_entitlement(entitlement, thread, reply, expiry, default_response):
    """
    Saves an entitlement with its thread
    :param entitlement: The entitlement
    :param thread: The thread
    :param reply: The reply to send to the user.
    :param expiry: The question expiration date.
    :param default_response: The response to send if the question times out.
    """
    integration_context = demisto.getIntegrationContext()
    questions: list = []
    if integration_context.get('questions'):
        questions = json.loads(integration_context['questions'])
    questions.append({
        'thread': thread,
        'entitlement': entitlement,
        'reply': reply,
        'expiry': expiry,
        'default_response': default_response
    })

    set_to_latest_integration_context('questions', questions)
def slack_send_file():
    """
    Sends a file to Slack.

    Resolves the destination from the command arguments (user, channel or
    group, falling back to the mirrored channel of the current
    investigation), loads the war-room file entry and sends it via
    slack_send_request. Reports success or failure with demisto.results.
    """
    to = demisto.args().get('to')
    channel = demisto.args().get('channel')
    group = demisto.args().get('group')
    entry_id = demisto.args().get('file')
    thread_id = demisto.args().get('threadID')
    comment = demisto.args().get('comment', '')

    if not (to or channel or group):
        # No explicit destination - fall back to the investigation's mirror.
        mirror = find_mirror_by_investigation()
        if mirror:
            channel = mirror.get('channel_name')
    if not (to or channel or group):
        return_error('Either a user, group or channel must be provided.')

    file_path = demisto.getFilePath(entry_id)
    # Renamed from 'file' to avoid shadowing the builtin and reusing one
    # name for both the open handle and the payload dict.
    with open(file_path['path'], 'rb') as file_handle:
        data = file_handle.read()

    file_payload = {
        'data': data,
        'name': file_path['name'],
        'comment': comment
    }

    response = slack_send_request(to, channel, group, thread_id=thread_id, file=file_payload)
    if response:
        demisto.results('File sent to Slack successfully.')
    else:
        demisto.results('Could not send the file to Slack.')
def send_message(destinations: list, entry: str, ignore_add_url: bool, integration_context: dict, message: str,
                 thread_id: str, blocks: str):
    """
    Sends a message to Slack, optionally appending a Demisto URL, and
    retries once after inviting the bot if it is not in a destination.

    :param destinations: The destinations to send to.
    :param entry: A WarRoom entry to send.
    :param ignore_add_url: Do not add a Demisto URL to the message.
    :param integration_context: Current integration context.
    :param message: The message to send.
    :param thread_id: The Slack thread ID to send the message to.
    :param blocks: Message blocks to send
    :return: The Slack send response.
    """
    if not message:
        if blocks:
            message = 'New message from SOC Bot'
            # This is shown in the notification bubble from Slack
        else:
            message = '\n'
    if message and not blocks:
        if ignore_add_url and isinstance(ignore_add_url, str):
            ignore_add_url = bool(strtobool(ignore_add_url))
        if not ignore_add_url:
            investigation = demisto.investigation()
            server_links = demisto.demistoUrls()
            if investigation:
                if investigation.get('type') != PLAYGROUND_INVESTIGATION_TYPE:
                    link = server_links.get('warRoom')
                    if link:
                        if entry:
                            link += '/' + entry
                        message += '\n{} {}'.format('View it on:', link)
                else:
                    link = server_links.get('server', '')
                    if link:
                        message += '\n{} {}'.format('View it on:', link + '#/home')
    try:
        response = send_message_to_destinations(destinations, message, thread_id, blocks)
    except SlackApiError as e:
        # Only recover from "bot not in channel" style errors.
        if str(e).find('not_in_channel') == -1 and str(e).find('channel_not_found') == -1:
            raise
        bot_id = integration_context.get('bot_id')
        if not bot_id:
            bot_id = get_bot_id()
            # Cache the looked-up bot ID, consistent with send_file, so it
            # is not fetched again on subsequent failures.
            integration_context['bot_id'] = bot_id
        for dest in destinations:
            invite_users_to_conversation(dest, [bot_id])
        response = send_message_to_destinations(destinations, message, thread_id, blocks)
    return response
def send_message_to_destinations(destinations: list, message: str, thread_id: str, blocks: str = '') -> dict:
    """
    Posts a message to every provided Slack destination.

    :param destinations: Destinations to send to.
    :param message: The message to send.
    :param thread_id: Slack thread ID to send to.
    :param blocks: Message blocks to send
    :return: The Slack send response (of the last destination).
    """
    post_args: dict = {}
    if message:
        post_args['text'] = message
    if blocks:
        post_args['blocks'] = json.loads(blocks)
    if thread_id:
        post_args['thread_ts'] = thread_id

    result: dict = {}
    for dest in destinations:
        result = CLIENT.chat_postMessage(channel=dest, **post_args)
    return result
def send_file(destinations: list, file: dict, integration_context: dict, thread_id: str) -> dict:
    """
    Uploads a file to Slack, inviting the bot and retrying once if it is
    not yet a member of a destination channel.

    :param destinations: Destinations to send the file to.
    :param file: The file to send.
    :param integration_context: The current integration context.
    :param thread_id: A Slack thread to send to.
    :return: The Slack send response.
    """
    try:
        return send_file_to_destinations(destinations, file, thread_id)
    except SlackApiError as api_error:
        error_text = str(api_error)
        membership_error = ('not_in_channel' in error_text
                            or 'channel_not_found' in error_text)
        if not membership_error:
            raise
        bot_id = integration_context.get('bot_id')
        if not bot_id:
            bot_id = get_bot_id()
            integration_context['bot_id'] = bot_id
        for destination in destinations:
            invite_users_to_conversation(destination, [bot_id])
        return send_file_to_destinations(destinations, file, thread_id)
def send_file_to_destinations(destinations: list, file: dict, thread_id: str) -> dict:
    """
    Uploads a file to every provided Slack destination.

    :param destinations: The destinations to send to.
    :param file: The file to send (dict with 'data', 'name', 'comment').
    :param thread_id: A thread ID to send to.
    :return: The Slack send response (of the last destination).
    """
    response: dict = {}
    kwargs = {
        'filename': file['name'],
        'initial_comment': file['comment']
    }
    if thread_id:
        # thread_ts does not depend on the destination - set it once
        # instead of on every loop iteration.
        kwargs['thread_ts'] = thread_id
    for destination in destinations:
        kwargs['channels'] = destination
        response = CLIENT.files_upload(file=file['data'], **kwargs)
    return response
def slack_send_request(to: str, channel: str, group: str, entry: str = '', ignore_add_url: bool = False,
                       thread_id: str = '', message: str = '', blocks: str = '', file: dict = None) -> dict:
    """
    Requests to send a message or a file to Slack.

    Destination resolution order: a direct user IM if `to` is given;
    otherwise a channel/group looked up in the cached conversations, then
    in the mirrored channels, then via the Slack API (and cached).

    :param to: A Slack user to send to.
    :param channel: A Slack channel to send to.
    :param group: A Slack private channel to send to.
    :param entry: WarRoom entry to send.
    :param ignore_add_url: Do not add a Demisto URL to the message.
    :param thread_id: The Slack thread ID to send to.
    :param message: A message to send.
    :param blocks: Blocks to send with a slack message
    :param file: A file to send.
    :return: The Slack send response.
    """
    integration_context = demisto.getIntegrationContext()
    conversations: list = []
    mirrors: list = []
    if integration_context:
        # Both caches are stored JSON-encoded in the integration context.
        if 'conversations' in integration_context:
            conversations = json.loads(integration_context['conversations'])
        if 'mirrors' in integration_context:
            mirrors = json.loads(integration_context['mirrors'])
    destinations = []
    if to:
        if isinstance(to, list):
            # Only the first recipient of a list is honored.
            to = to[0]
        user = get_user_by_name(to)
        if not user:
            demisto.error('Could not find the Slack user {}'.format(to))
        else:
            # Open (or fetch) the IM channel with the user and use its ID.
            im = CLIENT.im_open(user=user.get('id'))
            destinations.append(im.get('channel', {}).get('id'))
    if channel or group:
        if not destinations:
            destination_name = channel or group
            # 1) cached conversations
            conversation_filter = list(filter(lambda c: c.get('name') == destination_name, conversations))
            if conversation_filter:
                conversation = conversation_filter[0]
                conversation_id = conversation.get('id')
            else:
                # 2) mirrored incident channels (named 'incident-<id>')
                mirrored_channel_filter = list(filter(lambda m: 'incident-{}'
                                                      .format(m['investigation_id']) == destination_name, mirrors))
                if mirrored_channel_filter:
                    channel_mirror = mirrored_channel_filter[0]
                    conversation_id = channel_mirror['channel_id']
                else:
                    # 3) Slack API lookup; cache the result for next time.
                    conversation = get_conversation_by_name(destination_name)
                    if not conversation:
                        return_error('Could not find the Slack conversation {}'.format(destination_name))
                    conversations.append(conversation)
                    set_to_latest_integration_context('conversations', conversations)
                    conversation_id = conversation.get('id')
            if conversation_id:
                destinations.append(conversation_id)
    if not destinations:
        return_error('Could not find any destination to send to.')
    if file:
        response = send_file(destinations, file, integration_context, thread_id)
        return response
    response = send_message(destinations, entry, ignore_add_url, integration_context, message,
                            thread_id, blocks)
    return response
def set_channel_topic():
    """
    Sets the topic of a Slack channel. When no channel argument is given,
    the mirrored channel of the current investigation is used and the
    stored mirror entry is updated to match.
    """
    channel = demisto.args().get('channel')
    topic = demisto.args().get('topic')
    channel_id = ''
    if not channel:
        mirror = find_mirror_by_investigation()
        if mirror:
            channel_id = mirror.get('channel_id', '')
            # Keep the cached mirror entry in sync with the new topic.
            integration_context = demisto.getIntegrationContext()
            mirrors = json.loads(integration_context['mirrors'])
            mirror = mirrors.pop(mirrors.index(mirror))
            mirror['channel_topic'] = topic
            mirrors.append(mirror)
            set_to_latest_integration_context('mirrors', mirrors)
    else:
        found_channel = get_conversation_by_name(channel)
        channel_id = found_channel.get('id')
    if not channel_id:
        return_error('Channel not found - the Demisto app needs to be a member of the channel in order to look it up.')
    CHANNEL_CLIENT.conversations_setTopic(channel=channel_id, topic=topic)
    demisto.results('Topic successfully set.')
def rename_channel():
    """
    Renames a Slack channel. When no channel argument is given, the
    mirrored channel of the current investigation is used and the stored
    mirror entry is updated to the new name.
    """
    channel = demisto.args().get('channel')
    new_name = demisto.args().get('name')
    channel_id = ''
    if not channel:
        mirror = find_mirror_by_investigation()
        if mirror:
            channel_id = mirror.get('channel_id', '')
            # Keep the cached mirror entry in sync with the new name.
            integration_context = demisto.getIntegrationContext()
            mirrors = json.loads(integration_context['mirrors'])
            mirror = mirrors.pop(mirrors.index(mirror))
            mirror['channel_name'] = new_name
            mirrors.append(mirror)
            set_to_latest_integration_context('mirrors', mirrors)
    else:
        found_channel = get_conversation_by_name(channel)
        channel_id = found_channel.get('id')
    if not channel_id:
        return_error('Channel not found - the Demisto app needs to be a member of the channel in order to look it up.')
    CHANNEL_CLIENT.conversations_rename(channel=channel_id, name=new_name)
    demisto.results('Channel renamed successfully.')
def close_channel():
    """
    Archives a slack channel by name or its incident ID if mirrored.
    Any mirrors that point at the archived channel are removed from the
    integration context.
    """
    channel = demisto.args().get('channel')
    channel_id = ''
    if not channel:
        mirror = find_mirror_by_investigation()
        if mirror:
            channel_id = mirror.get('channel_id', '')
            # Drop every mirror that targets this channel.
            integration_context = demisto.getIntegrationContext()
            mirrors = json.loads(integration_context['mirrors'])
            mirror = mirrors.pop(mirrors.index(mirror))
            channel_id = mirror['channel_id']
            stale_mirrors = list(filter(lambda m: channel_id == m['channel_id'], mirrors))
            for stale in stale_mirrors:
                mirrors.remove(stale)
            set_to_latest_integration_context('mirrors', mirrors)
    else:
        found_channel = get_conversation_by_name(channel)
        channel_id = found_channel.get('id')
    if not channel_id:
        return_error('Channel not found - the Demisto app needs to be a member of the channel in order to look it up.')
    CHANNEL_CLIENT.conversations_archive(channel=channel_id)
    demisto.results('Channel successfully archived.')
def create_channel():
    """
    Creates a channel in Slack using the provided arguments.
    Private channels use groups_create; public ones use channels_create.
    Optionally invites users and sets a topic.
    """
    channel_type = demisto.args().get('type', 'private')
    channel_name = demisto.args()['name']
    users = argToList(demisto.args().get('users', []))
    topic = demisto.args().get('topic')
    if channel_type == 'private':
        conversation = CHANNEL_CLIENT.groups_create(name=channel_name).get('group', {})
    else:
        conversation = CHANNEL_CLIENT.channels_create(name=channel_name).get('channel', {})
    if users:
        slack_users = search_slack_users(users)
        user_ids = [u.get('id') for u in slack_users]
        invite_users_to_conversation(conversation.get('id'), user_ids)
    if topic:
        CHANNEL_CLIENT.conversations_setTopic(channel=conversation.get('id'), topic=topic)
    demisto.results('Successfully created the channel {}.'.format(conversation.get('name')))
def invite_to_channel():
    """
    Invites users to a Slack channel (explicit channel argument, or the
    mirrored channel of the current investigation).
    """
    channel = demisto.args().get('channel')
    users = argToList(demisto.args().get('users', []))
    channel_id = ''
    if channel:
        found_channel = get_conversation_by_name(channel)
        channel_id = found_channel.get('id')
    else:
        mirror = find_mirror_by_investigation()
        if mirror:
            channel_id = mirror['channel_id']
    if not channel_id:
        return_error('Channel not found - the Demisto app needs to be a member of the channel in order to look it up.')
    slack_users = search_slack_users(users)
    if not slack_users:
        return_error('No users found')
    invite_users_to_conversation(channel_id, [u.get('id') for u in slack_users])
    demisto.results('Successfully invited users to the channel.')
def kick_from_channel():
    """
    Removes users from a Slack channel (explicit channel argument, or the
    mirrored channel of the current investigation).
    """
    channel = demisto.args().get('channel')
    users = argToList(demisto.args().get('users', []))
    channel_id = ''
    if channel:
        found_channel = get_conversation_by_name(channel)
        channel_id = found_channel.get('id')
    else:
        mirror = find_mirror_by_investigation()
        if mirror:
            channel_id = mirror['channel_id']
    if not channel_id:
        return_error('Channel not found - the Demisto app needs to be a member of the channel in order to look it up.')
    slack_users = search_slack_users(users)
    if not slack_users:
        return_error('No users found')
    kick_users_from_conversation(channel_id, [u.get('id') for u in slack_users])
    demisto.results('Successfully kicked users from the channel.')
def get_user():
    """
    Looks up a Slack user by name and returns ID, username, real name,
    display name and email as a war-room entry with context.
    """
    requested_name = demisto.args()['user']
    slack_user = get_user_by_name(requested_name)
    if not slack_user:
        return_error('User not found')
    profile = slack_user.get('profile', {})
    user_details = {
        'ID': slack_user.get('id'),
        'Username': slack_user.get('name'),
        # Prefer the normalized real name; fall back to the raw one.
        'Name': profile.get('real_name_normalized') or profile.get('real_name'),
        'DisplayName': profile.get('display_name'),
        'Email': profile.get('email')
    }
    readable = tableToMarkdown('Details for Slack user: ' + requested_name, user_details,
                               headers=['ID', 'Username', 'Name', 'DisplayName', 'Email'],
                               headerTransform=pascalToSpace, removeNull=True)
    entry_context = {
        'Slack.User(val.ID === obj.ID)': createContext(user_details, removeNull=True)
    }
    return_outputs(readable, entry_context, slack_user)
def long_running_main():
    """
    Starts the long running thread.
    """
    # Blocks for the lifetime of the long-running execution, driving the
    # async Slack listener on a fresh event loop.
    asyncio.run(start_listening())
def init_globals():
    """
    Initializes global variables according to the integration parameters
    """
    global BOT_TOKEN, ACCESS_TOKEN, PROXY, DEDICATED_CHANNEL, CLIENT, CHANNEL_CLIENT
    global SEVERITY_THRESHOLD, ALLOW_INCIDENTS, NOTIFY_INCIDENTS, INCIDENT_TYPE, VERIFY_CERT
    BOT_TOKEN = demisto.params().get('bot_token')
    ACCESS_TOKEN = demisto.params().get('access_token')
    PROXY = handle_proxy().get('https')
    DEDICATED_CHANNEL = demisto.params().get('incidentNotificationChannel')
    # Two clients: CLIENT acts as the bot, CHANNEL_CLIENT uses the user
    # (access) token for channel-management calls.
    CLIENT = slack.WebClient(token=BOT_TOKEN, proxy=PROXY)
    CHANNEL_CLIENT = slack.WebClient(token=ACCESS_TOKEN, proxy=PROXY)
    # Minimum incident severity that triggers a notification (default Low).
    SEVERITY_THRESHOLD = SEVERITY_DICT.get(demisto.params().get('min_severity', 'Low'), 1)
    ALLOW_INCIDENTS = demisto.params().get('allow_incidents', False)
    NOTIFY_INCIDENTS = demisto.params().get('notify_incidents', True)
    INCIDENT_TYPE = demisto.params().get('incidentType')
    # 'unsecure' param disables certificate verification when True.
    VERIFY_CERT = not demisto.params().get('unsecure', False)
def main():
    """
    Integration entry point: initialize globals, then dispatch the current
    command through the command table. Errors are logged and returned.
    """
    init_globals()
    commands = {
        'test-module': test_module,
        'long-running-execution': long_running_main,
        'slack-mirror-investigation': mirror_investigation,
        'mirror-investigation': mirror_investigation,
        'slack-send': slack_send,
        'send-notification': slack_send,
        'slack-send-file': slack_send_file,
        'slack-set-channel-topic': set_channel_topic,
        'close-channel': close_channel,
        'slack-close-channel': close_channel,
        'slack-create-channel': create_channel,
        'slack-invite-to-channel': invite_to_channel,
        'slack-kick-from-channel': kick_from_channel,
        'slack-rename-channel': rename_channel,
        'slack-get-user-details': get_user,
    }
    try:
        commands[demisto.command()]()
    except Exception as e:
        LOG(e)
        return_error(str(e))
# Run under direct invocation and under both XSOAR execution contexts
# ('__builtin__' on Python 2, 'builtins' on Python 3).
if __name__ in ['__main__', '__builtin__', 'builtins']:
    main()
| 37.512974 | 119 | 0.621794 |
79550ff6cdb6364b3bf3778a54e6503802957c6b | 643 | py | Python | yaml_test/ex7_yam_json_read.py | terblac/mypynetcourse | 3252204e1c7996bad97e984e2c5182ddccbd2ce3 | [
"Apache-2.0"
] | null | null | null | yaml_test/ex7_yam_json_read.py | terblac/mypynetcourse | 3252204e1c7996bad97e984e2c5182ddccbd2ce3 | [
"Apache-2.0"
] | null | null | null | yaml_test/ex7_yam_json_read.py | terblac/mypynetcourse | 3252204e1c7996bad97e984e2c5182ddccbd2ce3 | [
"Apache-2.0"
] | null | null | null |
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
    '''
    Make the output easier to read
    '''
    # Python 2 print statements: two blank lines, a '###' banner labelled
    # with my_str, then the pretty-printed data structure.
    print '\n\n'
    print '#'*3
    print '#'*3+my_str
    print '#'*3
    pprint(my_list)
def main():
    '''
    Read YAML and JSON files. Pretty print to standard out
    '''
    yaml_file = 'my_test.yml'
    json_file= 'my_test.json'
    with open(yaml_file) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary Python object tags on untrusted input - prefer
        # yaml.safe_load if the file is not fully trusted.
        yaml_list = yaml.load(f)
    with open(json_file) as f:
        json_list = json.load(f)
    output_format(yaml_list,'YAML')
    output_format(json_list,'JSON')
    print'\n'
# Script entry point.
if __name__=="__main__":
    main()
| 17.378378 | 58 | 0.600311 |
795510c043ad4db4a7279a8116b558665b3c808b | 24,898 | py | Python | toontown/building/DistributedBoardingPartyAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | null | null | null | toontown/building/DistributedBoardingPartyAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | toontown/building/DistributedBoardingPartyAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from otp.otpbase import OTPGlobals
from otp.ai.AIBase import *
from toontown.toonbase import ToontownGlobals
from direct.distributed.ClockDelta import *
from ElevatorConstants import *
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from toontown.building import BoardingPartyBase
from toontown.toonbase import ToontownAccessAI
# Status codes distinguishing confirmed members from pending invitees.
GROUPMEMBER = 0
GROUPINVITE = 1
class DistributedBoardingPartyAI(DistributedObjectAI.DistributedObjectAI, BoardingPartyBase.BoardingPartyBase):
    """AI-side boarding party: manages group invites, membership and group
    boarding/GO-button requests for a set of elevators in one zone."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBoardingPartyAI')
    def __init__(self, air, elevatorList, maxSize=4):
        # Initialize both base classes before configuring group state.
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)
        BoardingPartyBase.BoardingPartyBase.__init__(self)
        # Maximum number of toons allowed in one boarding group.
        self.setGroupSize(maxSize)
        # doIds of the elevators this boarding party serves.
        self.elevatorIdList = elevatorList
        # Zones a group member may stand in without being dropped;
        # populated in generate().
        self.visibleZones = []
    def delete(self):
        # Tear down boarding-party state before the distributed object goes away.
        self.cleanup()
        DistributedObjectAI.DistributedObjectAI.delete(self)
    def generate(self):
        DistributedObjectAI.DistributedObjectAI.generate(self)
        # Register this boarding party with each of its elevators.
        for elevatorId in self.elevatorIdList:
            elevator = simbase.air.doId2do.get(elevatorId)
            elevator.setBoardingParty(self)
        # Compute the set of zones visible from this zone using the DNA
        # visgroup data; group members are dropped when they leave it.
        store = simbase.air.dnaStoreMap.get(self.zoneId)
        if store:
            numVisGroups = store.getNumDNAVisGroupsAI()
            myVisGroup = None
            # Find the visgroup named after our own zone (last match wins).
            for index in xrange(numVisGroups):
                if store.getDNAVisGroupAI(index).getName() == str(self.zoneId):
                    myVisGroup = store.getDNAVisGroupAI(index)
            if myVisGroup:
                numVisibles = myVisGroup.getNumVisibles()
                for index in xrange(numVisibles):
                    newVisible = myVisGroup.getVisibleName(index)
                    self.visibleZones.append(int(newVisible))
            else:
                # No matching visgroup: only our own zone is "visible".
                self.visibleZones = [
                    self.zoneId]
        else:
            # No DNA store for this zone: only our own zone is "visible".
            self.visibleZones = [
                self.zoneId]
        return
    def cleanup(self):
        # Release base-class bookkeeping and our own references.
        BoardingPartyBase.BoardingPartyBase.cleanup(self)
        del self.elevatorIdList
        del self.visibleZones
    def getElevatorIdList(self):
        # Distributed getter: elevators served by this boarding party.
        return self.elevatorIdList
    def setElevatorIdList(self, elevatorIdList):
        # Distributed setter for the served elevator doIds.
        self.elevatorIdList = elevatorIdList
def addWacthAvStatus(self, avId):
self.acceptOnce(self.air.getAvatarExitEvent(avId), self.handleAvatarDisco, extraArgs=[avId])
self.accept(self.staticGetLogicalZoneChangeEvent(avId), self.handleAvatarZoneChange, extraArgs=[avId])
messageToonAdded = 'Battle adding toon %s' % avId
self.accept(messageToonAdded, self.handleToonJoinedBattle)
messageToonReleased = 'Battle releasing toon %s' % avId
self.accept(messageToonReleased, self.handleToonLeftBattle)
    def handleToonJoinedBattle(self, avId):
        # Debug-only hook; battle membership is re-checked in testBoard.
        self.notify.debug('handleToonJoinedBattle %s' % avId)
    def handleToonLeftBattle(self, avId):
        # Debug-only hook; battle membership is re-checked in testBoard.
        self.notify.debug('handleToonLeftBattle %s' % avId)
    def removeWacthAvStatus(self, avId):
        # Stop watching the avatar's disconnect and zone-change events.
        # NOTE(review): the battle join/leave events accepted in
        # addWacthAvStatus are not ignored here - confirm intended.
        self.ignore(self.air.getAvatarExitEvent(avId))
        self.ignore(self.staticGetLogicalZoneChangeEvent(avId))
    def requestInvite(self, inviteeId):
        """
        Sender asks to invite inviteeId into a boarding group.

        Validates payment/access, battle state, existing group membership,
        pending invites and elevator requirements for both parties, then
        either extends an existing group's invite list or creates a new
        group led by the inviter.
        """
        self.notify.debug('requestInvite %s' % inviteeId)
        inviterId = self.air.getAvatarIdFromSender()
        invitee = simbase.air.doId2do.get(inviteeId)
        inviter = simbase.air.doId2do.get(inviterId)
        inviterOkay = self.checkBoard(inviterId, self.elevatorIdList[0])
        if inviterOkay == REJECT_NOTPAID:
            # An unpaid inviter should never reach this code path from a
            # legitimate client - log it and optionally ban.
            reason = BoardingPartyBase.BOARDCODE_NOT_PAID
            self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviteeId, reason, 0])
            simbase.air.writeServerEvent('suspicious', inviterId, 'User with rights: %s tried to invite someone to a boarding group' % inviter.getGameAccess())
            if simbase.config.GetBool('want-ban-boardingparty', True):
                commentStr = 'User with rights: %s tried to invite someone to a boarding group' % inviter.getGameAccess()
                dislId = inviter.DISLid
                simbase.air.banManager.ban(inviterId, dislId, commentStr)
            return
        if invitee and invitee.battleId != 0:
            # Invitee currently in battle.
            reason = BoardingPartyBase.BOARDCODE_BATTLE
            self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviteeId, reason, 0])
            self.sendUpdateToAvatarId(inviteeId, 'postMessageInvitationFailed', [inviterId])
            return
        if self.hasActiveGroup(inviteeId):
            # Invitee already belongs to another group.
            reason = BoardingPartyBase.BOARDCODE_DIFF_GROUP
            self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviteeId, reason, 0])
            self.sendUpdateToAvatarId(inviteeId, 'postMessageInvitationFailed', [inviterId])
            return
        if self.hasPendingInvite(inviteeId):
            # Invitee already has an outstanding invite.
            reason = BoardingPartyBase.BOARDCODE_PENDING_INVITE
            self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviteeId, reason, 0])
            self.sendUpdateToAvatarId(inviteeId, 'postMessageInvitationFailed', [inviterId])
            return
        if self.__isInElevator(inviteeId):
            reason = BoardingPartyBase.BOARDCODE_IN_ELEVATOR
            self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviteeId, reason, 0])
            self.sendUpdateToAvatarId(inviteeId, 'postMessageInvitationFailed', [inviterId])
            return
        inviteeOkay = self.checkBoard(inviteeId, self.elevatorIdList[0])
        reason = 0
        if inviteeOkay == REJECT_NOTPAID:
            reason = BoardingPartyBase.BOARDCODE_NOT_PAID
            self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviteeId, reason, 0])
            return
        if len(self.elevatorIdList) == 1:
            # Single-elevator parties enforce that elevator's laff/promotion
            # requirements at invite time for both invitee and inviter.
            if inviteeOkay:
                if inviteeOkay == REJECT_MINLAFF:
                    reason = BoardingPartyBase.BOARDCODE_MINLAFF
                elif inviteeOkay == REJECT_PROMOTION:
                    reason = BoardingPartyBase.BOARDCODE_PROMOTION
                self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviteeId, reason, self.elevatorIdList[0]])
                return
            else:
                inviterOkay = self.checkBoard(inviterId, self.elevatorIdList[0])
                if inviterOkay:
                    if inviterOkay == REJECT_MINLAFF:
                        reason = BoardingPartyBase.BOARDCODE_MINLAFF
                    elif inviterOkay == REJECT_PROMOTION:
                        reason = BoardingPartyBase.BOARDCODE_PROMOTION
                    self.sendUpdateToAvatarId(inviterId, 'postInviteNotQualify', [inviterId, reason, self.elevatorIdList[0]])
                    return
        if inviterId in self.avIdDict:
            # Inviter already tracked: extend the existing group.
            self.notify.debug('old group')
            leaderId = self.avIdDict[inviterId]
            groupList = self.groupListDict.get(leaderId)
            if groupList:
                self.notify.debug('got group list')
                if inviterId == leaderId:
                    # Leader re-inviting someone previously kicked clears
                    # the kicked mark.
                    if inviteeId in groupList[2]:
                        groupList[2].remove(inviteeId)
                if len(self.getGroupMemberList(leaderId)) >= self.maxSize:
                    self.sendUpdate('postSizeReject', [leaderId, inviterId, inviteeId])
                elif inviterId not in groupList[1] and inviterId not in groupList[2]:
                    if inviteeId not in groupList[1]:
                        groupList[1].append(inviteeId)
                    self.groupListDict[leaderId] = groupList
                    if inviteeId in self.avIdDict:
                        self.notify.warning('inviter %s tried to invite %s who already exists in the avIdDict.' % (inviterId, inviteeId))
                        self.air.writeServerEvent('suspicious: inviter', inviterId, ' tried to invite %s who already exists in the avIdDict.' % inviteeId)
                    self.avIdDict[inviteeId] = leaderId
                    self.sendUpdateToAvatarId(inviteeId, 'postInvite', [leaderId, inviterId])
                    for memberId in groupList[0]:
                        if not memberId == inviterId:
                            self.sendUpdateToAvatarId(memberId, 'postMessageInvited', [inviteeId, inviterId])
                elif inviterId in groupList[2]:
                    # Inviter was kicked from this group; cannot invite.
                    self.sendUpdate('postKickReject', [leaderId, inviterId, inviteeId])
        else:
            # Inviter not in any group: create a new one with them as leader.
            if inviteeId in self.avIdDict:
                self.notify.warning('inviter %s tried to invite %s who already exists in avIdDict.' % (inviterId, inviteeId))
                self.air.writeServerEvent('suspicious: inviter', inviterId, ' tried to invite %s who already exists in the avIdDict.' % inviteeId)
            self.notify.debug('new group')
            leaderId = inviterId
            self.avIdDict[inviterId] = inviterId
            self.avIdDict[inviteeId] = inviterId
            # Group list layout: [members, pending invites, kicked].
            self.groupListDict[leaderId] = [[leaderId], [inviteeId], []]
            self.addWacthAvStatus(leaderId)
            self.sendUpdateToAvatarId(inviteeId, 'postInvite', [leaderId, inviterId])
def requestCancelInvite(self, inviteeId):
inviterId = self.air.getAvatarIdFromSender()
if inviterId in self.avIdDict:
leaderId = self.avIdDict[inviterId]
groupList = self.groupListDict.get(leaderId)
if groupList:
self.removeFromGroup(leaderId, inviteeId)
self.sendUpdateToAvatarId(inviteeId, 'postInviteCanceled', [])
    def requestAcceptInvite(self, leaderId, inviterId):
        """
        Sender accepts an invite into leaderId's group. Validates that the
        invite is still consistent, handles stale cross-group state, and
        rejects when the group filled up in the meantime.
        """
        inviteeId = self.air.getAvatarIdFromSender()
        self.notify.debug('requestAcceptInvite leader%s inviter%s invitee%s' % (leaderId, inviterId, inviteeId))
        if inviteeId in self.avIdDict:
            if self.hasActiveGroup(inviteeId):
                # Already a member of some active group.
                self.sendUpdateToAvatarId(inviteeId, 'postAlreadyInGroup', [])
                return
            if leaderId not in self.avIdDict or not self.isInGroup(inviteeId, leaderId):
                # The group dissolved or the invite is no longer valid.
                self.sendUpdateToAvatarId(inviteeId, 'postSomethingMissing', [])
                return
            memberList = self.getGroupMemberList(leaderId)
            if self.avIdDict[inviteeId]:
                if self.avIdDict[inviteeId] == leaderId:
                    if inviteeId in memberList:
                        self.notify.debug('invitee already in group, aborting requestAcceptInvite')
                        return
                else:
                    # Invitee was tracked under a different leader: log and
                    # detach from the stale group before joining this one.
                    self.air.writeServerEvent('suspicious: ', inviteeId, " accepted a second invite from %s, in %s's group, while he was in alredy in %s's group." % (inviterId, leaderId, self.avIdDict[inviteeId]))
                    self.removeFromGroup(self.avIdDict[inviteeId], inviteeId, post=0)
            if len(memberList) >= self.maxSize:
                # Group filled up while the invite was pending.
                self.removeFromGroup(leaderId, inviteeId)
                self.sendUpdateToAvatarId(inviterId, 'postMessageAcceptanceFailed', [inviteeId, BoardingPartyBase.INVITE_ACCEPT_FAIL_GROUP_FULL])
                self.sendUpdateToAvatarId(inviteeId, 'postGroupAlreadyFull', [])
                return
            self.sendUpdateToAvatarId(inviterId, 'postInviteAccepted', [inviteeId])
            self.addToGroup(leaderId, inviteeId)
        else:
            self.air.writeServerEvent('suspicious: ', inviteeId, " was invited to %s's group by %s, but the invitee didn't have an entry in the avIdDict." % (leaderId, inviterId))
    def requestRejectInvite(self, leaderId, inviterId):
        # Sender declines the invite: drop them from the group's pending
        # list and notify the inviter. ('Delcined' spelling is part of the
        # wire protocol - do not correct it here.)
        inviteeId = self.air.getAvatarIdFromSender()
        self.removeFromGroup(leaderId, inviteeId)
        self.sendUpdateToAvatarId(inviterId, 'postInviteDelcined', [inviteeId])
def requestKick(self, kickId):
leaderId = self.air.getAvatarIdFromSender()
if kickId in self.avIdDict:
if self.avIdDict[kickId] == leaderId:
self.removeFromGroup(leaderId, kickId, kick=1)
self.sendUpdateToAvatarId(kickId, 'postKick', [leaderId])
def requestLeave(self, leaderId):
memberId = self.air.getAvatarIdFromSender()
if memberId in self.avIdDict:
if leaderId == self.avIdDict[memberId]:
self.removeFromGroup(leaderId, memberId)
def checkBoard(self, avId, elevatorId):
elevator = simbase.air.doId2do.get(elevatorId)
avatar = simbase.air.doId2do.get(avId)
if avatar:
if not ToontownAccessAI.canAccess(avId, self.zoneId, 'DistributedBoardingPartyAI.checkBoard'):
return REJECT_NOTPAID
elif elevator:
return elevator.checkBoard(avatar)
return REJECT_BOARDINGPARTY
def testBoard(self, leaderId, elevatorId, needSpace=0):
elevator = None
boardOkay = BoardingPartyBase.BOARDCODE_MISSING
avatarsFailingRequirements = []
avatarsInBattle = []
if elevatorId in self.elevatorIdList:
elevator = simbase.air.doId2do.get(elevatorId)
if elevator:
if leaderId in self.avIdDict:
if leaderId == self.avIdDict[leaderId]:
boardOkay = BoardingPartyBase.BOARDCODE_OKAY
for avId in self.getGroupMemberList(leaderId):
avatar = simbase.air.doId2do.get(avId)
if avatar:
if elevator.checkBoard(avatar) != 0:
if elevator.checkBoard(avatar) == REJECT_MINLAFF:
boardOkay = BoardingPartyBase.BOARDCODE_MINLAFF
else:
if elevator.checkBoard(avatar) == REJECT_PROMOTION:
boardOkay = BoardingPartyBase.BOARDCODE_PROMOTION
avatarsFailingRequirements.append(avId)
elif avatar.battleId != 0:
boardOkay = BoardingPartyBase.BOARDCODE_BATTLE
avatarsInBattle.append(avId)
groupSize = len(self.getGroupMemberList(leaderId))
if groupSize > self.maxSize:
boardOkay = BoardingPartyBase.BOARDCODE_SPACE
if needSpace:
if groupSize > elevator.countOpenSeats():
boardOkay = BoardingPartyBase.BOARDCODE_SPACE
if boardOkay != BoardingPartyBase.BOARDCODE_OKAY:
self.notify.debug('Something is wrong with the group board request')
if boardOkay == BoardingPartyBase.BOARDCODE_MINLAFF:
self.notify.debug('An avatar did not meet the elevator laff requirements')
if boardOkay == BoardingPartyBase.BOARDCODE_PROMOTION:
self.notify.debug('An avatar did not meet the elevator promotion requirements')
elif boardOkay == BoardingPartyBase.BOARDCODE_BATTLE:
self.notify.debug('An avatar is in battle')
return (boardOkay, avatarsFailingRequirements, avatarsInBattle)
    def requestBoard(self, elevatorId):
        """
        Leader asks to board their entire group into the elevator now.
        On success every member is seated; on failure the leader receives
        a postRejectBoard with the reason and offending avatars.
        """
        wantDisableGoButton = False
        leaderId = self.air.getAvatarIdFromSender()
        elevator = None
        if elevatorId in self.elevatorIdList:
            elevator = simbase.air.doId2do.get(elevatorId)
        if elevator:
            if leaderId in self.avIdDict:
                if leaderId == self.avIdDict[leaderId]:
                    group = self.groupListDict.get(leaderId)
                    if group:
                        # Re-validate with needSpace so open seats count.
                        boardOkay, avatarsFailingRequirements, avatarsInBattle = self.testBoard(leaderId, elevatorId, needSpace=1)
                        if boardOkay == BoardingPartyBase.BOARDCODE_OKAY:
                            leader = simbase.air.doId2do.get(leaderId)
                            if leader:
                                elevator.partyAvatarBoard(leader)
                                wantDisableGoButton = True
                            # Seat every other member with the boarding show.
                            for avId in group[0]:
                                if not avId == leaderId:
                                    avatar = simbase.air.doId2do.get(avId)
                                    if avatar:
                                        elevator.partyAvatarBoard(avatar, wantBoardingShow=1)
                            self.air.writeServerEvent('boarding_elevator', self.zoneId, '%s; Sending avatars %s' % (elevatorId, group[0]))
                        else:
                            self.sendUpdateToAvatarId(leaderId, 'postRejectBoard', [elevatorId, boardOkay, avatarsFailingRequirements, avatarsInBattle])
                            return
        if not wantDisableGoButton:
            # Fell through every validation path: generic rejection.
            self.sendUpdateToAvatarId(leaderId, 'postRejectBoard', [elevatorId, BoardingPartyBase.BOARDCODE_MISSING, [], []])
        return
    def testGoButtonRequirements(self, leaderId, elevatorId):
        """
        Return True when leaderId may press the GO button for elevatorId:
        they lead a valid group, the elevator is ours, the group passes
        testBoard, and the leader is not already seated in the elevator.
        Sends rejectGoToRequest to the leader when the group fails.
        """
        if leaderId in self.avIdDict:
            if leaderId == self.avIdDict[leaderId]:
                if elevatorId in self.elevatorIdList:
                    elevator = simbase.air.doId2do.get(elevatorId)
                    if elevator:
                        boardOkay, avatarsFailingRequirements, avatarsInBattle = self.testBoard(leaderId, elevatorId, needSpace=0)
                        if boardOkay == BoardingPartyBase.BOARDCODE_OKAY:
                            # NOTE(review): avList is only scrubbed of zeros
                            # here and then unused - looks vestigial.
                            avList = self.getGroupMemberList(leaderId)
                            if 0 in avList:
                                avList.remove(0)
                            if leaderId not in elevator.seats:
                                return True
                            else:
                                self.notify.warning('avId: %s has hacked his/her client.' % leaderId)
                                self.air.writeServerEvent('suspicious: ', leaderId, ' pressed the GO Button while inside the elevator.')
                        else:
                            self.sendUpdateToAvatarId(leaderId, 'rejectGoToRequest', [elevatorId, boardOkay, avatarsFailingRequirements, avatarsInBattle])
        return False
def requestGoToFirstTime(self, elevatorId):
callerId = self.air.getAvatarIdFromSender()
if self.testGoButtonRequirements(callerId, elevatorId):
self.sendUpdateToAvatarId(callerId, 'acceptGoToFirstTime', [elevatorId])
def requestGoToSecondTime(self, elevatorId):
callerId = self.air.getAvatarIdFromSender()
avList = self.getGroupMemberList(callerId)
if self.testGoButtonRequirements(callerId, elevatorId):
for avId in avList:
self.sendUpdateToAvatarId(avId, 'acceptGoToSecondTime', [elevatorId])
THREE_SECONDS = 3.0
taskMgr.doMethodLater(THREE_SECONDS, self.sendAvatarsToDestinationTask, self.uniqueName('sendAvatarsToDestinationTask'), extraArgs=[elevatorId, avList], appendTask=True)
    def sendAvatarsToDestinationTask(self, elevatorId, avList, task):
        """
        Deferred task (scheduled by requestGoToSecondTime) that actually
        dispatches the group to the elevator's destination. Avatars that no
        longer qualify are logged as suspicious, but the group is sent
        regardless.
        """
        self.notify.debug('entering sendAvatarsToDestinationTask')
        if len(avList):
            if elevatorId in self.elevatorIdList:
                elevator = simbase.air.doId2do.get(elevatorId)
                if elevator:
                    self.notify.warning('Sending avatars %s' % avList)
                    # avList[0] is treated as the leader for revalidation.
                    boardOkay, avatarsFailingRequirements, avatarsInBattle = self.testBoard(avList[0], elevatorId, needSpace=0)
                    if not boardOkay == BoardingPartyBase.BOARDCODE_OKAY:
                        for avId in avatarsFailingRequirements:
                            self.air.writeServerEvent('suspicious: ', avId, ' failed requirements after the second go button request.')
                        for avId in avatarsInBattle:
                            self.air.writeServerEvent('suspicious: ', avId, ' joined battle after the second go button request.')
                    self.air.writeServerEvent('boarding_go', self.zoneId, '%s; Sending avatars %s' % (elevatorId, avList))
                    elevator.sendAvatarsToDestination(avList)
        return Task.done
def handleAvatarDisco(self, avId):
self.notify.debug('handleAvatarDisco %s' % avId)
if avId in self.avIdDict:
leaderId = self.avIdDict[avId]
self.removeFromGroup(leaderId, avId)
def handleAvatarZoneChange(self, avId, zoneNew, zoneOld):
self.notify.debug('handleAvatarZoneChange %s new%s old%s bp%s' % (avId, zoneNew, zoneOld, self.zoneId))
if zoneNew in self.visibleZones:
self.toonInZone(avId)
else:
if avId in self.avIdDict:
leaderId = self.avIdDict[avId]
self.removeFromGroup(leaderId, avId)
    def toonInZone(self, avId):
        # Called when a group member's avatar shows up in one of this
        # party's visible zones.
        if avId in self.avIdDict:
            leaderId = self.avIdDict[avId]
            group = self.groupListDict.get(leaderId)
            if leaderId and group:
                # NOTE(review): the debug message claims a postGroupInfo is
                # sent here, but no sendUpdate call follows — either the
                # broadcast was removed intentionally or a line is missing.
                # Confirm against the original source before changing.
                self.notify.debug('Calling postGroupInfo from toonInZone')
def addToGroup(self, leaderId, inviteeId, post=1):
group = self.groupListDict.get(leaderId)
if group:
self.avIdDict[inviteeId] = leaderId
if inviteeId in group[1]:
group[1].remove(inviteeId)
if inviteeId not in group[0]:
group[0].append(inviteeId)
self.groupListDict[leaderId] = group
if post:
self.notify.debug('Calling postGroupInfo from addToGroup')
self.sendUpdate('postGroupInfo', [leaderId, group[0], group[1], group[2]])
self.addWacthAvStatus(inviteeId)
else:
self.sendUpdate('postGroupDissolve', [leaderId, leaderId, [], 0])
    def removeFromGroup(self, leaderId, memberId, kick=0, post=1):
        # Remove memberId from leaderId's group, dissolving the group when
        # the leader leaves or fewer than two members remain.
        # Group list layout (inferred from usage here and in addToGroup —
        # confirm against BoardingPartyBase): group[0] = members,
        # group[1] = pending invitees, group[2] = kicked avatars.
        self.notify.debug('')
        self.notify.debug('removeFromGroup leaderId %s memberId %s' % (leaderId, memberId))
        self.notify.debug('Groups %s' % self.groupListDict)
        self.notify.debug('avDict %s' % self.avIdDict)
        if leaderId not in self.avIdDict:
            # Leader is not tracked at all: treat the group as already gone
            # and just announce the dissolve to the caller's client.
            self.sendUpdate('postGroupDissolve', [memberId, leaderId, [], kick])
            if memberId in self.avIdDict:
                self.avIdDict.pop(memberId)
            return
        # NOTE(review): 'Wacth' is a misspelling of 'Watch' carried through
        # the whole class; the method is defined elsewhere under that name.
        self.removeWacthAvStatus(memberId)
        group = self.groupListDict.get(leaderId)
        if group:
            # Drop the avatar from every roster it might be on.
            if memberId in group[0]:
                group[0].remove(memberId)
            if memberId in group[1]:
                group[1].remove(memberId)
            if memberId in group[2]:
                group[2].remove(memberId)
            if kick:
                # Kicked avatars are remembered in group[2].
                group[2].append(memberId)
        else:
            return
        if memberId == leaderId or len(group[0]) < 2:
            # The leader left, or the group shrank below two members:
            # dissolve the whole group.
            if leaderId in self.avIdDict:
                self.avIdDict.pop(leaderId)
            for inviteeId in group[1]:
                # Cancel any still-pending invites.
                if inviteeId in self.avIdDict:
                    self.avIdDict.pop(inviteeId)
                    self.sendUpdateToAvatarId(inviteeId, 'postInviteCanceled', [])
            dgroup = self.groupListDict.pop(leaderId)
            for dMemberId in dgroup[0]:
                if dMemberId in self.avIdDict:
                    self.avIdDict.pop(dMemberId)
            self.notify.debug('postGroupDissolve')
            # Put the departing avatar first so clients know who triggered it.
            dgroup[0].insert(0, memberId)
            self.sendUpdate('postGroupDissolve', [memberId, leaderId, dgroup[0], kick])
        else:
            # Group survives: store the updated rosters and (optionally)
            # broadcast the new state.
            self.groupListDict[leaderId] = group
            if post:
                self.notify.debug('Calling postGroupInfo from removeFromGroup')
                self.sendUpdate('postGroupInfo', [leaderId, group[0], group[1], group[2]])
        if memberId in self.avIdDict:
            self.avIdDict.pop(memberId)
        self.notify.debug('Remove from group END')
        self.notify.debug('Groups %s' % self.groupListDict)
        self.notify.debug('avDict %s' % self.avIdDict)
        self.notify.debug('')
    def informDestinationInfo(self, offset):
        # Leader broadcasts the chosen destination (elevator selection
        # `offset`) to every other member of the group.
        leaderId = self.air.getAvatarIdFromSender()
        # NOTE(review): if `offset` is a 0-based index into elevatorIdList,
        # this check has an off-by-one (offset == len(...) passes) and does
        # not reject negative values — confirm the offset semantics on the
        # client side before tightening to `not 0 <= offset < len(...)`.
        if offset > len(self.elevatorIdList):
            self.air.writeServerEvent('suspicious: ', leaderId, 'has requested to go to %s elevator which does not exist' % offset)
            return
        memberList = self.getGroupMemberList(leaderId)
        for avId in memberList:
            if avId != leaderId:
                self.sendUpdateToAvatarId(avId, 'postDestinationInfo', [offset])
def __isInElevator(self, avId):
inElevator = False
for elevatorId in self.elevatorIdList:
elevator = simbase.air.doId2do.get(elevatorId)
if elevator:
if avId in elevator.seats:
inElevator = True
return inElevator
| 51.548654 | 213 | 0.616395 |
795510dd419cbfd0e9411acb56126cfee17a10bd | 218 | py | Python | src/setup.py | illescasDaniel/SendEmailPython | 49b6d32264a0af6872776968f002c57db250c121 | [
"MIT"
] | null | null | null | src/setup.py | illescasDaniel/SendEmailPython | 49b6d32264a0af6872776968f002c57db250c121 | [
"MIT"
] | null | null | null | src/setup.py | illescasDaniel/SendEmailPython | 49b6d32264a0af6872776968f002c57db250c121 | [
"MIT"
] | null | null | null | from setuptools import setup
# py2app build configuration: bundles SendEmail.py into a macOS .app.
APP = ['SendEmail.py']
# Extra non-code resources to copy into the bundle (none needed here).
DATA_FILES = []
# argv_emulation makes files dropped onto the app icon appear in sys.argv.
OPTIONS = {'argv_emulation': True}
setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
| 16.769231 | 34 | 0.665138 |
79551100bd9b4540d4dd11994aadcf9448b4b396 | 1,394 | py | Python | setup.py | insad/SpeedTorch | 82cfcbe3908de6baba5cb01b51aafb97da4c99f4 | [
"MIT"
] | 683 | 2019-09-14T11:28:17.000Z | 2022-03-31T02:06:22.000Z | setup.py | insad/SpeedTorch | 82cfcbe3908de6baba5cb01b51aafb97da4c99f4 | [
"MIT"
] | 8 | 2019-09-10T21:23:46.000Z | 2021-11-03T03:11:59.000Z | setup.py | insad/SpeedTorch | 82cfcbe3908de6baba5cb01b51aafb97da4c99f4 | [
"MIT"
] | 42 | 2019-09-09T07:09:13.000Z | 2022-03-31T02:06:17.000Z | import codecs
from setuptools import setup, find_packages

# Read the long description and dependency pins from files shipped next to
# this setup script.
with codecs.open('README.md', 'r', 'utf8') as reader:
    long_description = reader.read()

with codecs.open('requirements.txt', 'r', 'utf8') as reader:
    install_requires = [line.strip() for line in reader.readlines()]

# CuPy wheels are CUDA-version specific, so it is only checked for and never
# added to install_requires; was a bare `except:` — narrowed to ImportError
# so genuine CuPy breakage is not silently swallowed.
try:
    import cupy
except ImportError:
    print('CuPy is not available. Please install it manually: https://docs-cupy.chainer.org/en/stable/install.html#install-cupy')

setup(
    name='SpeedTorch',
    version='0.1.6',
    packages=find_packages(),
    url='https://github.com/Santosh-Gupta/SpeedTorch',
    license='MIT',
    author='Santosh Gupta',
    author_email='SanGupta.ML@gmail.com',
    description='Fast Pinned CPU -> GPU transfer',
    # Fixes: the README contents were read above but then discarded in favor
    # of a short literal, and the content-type keyword was misspelled
    # (`long_description_content_taype`), so setuptools silently ignored it
    # and PyPI never rendered the description as Markdown.
    long_description=long_description,
    long_description_content_type='text/markdown',
    python_requires='>=3.5.0',
    install_requires=install_requires,
    classifiers=(
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
)
| 36.684211 | 135 | 0.685079 |
7955120232963d500ec7530b9402964ccd053f9f | 2,033 | py | Python | RASA_Restaurant_Chatbot/core/train_init.py | anugrahasinha/nlp | 3fd7ac305678eb6c2d761f6239ce5e53a016a3e4 | [
"MIT"
] | null | null | null | RASA_Restaurant_Chatbot/core/train_init.py | anugrahasinha/nlp | 3fd7ac305678eb6c2d761f6239ce5e53a016a3e4 | [
"MIT"
] | null | null | null | RASA_Restaurant_Chatbot/core/train_init.py | anugrahasinha/nlp | 3fd7ac305678eb6c2d761f6239ce5e53a016a3e4 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import os
from datetime import datetime
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer, BinarySingleStateFeaturizer)
from lib import ChatBotConfigParser
from lib import ChatBotLogging
logger = logging.getLogger("ChatBotBase.RasaCoreTrain")
class RasaCoreTrain(object):
    """Trains a Rasa Core dialogue model from the stories/domain files named
    in ./etc/config.ini and writes the resulting model path back into that
    config file so the bot runtime can locate it.
    """

    def __init__(self):
        """Load the chatbot configuration.

        :raises: re-raises whatever ChatBotConfigParser raises, after logging.
        """
        try:
            self.config = ChatBotConfigParser().parser
        except Exception as e:
            logger.error("Unable to build RasaCoreTrain Obj, exception : %s" % (str(e)))
            # Bare `raise` (was `raise(e)`) preserves the original traceback.
            raise

    def trainRasaCore(self):
        """Train the dialogue policy ensemble, persist it, and record its path.

        :returns: a human-readable completion message.
        :raises: re-raises any training/persistence error after logging it.
        """
        try:
            training_data_file = "./" + self.config.get('inputData', 'stories')
            domain_yml = "./" + self.config.get('inputData', 'coreyml')
            logger.info("Building RASA Core model with stories : %s, domain_yml : %s" % (training_data_file, domain_yml))

            # Time-stamped directory name so successive training runs never
            # overwrite each other.
            model_name = "model_" + datetime.now().strftime("%Y%m%dT%H%M%S")
            model_location = "./models/ourgroup/dialogue/" + model_name

            featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)
            agent = Agent(domain_yml, policies=[MemoizationPolicy(max_history=4), KerasPolicy(featurizer)])
            agent.train(training_data_file,
                        augmentation_factor=50,
                        epochs=500,
                        batch_size=30,
                        validation_split=0.2)
            agent.persist(model_location)

            model_location = os.path.realpath(model_location)
            logger.info("RASA Core model_location : %s" % (str(model_location)))

            # Persist the trained model's location so later runs can load it.
            self.config.set('coreModel', 'model_location', value=model_location)
            with open("./etc/config.ini", "w+") as f:
                self.config.write(f)
            return ("RASA core model training completed, see details above")
        except Exception as e:
            logger.error("unable to train rasa core model, exception : %s" % (str(e)))
            # Bare `raise` (was `raise(e)`) preserves the original traceback.
            raise
7955125e500373157e10828a165c439ade25fa2c | 4,326 | py | Python | gpy_dla_detection/plottings/plot_model.py | jibanCat/gpy_dla_detection | 4d987adec75a417313fdc6601ee41a0ea60a0a2e | [
"MIT"
] | 1 | 2020-07-31T01:31:52.000Z | 2020-07-31T01:31:52.000Z | gpy_dla_detection/plottings/plot_model.py | jibanCat/gpy_dla_detection | 4d987adec75a417313fdc6601ee41a0ea60a0a2e | [
"MIT"
] | 12 | 2020-07-20T18:55:15.000Z | 2021-09-23T05:08:26.000Z | gpy_dla_detection/plottings/plot_model.py | jibanCat/gpy_dla_detection | 4d987adec75a417313fdc6601ee41a0ea60a0a2e | [
"MIT"
] | null | null | null | """
Plot the GP model with sample likelihoods
"""
from typing import Optional
import numpy as np
from matplotlib import pyplot as plt
from ..dla_gp import DLAGP
def plot_dla_model(
    dla_gp: DLAGP,
    nth_dla: int,
    title: Optional[str] = None,
    label: Optional[str] = None,
):
    """Draw a two-panel figure for one DLAGP instance: the sample-likelihood
    scatter on top and the GP mean model over the data on the bottom.

    :param dla_gp: the DLAGP instance to plot.
    :param nth_dla: number of DLAs to show in the mean-model panel.
    :param title: figure title for the top panel (e.g. thing_id / model name).
    :param label: data label for the bottom panel (e.g. spec-xxx-xxx-xxx).
    """
    _, (likelihood_ax, model_ax) = plt.subplots(2, 1, figsize=(16, 10))

    likelihood_ax.set_title(title)
    plot_sample_likelihoods(dla_gp=dla_gp, ax=likelihood_ax)

    plot_this_mu(dla_gp=dla_gp, nth_dla=nth_dla, ax=model_ax, label=label)
def plot_this_mu(
    dla_gp: DLAGP,
    nth_dla: int = 1,
    ax: Optional[plt.axes] = None,
    label: Optional[str] = None,
):
    """
    Plot the GP mean model onto data.

    :param dla_gp: the DLAGP instance you want to plot
    :param nth_dla: the num of DLA you want to plot. Default 1 DLA.
    :param ax: the matplotlib.pyplot.ax you want to plot on. if None, generate a new one.
    :param label: the label you want to put on the figure.
    """
    # [check] make sure we ran the log_evidence
    assert "sample_log_likelihoods" in dir(dla_gp)

    this_rest_wavelengths = dla_gp.x
    this_flux = dla_gp.y
    this_mu = dla_gp.this_mu

    # [ax] if not given, create a new one.
    # Fix: compare against None explicitly instead of `if not ax:` —
    # a matplotlib Axes should not be evaluated in boolean context.
    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(16, 5))

    # observed data
    ax.plot(this_rest_wavelengths, this_flux, label=label, color="C0")

    # DLA model
    if nth_dla > 0:
        # [MAP] maximum a posteriori values
        # N * (1~k models) * (1~k MAP dlas)
        MAP_z_dla, MAP_log_nhi = dla_gp.maximum_a_posteriori()
        # make them to be 1-D array
        map_z_dlas = MAP_z_dla[nth_dla - 1, :nth_dla]
        map_log_nhis = MAP_log_nhi[nth_dla - 1, :nth_dla]
        # feed in MAP values and get the absorption profile given (z_dlas, nhis)
        dla_mu, dla_M, dla_omega2 = dla_gp.this_dla_gp(map_z_dlas, 10 ** map_log_nhis)

        ax.plot(
            this_rest_wavelengths,
            dla_mu,
            label=r"$\mathcal{M}$"
            + r" DLA({n}); ".format(n=nth_dla)
            + "z_dlas = ({}); ".format(",".join("{:.3g}".format(z) for z in map_z_dlas))
            + "lognhi = ({})".format(
                ",".join("{:.3g}".format(n) for n in map_log_nhis)
            ),
            color="red",
        )
    else:
        # No DLA requested: show the bare GP mean model.
        ax.plot(
            this_rest_wavelengths,
            this_mu,
            label=r"$\mathcal{M}$" + r" DLA({n})".format(n=0),
            color="red",
        )

    ax.set_xlim(this_rest_wavelengths.min(), this_rest_wavelengths.max())
    ax.set_ylim(this_mu.min() - 2, this_mu.max() + 1)
    ax.set_xlabel(r"Rest-Wavelength $\lambda_{\mathrm{rest}}$ $\AA$")
    ax.set_ylabel(r"Normalised Flux")
    ax.legend()
def plot_sample_likelihoods(dla_gp: DLAGP, ax: Optional[plt.axes] = None):
    """
    Plot the sample likelihoods in the (z_dla, log NHI) parameter space.

    :param dla_gp: the DLAGP instance whose quasi-Monte-Carlo samples and
        sample_log_likelihoods are plotted.
    :param ax: axes to draw on; a new figure/axes pair is created when None.
    """
    sample_z_dlas = dla_gp.dla_samples.sample_z_dlas(
        dla_gp.this_wavelengths, dla_gp.z_qso
    )

    # [color sequence] convert sample log likelihoods to values in (0, 1)
    sample_log_likelihoods = dla_gp.sample_log_likelihoods[
        :, 0
    ]  # only query the DLA(1) likelihoods
    # TODO: marginalize over k DLAs

    max_like = np.nanmax(sample_log_likelihoods)
    min_like = np.nanmin(sample_log_likelihoods)

    colours = (sample_log_likelihoods - min_like) / (max_like - min_like)

    # scale to make the colour more visible
    # TODO: make it more reasonable. scatter only takes values between [0, 1].
    colours = colours * 5 - 4
    colours[colours < 0] = 0

    # Fix: compare against None explicitly instead of `if not ax:` —
    # a matplotlib Axes should not be evaluated in boolean context.
    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(16, 5))

    ax.scatter(
        sample_z_dlas, dla_gp.dla_samples.log_nhi_samples, c=colours,
    )

    # [min max sample zDLAs] instead of using min max from sample_z_dlas
    # using the zDLAs converted from wavelengths will better reflect the
    # range of wavelengths range in the this_mu plot.
    z_dlas = (dla_gp.this_wavelengths / dla_gp.params.lya_wavelength) - 1
    ax.set_xlim(z_dlas.min(), z_dlas.max())
    ax.set_ylim(
        dla_gp.dla_samples.log_nhi_samples.min(),
        dla_gp.dla_samples.log_nhi_samples.max(),
    )
    ax.set_xlabel(r"$z_{DLA}$")
    ax.set_ylabel(r"$log N_{HI}$")
| 31.808824 | 89 | 0.629219 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.